hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0ec0692089769b77692fbc84bee5d321e8bade
| 7,739
|
py
|
Python
|
docx/document.py
|
AllianceSoftware/python-docx
|
baa8bb7787856ce2f884ce7dec4faf28207b861b
|
[
"MIT"
] | null | null | null |
docx/document.py
|
AllianceSoftware/python-docx
|
baa8bb7787856ce2f884ce7dec4faf28207b861b
|
[
"MIT"
] | null | null | null |
docx/document.py
|
AllianceSoftware/python-docx
|
baa8bb7787856ce2f884ce7dec4faf28207b861b
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
|Document| and closely related objects
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from .blkcntnr import BlockItemContainer
from .enum.section import WD_SECTION
from .enum.text import WD_BREAK
from .section import Section, Sections
from .shared import ElementProxy, Emu
class Document(ElementProxy):
    """
    WordprocessingML (WML) document. Not intended to be constructed directly.
    Use :func:`docx.Document` to open or create a document.
    """

    __slots__ = ('_part', '__body')

    def __init__(self, element, part):
        super(Document, self).__init__(element)
        self._part = part
        self.__body = None  # lazily built by the `_body` property

    def add_heading(self, text='', level=1):
        """
        Return a heading paragraph newly added to the end of the document,
        containing *text* and having its paragraph style determined by
        *level*. If *level* is 0, the style is set to `Title`. If *level* is
        1 (or omitted), `Heading 1` is used. Otherwise the style is set to
        `Heading {level}`. Raises |ValueError| if *level* is outside the
        range 0-9.
        """
        if not 0 <= level <= 9:
            raise ValueError("level must be in range 0-9, got %d" % level)
        style = 'Title' if level == 0 else 'Heading %d' % level
        return self.add_paragraph(text, style)

    def add_page_break(self):
        """
        Return a paragraph newly added to the end of the document and
        containing only a page break.
        """
        paragraph = self.add_paragraph()
        paragraph.add_run().add_break(WD_BREAK.PAGE)
        return paragraph

    def get_new_list(self, abstractNumId):
        """
        Return a new numId that references the given *abstractNumId*.
        """
        return self.numbering.numbering_definitions.add_num(abstractNumId, True)

    def add_paragraph(self, text='', style=None):
        """
        Return a paragraph newly added to the end of the document, populated
        with *text* and having paragraph style *style*. *text* can contain
        tab (``\\t``) characters, which are converted to the appropriate XML
        form for a tab. *text* can also include newline (``\\n``) or carriage
        return (``\\r``) characters, each of which is converted to a line
        break.
        """
        return self._body.add_paragraph(text, style)

    def add_picture(self, image_path_or_stream, width=None, height=None):
        """
        Return a new picture shape added in its own paragraph at the end of
        the document. The picture contains the image at
        *image_path_or_stream*, scaled based on *width* and *height*. If
        neither width nor height is specified, the picture appears at its
        native size. If only one is specified, it is used to compute
        a scaling factor that is then applied to the unspecified dimension,
        preserving the aspect ratio of the image. The native size of the
        picture is calculated using the dots-per-inch (dpi) value specified
        in the image file, defaulting to 72 dpi if no value is specified, as
        is often the case.
        """
        run = self.add_paragraph().add_run()
        return run.add_picture(image_path_or_stream, width, height)

    def add_section(self, start_type=WD_SECTION.NEW_PAGE):
        """
        Return a |Section| object representing a new section added at the end
        of the document. The optional *start_type* argument must be a member
        of the :ref:`WdSectionStart` enumeration, and defaults to
        ``WD_SECTION.NEW_PAGE`` if not provided.
        """
        new_sectPr = self._element.body.add_section_break()
        new_sectPr.start_type = start_type
        return Section(new_sectPr)

    def add_table(self, rows, cols, style=None):
        """
        Add a table having row and column counts of *rows* and *cols*
        respectively and table style of *style*. *style* may be a paragraph
        style object or a paragraph style name. If *style* is |None|, the
        table inherits the default table style of the document.
        """
        table = self._body.add_table(rows, cols, self._block_width)
        table.style = style
        return table

    @property
    def core_properties(self):
        """
        A |CoreProperties| object providing read/write access to the core
        properties of this document.
        """
        return self._part.core_properties

    @property
    def inline_shapes(self):
        """
        An |InlineShapes| object providing access to the inline shapes in
        this document. An inline shape is a graphical object, such as
        a picture, contained in a run of text and behaving like a character
        glyph, being flowed like other text in a paragraph.
        """
        return self._part.inline_shapes

    @property
    def paragraphs(self):
        """
        A list of |Paragraph| instances corresponding to the paragraphs in
        the document, in document order. Note that paragraphs within revision
        marks such as ``<w:ins>`` or ``<w:del>`` do not appear in this list.
        """
        return self._body.paragraphs

    @property
    def part(self):
        """
        The |DocumentPart| object of this document.
        """
        return self._part

    def save(self, path_or_stream):
        """
        Save this document to *path_or_stream*, which can be either a path to
        a filesystem location (a string) or a file-like object.
        """
        self._part.save(path_or_stream)

    @property
    def sections(self):
        """
        A |Sections| object providing access to each section in this
        document.
        """
        return Sections(self._element)

    @property
    def styles(self):
        """
        A |Styles| object providing access to the styles in this document.
        """
        return self._part.styles

    @property
    def numbering(self):
        """
        The numbering part of this document, providing access to its
        numbering definitions.
        """
        # Fix: removed a dead local assignment (`x = ...`) and repaired the
        # previously garbled docstring; behavior is unchanged.
        return self._part.numbering_part

    @property
    def tables(self):
        """
        A list of |Table| instances corresponding to the tables in the
        document, in document order. Note that only tables appearing at the
        top level of the document appear in this list; a table nested inside
        a table cell does not appear. A table within revision marks such as
        ``<w:ins>`` or ``<w:del>`` will also not appear in the list.
        """
        return self._body.tables

    @property
    def _block_width(self):
        """
        Return a |Length| object specifying the width of available "writing"
        space between the margins of the last section of this document.
        """
        section = self.sections[-1]
        return Emu(
            section.page_width - section.left_margin - section.right_margin
        )

    @property
    def _body(self):
        """
        The |_Body| instance containing the content for this document.
        """
        # Created on first access and cached for the life of the document.
        if self.__body is None:
            self.__body = _Body(self._element.body, self)
        return self.__body
class _Body(BlockItemContainer):
    """
    Proxy wrapping the ``<w:body>`` element of the document, serving
    primarily as a container of block-level items.
    """

    def __init__(self, body_elm, parent):
        super(_Body, self).__init__(body_elm, parent)
        self._body = body_elm

    def clear_content(self):
        """
        Remove all content from this body and return this |_Body| instance.
        A section-properties element for the main document story, if one is
        present, is preserved.
        """
        self._body.clear_content()
        return self
| 34.86036
| 80
| 0.632511
|
4a0ec0cb22d3176fa728c0113b48a78f2995883e
| 3,779
|
py
|
Python
|
tests/test_stream.py
|
unduli-attacked/camelot
|
f53ed24d2f657968753cdce15cfcc64929c0a546
|
[
"MIT"
] | null | null | null |
tests/test_stream.py
|
unduli-attacked/camelot
|
f53ed24d2f657968753cdce15cfcc64929c0a546
|
[
"MIT"
] | null | null | null |
tests/test_stream.py
|
unduli-attacked/camelot
|
f53ed24d2f657968753cdce15cfcc64929c0a546
|
[
"MIT"
] | 1
|
2022-01-24T03:52:41.000Z
|
2022-01-24T03:52:41.000Z
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import camelot
from camelot.core import Table, TableList
from camelot.__version__ import generate_version
from .data import *
# Absolute path to the test fixture directory (./files next to this module).
testdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
def test_stream():
    """Stream flavor extracts the expected table from health.pdf."""
    pdf_path = os.path.join(testdir, "health.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream")
    expected = pd.DataFrame(data_stream)
    assert_frame_equal(expected, result[0].df)
def test_stream_table_rotated():
    """Rotated tables (both rotation directions) parse to the same frame."""
    expected = pd.DataFrame(data_stream_table_rotated)
    for basename in ("clockwise_table_2.pdf", "anticlockwise_table_2.pdf"):
        pdf_path = os.path.join(testdir, basename)
        result = camelot.read_pdf(pdf_path, flavor="stream")
        assert_frame_equal(expected, result[0].df)
def test_stream_two_tables():
    """Both tables on the page are detected, in order."""
    pdf_path = os.path.join(testdir, "tabula/12s0324.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream")
    assert len(result) == 2
    expected_first = pd.DataFrame(data_stream_two_tables_1)
    expected_second = pd.DataFrame(data_stream_two_tables_2)
    assert expected_first.equals(result[0].df)
    assert expected_second.equals(result[1].df)
def test_stream_table_regions():
    """table_regions restricts detection to the given region."""
    pdf_path = os.path.join(testdir, "tabula/us-007.pdf")
    result = camelot.read_pdf(
        pdf_path, flavor="stream", table_regions=["320,460,573,335"]
    )
    expected = pd.DataFrame(data_stream_table_areas)
    assert_frame_equal(expected, result[0].df)
def test_stream_table_areas():
    """table_areas pins extraction to an exact bounding box."""
    pdf_path = os.path.join(testdir, "tabula/us-007.pdf")
    result = camelot.read_pdf(
        pdf_path, flavor="stream", table_areas=["320,500,573,335"]
    )
    expected = pd.DataFrame(data_stream_table_areas)
    assert_frame_equal(expected, result[0].df)
def test_stream_columns():
    """Explicit column separators plus row_tol produce the expected frame."""
    pdf_path = os.path.join(testdir, "mexican_towns.pdf")
    result = camelot.read_pdf(
        pdf_path, flavor="stream", columns=["67,180,230,425,475"], row_tol=10
    )
    expected = pd.DataFrame(data_stream_columns)
    assert_frame_equal(expected, result[0].df)
def test_stream_split_text():
    """split_text divides strings that straddle a column separator."""
    pdf_path = os.path.join(testdir, "tabula/m27.pdf")
    result = camelot.read_pdf(
        pdf_path,
        flavor="stream",
        columns=["72,95,209,327,442,529,566,606,683"],
        split_text=True,
    )
    expected = pd.DataFrame(data_stream_split_text)
    assert_frame_equal(expected, result[0].df)
def test_stream_flag_size():
    """flag_size marks sub/superscript text in the extracted cells."""
    pdf_path = os.path.join(testdir, "superscript.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream", flag_size=True)
    expected = pd.DataFrame(data_stream_flag_size)
    assert_frame_equal(expected, result[0].df)
def test_stream_strip_text():
    """strip_text removes the given characters from every cell."""
    pdf_path = os.path.join(testdir, "detect_vertical_false.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream", strip_text=" ,\n")
    expected = pd.DataFrame(data_stream_strip_text)
    assert_frame_equal(expected, result[0].df)
def test_stream_edge_tol():
    """A large edge_tol lets nearby text be merged into the table."""
    pdf_path = os.path.join(testdir, "edge_tol.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream", edge_tol=500)
    expected = pd.DataFrame(data_stream_edge_tol)
    assert_frame_equal(expected, result[0].df)
def test_stream_layout_kwargs():
    """layout_kwargs are forwarded to the underlying pdfminer layout."""
    pdf_path = os.path.join(testdir, "detect_vertical_false.pdf")
    result = camelot.read_pdf(
        pdf_path, flavor="stream", layout_kwargs={"detect_vertical": False}
    )
    expected = pd.DataFrame(data_stream_layout_kwargs)
    assert_frame_equal(expected, result[0].df)
def test_stream_duplicated_text():
    """Duplicated text in the PDF does not duplicate extracted cells."""
    pdf_path = os.path.join(testdir, "birdisland.pdf")
    result = camelot.read_pdf(pdf_path, flavor="stream")
    expected = pd.DataFrame(data_stream_duplicated_text)
    assert_frame_equal(expected, result[0].df)
| 28.413534
| 77
| 0.708918
|
4a0ec1fa87f4aa9e486da9585d36e4f3e34783e8
| 11,492
|
py
|
Python
|
homeassistant/components/media_player/gpmdp.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 7
|
2018-08-03T10:15:36.000Z
|
2019-03-25T13:31:55.000Z
|
homeassistant/components/media_player/gpmdp.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:06:43.000Z
|
2022-03-12T00:56:04.000Z
|
homeassistant/components/media_player/gpmdp.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3
|
2018-10-09T08:37:48.000Z
|
2019-11-16T08:32:27.000Z
|
"""
Support for Google Play Music Desktop Player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.gpmdp/
"""
import logging
import json
import socket
import time
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK,
SUPPORT_PAUSE, SUPPORT_VOLUME_SET, SUPPORT_SEEK, SUPPORT_PLAY,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_PLAYING, STATE_PAUSED, STATE_OFF, CONF_HOST, CONF_PORT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
# Python package required by this platform (installed by Home Assistant).
REQUIREMENTS = ['websocket-client==0.37.0']
# Tracks an in-progress configurator flow; keyed by 'gpmdp' while pending.
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'GPM Desktop Player'
DEFAULT_PORT = 5672
# File (under the HA config dir) where the pairing code is persisted.
GPMDP_CONFIG_FILE = 'gpmpd.conf'
# Feature bitmask advertised to Home Assistant for this media player.
SUPPORT_GPMDP = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
    SUPPORT_SEEK | SUPPORT_VOLUME_SET | SUPPORT_PLAY
# GPMDP playback-state codes -> HA states ('0' stopped is reported as paused).
PLAYBACK_DICT = {'0': STATE_PAUSED,  # Stopped
                 '1': STATE_PAUSED,
                 '2': STATE_PLAYING}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def request_configuration(hass, config, url, add_devices_callback):
    """Request configuration steps from the user.

    Opens a websocket to the player at *url*, asks it to display a pairing
    pin, then registers a configurator card so the user can enter that pin.
    The callback exchanges the pin for a permanent code, persists it, and
    finishes platform setup.
    """
    configurator = hass.components.configurator
    # A flow is already pending; surface an error on the existing card
    # instead of starting a second websocket handshake.
    if 'gpmdp' in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING['gpmdp'], "Failed to register, please try again.")
        return
    from websocket import create_connection
    websocket = create_connection((url), timeout=1)
    # Connecting without a code makes GPMDP show a pairing pin on screen.
    websocket.send(json.dumps({'namespace': 'connect',
                               'method': 'connect',
                               'arguments': ['Home Assistant']}))
    def gpmdp_configuration_callback(callback_data):
        """Handle configuration changes."""
        while True:
            from websocket import _exceptions
            try:
                msg = json.loads(websocket.recv())
            except _exceptions.WebSocketConnectionClosedException:
                continue
            # Skip unrelated channel traffic until the player confirms it
            # still wants a code.
            if msg['channel'] != 'connect':
                continue
            if msg['payload'] != "CODE_REQUIRED":
                continue
            pin = callback_data.get('pin')
            # Exchange the user-entered pin for a permanent auth code.
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', pin]}))
            tmpmsg = json.loads(websocket.recv())
            # A 'time' message here means the player is playing and flooding
            # the socket; pairing cannot complete until it is paused.
            if tmpmsg['channel'] == 'time':
                _LOGGER.error("Error setting up GPMDP. Please pause "
                              "the desktop player and try again")
                break
            code = tmpmsg['payload']
            # Pin was wrong/expired; wait for the next CODE_REQUIRED round.
            if code == 'CODE_REQUIRED':
                continue
            setup_gpmdp(hass, config, code,
                        add_devices_callback)
            # Persist the code so future startups skip the pairing flow.
            save_json(hass.config.path(GPMDP_CONFIG_FILE), {"CODE": code})
            websocket.send(json.dumps({'namespace': 'connect',
                                       'method': 'connect',
                                       'arguments': ['Home Assistant', code]}))
            websocket.close()
            break
    _CONFIGURING['gpmdp'] = configurator.request_config(
        DEFAULT_NAME, gpmdp_configuration_callback,
        description=(
            'Enter the pin that is displayed in the '
            'Google Play Music Desktop Player.'),
        submit_caption="Submit",
        fields=[{'id': 'pin', 'name': 'Pin Code', 'type': 'number'}]
    )
def setup_gpmdp(hass, config, code, add_devices):
"""Set up gpmdp."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
url = 'ws://{}:{}'.format(host, port)
if not code:
request_configuration(hass, config, url, add_devices)
return
if 'gpmdp' in _CONFIGURING:
configurator = hass.components.configurator
configurator.request_done(_CONFIGURING.pop('gpmdp'))
add_devices([GPMDP(name, url, code)], True)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the GPMDP platform."""
    codeconfig = load_json(hass.config.path(GPMDP_CONFIG_FILE))
    # Reuse a previously persisted pairing code when one exists.
    code = codeconfig.get('CODE') if codeconfig else None
    if code is None and discovery_info is not None:
        # Discovered while a pairing flow is already pending: do nothing.
        if 'gpmdp' in _CONFIGURING:
            return
    setup_gpmdp(hass, config, code, add_devices)
class GPMDP(MediaPlayerDevice):
    """Representation of a GPMDP."""
    def __init__(self, name, url, code):
        """Initialize the media player."""
        from websocket import create_connection
        # Stored as a factory so the websocket can be re-created after drops.
        self._connection = create_connection
        self._url = url
        self._authorization_code = code
        self._name = name
        self._status = STATE_OFF
        self._ws = None  # lazily opened/re-opened by get_ws()
        self._title = None
        self._artist = None
        self._albumart = None
        self._seek_position = None
        self._duration = None
        self._volume = None
        self._request_id = 0  # monotonically increasing ws request counter
    def get_ws(self):
        """Check if the websocket is setup and connected."""
        if self._ws is None:
            try:
                self._ws = self._connection((self._url), timeout=1)
                # Authenticate immediately with the stored pairing code.
                msg = json.dumps({'namespace': 'connect',
                                  'method': 'connect',
                                  'arguments': ['Home Assistant',
                                                self._authorization_code]})
                self._ws.send(msg)
            except (socket.timeout, ConnectionRefusedError,
                    ConnectionResetError):
                # Player unreachable; leave None so callers treat it as off.
                self._ws = None
        return self._ws
    def send_gpmdp_msg(self, namespace, method, with_id=True):
        """Send ws messages to GPMDP and verify request id in response."""
        from websocket import _exceptions
        try:
            websocket = self.get_ws()
            if websocket is None:
                self._status = STATE_OFF
                return
            self._request_id += 1
            websocket.send(json.dumps({'namespace': namespace,
                                       'method': method,
                                       'requestID': self._request_id}))
            if not with_id:
                # Fire-and-forget command; no reply expected by the caller.
                return
            # Drain unrelated messages until the reply matching our
            # request id arrives.
            while True:
                msg = json.loads(websocket.recv())
                if 'requestID' in msg:
                    if msg['requestID'] == self._request_id:
                        return msg
        except (ConnectionRefusedError, ConnectionResetError,
                _exceptions.WebSocketTimeoutException,
                _exceptions.WebSocketProtocolException,
                _exceptions.WebSocketPayloadException,
                _exceptions.WebSocketConnectionClosedException):
            # Drop the socket; get_ws() will reconnect on the next call.
            self._ws = None
    def update(self):
        """Get the latest details from the player."""
        # NOTE(review): the 1s delay presumably lets the player settle after
        # a command before state is polled — confirm before removing.
        time.sleep(1)
        playstate = self.send_gpmdp_msg('playback', 'getPlaybackState')
        if playstate is None:
            return
        self._status = PLAYBACK_DICT[str(playstate['value'])]
        time_data = self.send_gpmdp_msg('playback', 'getCurrentTime')
        if time_data is not None:
            # Player reports milliseconds; HA expects seconds.
            self._seek_position = int(time_data['value'] / 1000)
        track_data = self.send_gpmdp_msg('playback', 'getCurrentTrack')
        if track_data is not None:
            self._title = track_data['value']['title']
            self._artist = track_data['value']['artist']
            self._albumart = track_data['value']['albumArt']
            self._duration = int(track_data['value']['duration'] / 1000)
        volume_data = self.send_gpmdp_msg('volume', 'getVolume')
        if volume_data is not None:
            # Player volume is 0-100; HA volume_level is 0.0-1.0.
            self._volume = volume_data['value'] / 100
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC
    @property
    def state(self):
        """Return the state of the device."""
        return self._status
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._title
    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self._artist
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._albumart
    @property
    def media_seek_position(self):
        """Time in seconds of current seek position."""
        return self._seek_position
    @property
    def media_duration(self):
        """Time in seconds of current song duration."""
        return self._duration
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_GPMDP
    def media_next_track(self):
        """Send media_next command to media player."""
        self.send_gpmdp_msg('playback', 'forward', False)
    def media_previous_track(self):
        """Send media_previous command to media player."""
        self.send_gpmdp_msg('playback', 'rewind', False)
    def media_play(self):
        """Send media_play command to media player."""
        # GPMDP only exposes a toggle; we optimistically record the
        # resulting state locally.
        self.send_gpmdp_msg('playback', 'playPause', False)
        self._status = STATE_PLAYING
        self.schedule_update_ha_state()
    def media_pause(self):
        """Send media_pause command to media player."""
        # Same toggle as media_play; optimistic state update.
        self.send_gpmdp_msg('playback', 'playPause', False)
        self._status = STATE_PAUSED
        self.schedule_update_ha_state()
    def media_seek(self, position):
        """Send media_seek command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        # Player expects milliseconds.
        websocket.send(json.dumps({'namespace': 'playback',
                                   'method': 'setCurrentTime',
                                   'arguments': [position*1000]}))
        self.schedule_update_ha_state()
    def volume_up(self):
        """Send volume_up command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        websocket.send('{"namespace": "volume", "method": "increaseVolume"}')
        self.schedule_update_ha_state()
    def volume_down(self):
        """Send volume_down command to media player."""
        websocket = self.get_ws()
        if websocket is None:
            return
        websocket.send('{"namespace": "volume", "method": "decreaseVolume"}')
        self.schedule_update_ha_state()
    def set_volume_level(self, volume):
        """Set volume on media player, range(0..1)."""
        websocket = self.get_ws()
        if websocket is None:
            return
        # Scale HA's 0..1 to the player's 0..100.
        websocket.send(json.dumps({'namespace': 'volume',
                                   'method': 'setVolume',
                                   'arguments': [volume*100]}))
        self.schedule_update_ha_state()
| 35.578947
| 79
| 0.594501
|
4a0ec260782d984623b969327771e8afbf92abc0
| 2,991
|
py
|
Python
|
backend/workout/models.py
|
Soobian/Project_IO
|
a420a05deace219e1edb582cc47c533aa48ca41b
|
[
"BSD-3-Clause"
] | null | null | null |
backend/workout/models.py
|
Soobian/Project_IO
|
a420a05deace219e1edb582cc47c533aa48ca41b
|
[
"BSD-3-Clause"
] | null | null | null |
backend/workout/models.py
|
Soobian/Project_IO
|
a420a05deace219e1edb582cc47c533aa48ca41b
|
[
"BSD-3-Clause"
] | 3
|
2021-11-12T09:57:46.000Z
|
2022-01-18T21:08:04.000Z
|
"""
Plik zawiera klasy reprezentujące utworzone tabele w bazie danych z odpowiednimi atrybutami
- MuscleGroup: Tabela reprezentująca grupy mięśni
- Exercise: Tabela przechowująca informacje na temat ćwiczeń
- WorkoutPlan: Tabela zawierająca informacje na temat dostępnych planów ćwiczeniowych
- WorkoutPlanDay: Tabela zawierająca szczegółowe dane na temat danych planów ćwiczeniowych
- WorkoutPlanDayExercise: Tabela zbiorów ćwiczeń dostępnych w ramach danego planu ćwiczeń
- WorkoutPlanDayExerciseSets: Tabela zawierająca dokładne dane na temat sposobu wykonania ćwiczenia
@author Aneta Postrożny
"""
from django.db import models
from users.models import CustomUser
#from exercise.models import Exercise
class MuscleGroup(models.Model):
    """A muscle group (e.g. chest, back) that exercises can target."""
    name = models.TextField(max_length=100, blank=False, null=False)
    description = models.TextField(max_length=500, blank=False, null=False)
    # NOTE(review): null=True with blank=False is unusual — confirm whether
    # a missing photo link is really allowed at the DB level only.
    photo_link = models.TextField(max_length=500, blank=False, null=True)
    def __str__(self) -> str:
        return self.name
class Exercise(models.Model):
    """An exercise, linked to one or more muscle groups it works."""
    name = models.TextField(max_length=100, blank=False, null=False)
    description = models.TextField(max_length=500, blank=False, null=False)
    photo_link = models.TextField(max_length=500, blank=False, null=True)
    # Many-to-many: one exercise can target several muscle groups.
    musclegroups = models.ManyToManyField(MuscleGroup)
    def __str__(self) -> str:
        return self.name
class WorkoutPlan(models.Model):
    """A user's workout plan with a difficulty level."""
    # Difficulty levels stored as integers via the `choices` mechanism.
    LEVELS = (
        (1, 'Beginner'),
        (2, 'Intermediate'),
        (3, 'Advanced'),
    )
    # Owning user; deleting the user deletes their plans.
    userId = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
    name = models.TextField(max_length=30, blank=False, null=False)
    level = models.IntegerField(choices=LEVELS, null=False)
    description = models.TextField(max_length=500, blank=False, null=False)
    photo_link = models.TextField(max_length=500, blank=False, null=False)
    def __str__(self) -> str:
        return self.name
class WorkoutPlanDay(models.Model):
    """One day within a workout plan."""
    # Parent plan; reachable from WorkoutPlan via `workoutplanday`.
    workoutPlanId = models.ForeignKey(WorkoutPlan, related_name='workoutplanday', on_delete=models.CASCADE)
    name = models.TextField(max_length=30, blank=False, null=False)
    description = models.TextField(max_length=500, blank=False, null=False)
    def __str__(self) -> str:
        return self.name
class WorkoutPlanDayExercise(models.Model):
    """Join table assigning an exercise to a specific plan day."""
    workoutPlanDayId = models.ForeignKey(WorkoutPlanDay, related_name='workoutplanexercises', on_delete=models.CASCADE)
    exerciseId = models.ForeignKey(Exercise, on_delete=models.CASCADE)
    def __str__(self) -> str:
        # Concatenation of the related objects' string representations.
        return str(self.workoutPlanDayId) + str(self.exerciseId)
class WorkoutPlanDayExerciseSets(models.Model):
    """Set prescription (reps per series) for an exercise in a plan day."""
    workoutPlanDayExerciseId = models.ForeignKey(WorkoutPlanDayExercise, related_name='workoutplanexercisessets', on_delete=models.CASCADE)
    reps = models.IntegerField(blank=False, null=False)
    series = models.IntegerField(blank=False, null=False, default=5)
    def __str__(self) -> str:
        # Bug fix: __str__ must return a str. The previous code returned the
        # related model instance itself, which raises
        # "TypeError: __str__ returned non-string" whenever this object is
        # rendered (admin, shell, templates).
        return str(self.workoutPlanDayExerciseId)
| 37.3875
| 139
| 0.755934
|
4a0ec41dd9d36edaa0c893302277ddaa0dfe7339
| 1,349
|
py
|
Python
|
price_monitoring.py
|
happysms/opensource_turtle_bot
|
612d3780f4de8d5c58916e73b1c6468e9737eed0
|
[
"Apache-2.0"
] | 1
|
2021-11-16T08:59:01.000Z
|
2021-11-16T08:59:01.000Z
|
price_monitoring.py
|
happysms/opensource_turtle_bot
|
612d3780f4de8d5c58916e73b1c6468e9737eed0
|
[
"Apache-2.0"
] | null | null | null |
price_monitoring.py
|
happysms/opensource_turtle_bot
|
612d3780f4de8d5c58916e73b1c6468e9737eed0
|
[
"Apache-2.0"
] | null | null | null |
import ccxt
import pandas as pd
import time
import logger
from monitoring_util import check_trading_condition, request_order, get_trade_list, get_price_info
import json
import requests
binance = ccxt.binance()
logger = logger.make_logger("mylogger")
trade_list = get_trade_list()

# Signal -> (position to record on the trade, order side, price_info key).
_ACTIONS = {
    "enter_long": ("long", "long", "max_price"),
    "enter_short": ("short", "short", "min_price"),
    "exit_long": (None, "long", "long_exit_price"),
    "exit_short": (None, "short", "short_exit_price"),
}

# Poll each configured trade forever; any error is logged and the loop
# restarts so a single bad tick cannot kill the monitor.
while True:
    try:
        for trade in trade_list:
            price_info = get_price_info(trade, binance)
            trade_condition = check_trading_condition(trade, price_info)
            action = _ACTIONS.get(trade_condition)
            if action is not None:
                position, side, price_key = action
                trade['info']['position'] = position
                request_order(trade["symbol"], side, price_info[price_key], trade_condition)
            time.sleep(0.1)
    except Exception as e:
        logger.error(e)
| 33.725
| 104
| 0.636027
|
4a0ec466c0d467acb93d3bafe28a43e59720e169
| 983
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/sub_resource_read_only.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/sub_resource_read_only.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/sub_resource_read_only.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: generated by AutoRest — manual edits will be lost on regeneration.
class SubResourceReadOnly(Model):
    """SubResourceReadOnly.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: Resource Id
    :vartype id: str
    """
    # msrest honors 'readonly': the field is deserialized from responses but
    # never serialized into outgoing requests.
    _validation = {
        'id': {'readonly': True},
    }
    # Maps Python attribute -> wire key and serialization type for msrest.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(SubResourceReadOnly, self).__init__(**kwargs)
        self.id = None  # populated by the server during deserialization
| 27.305556
| 76
| 0.562564
|
4a0ec46c649a2b2a756b62156ae70a594bdb1110
| 196
|
py
|
Python
|
{{cookiecutter.project_slug}}/src/models/__init__.py
|
ardydedase/cookiecutter-flask-postgres-api
|
e6a9095a31d54f024a30a44e368f4a0a8f15829b
|
[
"MIT"
] | 20
|
2020-05-24T15:07:27.000Z
|
2021-08-24T04:58:06.000Z
|
src/models/__init__.py
|
ardydedase/flask-postgres-api
|
d9edb69988f89f480d595a8b111fafeb7ae81234
|
[
"MIT"
] | null | null | null |
src/models/__init__.py
|
ardydedase/flask-postgres-api
|
d9edb69988f89f480d595a8b111fafeb7ae81234
|
[
"MIT"
] | 6
|
2020-08-11T10:58:30.000Z
|
2021-09-09T03:57:37.000Z
|
# flake8: noqa
# TODO: check if there is a better way
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle imported by every model module.
# NOTE(review): presumably bound to the app via db.init_app(app) at startup
# — confirm against the application factory.
db = SQLAlchemy()
# These imports are deliberately placed AFTER `db` is created: the model
# modules import `db` from this package, so importing them earlier would
# create a circular import.
from .abc import BaseModel
from .user import User
__all__ = ['BaseModel', 'User']
| 19.6
| 39
| 0.744898
|
4a0ec5a9bd60fcecb8258d44c1b88eb6b63378d8
| 10,349
|
py
|
Python
|
jinja2/testsuite/api.py
|
prasenjit/ffbird
|
057873a500738383612ed21e93348dfb9ee19af2
|
[
"Apache-2.0"
] | 4
|
2016-02-18T15:11:58.000Z
|
2020-01-16T11:07:50.000Z
|
jinja2/testsuite/api.py
|
prasenjit/ffbird
|
057873a500738383612ed21e93348dfb9ee19af2
|
[
"Apache-2.0"
] | 6
|
2018-06-21T19:45:01.000Z
|
2018-06-21T19:45:02.000Z
|
jinja2/testsuite/api.py
|
prasenjit/ffbird
|
057873a500738383612ed21e93348dfb9ee19af2
|
[
"Apache-2.0"
] | 5
|
2018-11-24T11:19:49.000Z
|
2022-03-25T00:23:03.000Z
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.api
~~~~~~~~~~~~~~~~~~~~
Tests the public API and related stuff.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
import os
import tempfile
import shutil
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, Undefined, DebugUndefined, \
StrictUndefined, UndefinedError, meta, \
is_undefined, Template, DictLoader
from jinja2.utils import Cycler
# Default environment shared by the test cases in this module.
env = Environment()
class ExtendedAPITestCase(JinjaTestCase):
    """Tests for extended public-API behavior: item/attr access, finalize,
    Cycler, compiled expressions, template passthrough and autoescape."""
    def test_item_and_attribute(self):
        from jinja2.sandbox import SandboxedEnvironment
        # Behavior must match in both the normal and sandboxed environment.
        for env in Environment(), SandboxedEnvironment():
            # the |list is necessary for python3
            tmpl = env.from_string('{{ foo.items()|list }}')
            assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
            tmpl = env.from_string('{{ foo|attr("items")()|list }}')
            assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
            tmpl = env.from_string('{{ foo["items"] }}')
            assert tmpl.render(foo={'items': 42}) == '42'
    def test_finalizer(self):
        # finalize hook rewrites None values to empty strings on output.
        def finalize_none_empty(value):
            if value is None:
                value = u''
            return value
        env = Environment(finalize=finalize_none_empty)
        tmpl = env.from_string('{% for item in seq %}|{{ item }}{% endfor %}')
        assert tmpl.render(seq=(None, 1, "foo")) == '||1|foo'
        tmpl = env.from_string('<{{ none }}>')
        assert tmpl.render() == '<>'
    def test_cycler(self):
        items = 1, 2, 3
        c = Cycler(*items)
        # Cycler wraps around after exhausting its items.
        for item in items + items:
            assert c.current == item
            assert next(c) == item
        next(c)
        assert c.current == 2
        c.reset()
        assert c.current == 1
    def test_expressions(self):
        expr = env.compile_expression("foo")
        # Undefined names resolve to None by default...
        assert expr() is None
        assert expr(foo=42) == 42
        # ...unless undefined_to_none is disabled.
        expr2 = env.compile_expression("foo", undefined_to_none=False)
        assert is_undefined(expr2())
        expr = env.compile_expression("42 + foo")
        assert expr(foo=42) == 84
    def test_template_passthrough(self):
        # Passing a Template object to the lookup APIs returns it unchanged.
        t = Template('Content')
        assert env.get_template(t) is t
        assert env.select_template([t]) is t
        assert env.get_or_select_template([t]) is t
        assert env.get_or_select_template(t) is t
    def test_autoescape_autoselect(self):
        # autoescape may be a callable deciding per template name.
        def select_autoescape(name):
            if name is None or '.' not in name:
                return False
            return name.endswith('.html')
        env = Environment(autoescape=select_autoescape,
                          loader=DictLoader({
            'test.txt': '{{ foo }}',
            'test.html': '{{ foo }}'
        }))
        t = env.get_template('test.txt')
        assert t.render(foo='<foo>') == '<foo>'
        t = env.get_template('test.html')
        assert t.render(foo='<foo>') == '&lt;foo&gt;'
    
class MetaTestCase(JinjaTestCase):
    """Tests for the jinja2.meta AST-introspection helpers."""
    def test_find_undeclared_variables(self):
        # Names set inside the template (foo) and macro/loop-local names must
        # not be reported; only truly undeclared names are.
        ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
        x = meta.find_undeclared_variables(ast)
        assert x == set(['bar'])
        ast = env.parse('{% set foo = 42 %}{{ bar + foo }}'
                        '{% macro meh(x) %}{{ x }}{% endmacro %}'
                        '{% for item in seq %}{{ muh(item) + meh(seq) }}{% endfor %}')
        x = meta.find_undeclared_variables(ast)
        assert x == set(['bar', 'seq', 'muh'])
    def test_find_refererenced_templates(self):
        # Non-constant template names (e.g. a variable) are reported as None.
        ast = env.parse('{% extends "layout.html" %}{% include helper %}')
        i = meta.find_referenced_templates(ast)
        assert next(i) == 'layout.html'
        assert next(i) is None
        assert list(i) == []
        # extends/from/import/include all contribute referenced names in order.
        ast = env.parse('{% extends "layout.html" %}'
                        '{% from "test.html" import a, b as c %}'
                        '{% import "meh.html" as meh %}'
                        '{% include "muh.html" %}')
        i = meta.find_referenced_templates(ast)
        assert list(i) == ['layout.html', 'test.html', 'meh.html', 'muh.html']
    def test_find_included_templates(self):
        # include with a list/tuple of candidates reports each constant name;
        # non-constant entries come through as None.
        ast = env.parse('{% include ["foo.html", "bar.html"] %}')
        i = meta.find_referenced_templates(ast)
        assert list(i) == ['foo.html', 'bar.html']
        ast = env.parse('{% include ("foo.html", "bar.html") %}')
        i = meta.find_referenced_templates(ast)
        assert list(i) == ['foo.html', 'bar.html']
        ast = env.parse('{% include ["foo.html", "bar.html", foo] %}')
        i = meta.find_referenced_templates(ast)
        assert list(i) == ['foo.html', 'bar.html', None]
        ast = env.parse('{% include ("foo.html", "bar.html", foo) %}')
        i = meta.find_referenced_templates(ast)
        assert list(i) == ['foo.html', 'bar.html', None]
class StreamingTestCase(JinjaTestCase):
    """Tests for TemplateStream: chunked rendering, buffering and dumping."""
    def test_basic_streaming(self):
        # Unbuffered streams yield one event per generated template chunk.
        tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
                               "}} - {{ item }}</li>{%- endfor %}</ul>")
        stream = tmpl.stream(seq=list(range(4)))
        self.assert_equal(next(stream), '<ul>')
        self.assert_equal(next(stream), '<li>1 - 0</li>')
        self.assert_equal(next(stream), '<li>2 - 1</li>')
        self.assert_equal(next(stream), '<li>3 - 2</li>')
        self.assert_equal(next(stream), '<li>4 - 3</li>')
        self.assert_equal(next(stream), '</ul>')
    def test_buffered_streaming(self):
        # enable_buffering(size) groups that many events per yielded chunk.
        tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
                               "}} - {{ item }}</li>{%- endfor %}</ul>")
        stream = tmpl.stream(seq=list(range(4)))
        stream.enable_buffering(size=3)
        self.assert_equal(next(stream), u'<ul><li>1 - 0</li><li>2 - 1</li>')
        self.assert_equal(next(stream), u'<li>3 - 2</li><li>4 - 3</li></ul>')
    def test_streaming_behavior(self):
        # Buffering can be toggled on and off at runtime.
        tmpl = env.from_string("")
        stream = tmpl.stream()
        assert not stream.buffered
        stream.enable_buffering(20)
        assert stream.buffered
        stream.disable_buffering()
        assert not stream.buffered
    def test_dump_stream(self):
        # dump() writes the stream to a file with the requested encoding;
        # \u2713 (check mark) encodes to the three UTF-8 bytes asserted below.
        tmp = tempfile.mkdtemp()
        try:
            tmpl = env.from_string(u"\u2713")
            stream = tmpl.stream()
            stream.dump(os.path.join(tmp, 'dump.txt'), 'utf-8')
            with open(os.path.join(tmp, 'dump.txt'), 'rb') as f:
                self.assertEqual(f.read(), b'\xe2\x9c\x93')
        finally:
            shutil.rmtree(tmp)
class UndefinedTestCase(JinjaTestCase):
    """Tests for the Undefined family: default, debug and strict variants."""
    def test_stopiteration_is_undefined(self):
        # A StopIteration escaping user code is treated as an undefined value
        # by the template runtime (pre-PEP 479 era behavior).
        def test():
            raise StopIteration()
        t = Template('A{{ test() }}B')
        assert t.render(test=test) == 'AB'
        t = Template('A{{ test().missingattribute }}B')
        self.assert_raises(UndefinedError, t.render, test=test)
    def test_undefined_and_special_attributes(self):
        # Dunder attribute access must raise, not return a new Undefined.
        try:
            Undefined('Foo').__dict__
        except AttributeError:
            pass
        else:
            assert False, "Expected actual attribute error"
    def test_default_undefined(self):
        # Default Undefined renders empty and only raises on deeper access.
        env = Environment(undefined=Undefined)
        self.assert_equal(env.from_string('{{ missing }}').render(), u'')
        self.assert_raises(UndefinedError,
                           env.from_string('{{ missing.attribute }}').render)
        self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
        self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
        self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42), '')
        self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
    def test_debug_undefined(self):
        # DebugUndefined echoes the missing lookup back into the output.
        env = Environment(undefined=DebugUndefined)
        self.assert_equal(env.from_string('{{ missing }}').render(), '{{ missing }}')
        self.assert_raises(UndefinedError,
                           env.from_string('{{ missing.attribute }}').render)
        self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
        self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
        self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42),
                          u"{{ no such element: int object['missing'] }}")
        self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
    def test_strict_undefined(self):
        # StrictUndefined raises on any use except defined-tests and
        # |default with the boolean flag set.
        env = Environment(undefined=StrictUndefined)
        self.assert_raises(UndefinedError, env.from_string('{{ missing }}').render)
        self.assert_raises(UndefinedError, env.from_string('{{ missing.attribute }}').render)
        self.assert_raises(UndefinedError, env.from_string('{{ missing|list }}').render)
        self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
        self.assert_raises(UndefinedError, env.from_string('{{ foo.missing }}').render, foo=42)
        self.assert_raises(UndefinedError, env.from_string('{{ not missing }}').render)
        self.assert_equal(env.from_string('{{ missing|default("default", true) }}').render(), 'default')
    def test_indexing_gives_undefined(self):
        # Indexing a non-indexable still yields an Undefined chain that
        # raises only when used.
        t = Template("{{ var[42].foo }}")
        self.assert_raises(UndefinedError, t.render, var=0)
    def test_none_gives_proper_error(self):
        try:
            Environment().getattr(None, 'split')()
        except UndefinedError as e:
            assert e.message == "'None' has no attribute 'split'"
        else:
            assert False, 'expected exception'
    def test_object_repr(self):
        # The error message names the type of the object the lookup failed on.
        try:
            Undefined(obj=42, name='upper')()
        except UndefinedError as e:
            assert e.message == "'int object' has no attribute 'upper'"
        else:
            assert False, 'expected exception'
def suite():
    """Assemble a unittest suite covering all API-related test cases."""
    result = unittest.TestSuite()
    for case in (ExtendedAPITestCase, MetaTestCase,
                 StreamingTestCase, UndefinedTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
| 39.651341
| 104
| 0.578413
|
4a0ec67db2b8e8566b8a4fc59f59e4331c5df77b
| 4,469
|
py
|
Python
|
cltk/text_reuse/comparison.py
|
Akash-Pardasani/cltk
|
2a430e9407452b06f44847202ebce8446007d96b
|
[
"MIT"
] | null | null | null |
cltk/text_reuse/comparison.py
|
Akash-Pardasani/cltk
|
2a430e9407452b06f44847202ebce8446007d96b
|
[
"MIT"
] | null | null | null |
cltk/text_reuse/comparison.py
|
Akash-Pardasani/cltk
|
2a430e9407452b06f44847202ebce8446007d96b
|
[
"MIT"
] | 1
|
2019-06-16T06:41:47.000Z
|
2019-06-16T06:41:47.000Z
|
"""
A comparison class to help with tracking string comparison values
"""
from cltk.utils.cltk_logger import logger
__author__ = 'Luke Hollis <lukehollis@gmail.com>'
__license__ = 'MIT License. See LICENSE.'
class Comparison:
    """Convenience container for a pair of compared strings, their
    similarity ratio, and the bibliographic reference of each string."""

    # Reference keys copied from a text_ref dict onto ``<key>_a``/``<key>_b``.
    _REF_KEYS = ('author', 'work', 'subwork', 'text_n', 'language')

    def __init__(self, str_a, str_b, distance_ratio):
        """Store the compared strings and the ratio of the comparison.

        :param str_a: str
        :param str_b: str
        :param distance_ratio: float
        """
        self.str_a = str_a
        self.str_b = str_b
        self.ratio = distance_ratio
        # All bibliographic metadata starts unset; populate it later via
        # set_ref_a() / set_ref_b().
        for key in self._REF_KEYS:
            setattr(self, key + '_a', None)
            setattr(self, key + '_b', None)

    def _set_ref(self, text_ref, suffix):
        """Copy any known reference keys from ``text_ref`` onto the
        attributes ending in ``suffix`` ('a' or 'b'); other keys are
        silently ignored."""
        for key in self._REF_KEYS:
            if key in text_ref:
                setattr(self, '%s_%s' % (key, suffix), text_ref[key])

    def set_ref_a(self, text_ref):
        """Set the reference values related to the str_a compared string.

        :param text_ref: dict with any of: author, work, subwork, text_n
            (a string, to allow numbering systems mixing integers and alpha
            characters, e.g. '101b'), language
        :return: void
        """
        self._set_ref(text_ref, 'a')
        return

    def set_ref_b(self, text_ref):
        """Set the reference values related to the str_b compared string.

        :param text_ref: dict with any of: author, work, subwork, text_n
            (a string, to allow numbering systems mixing integers and alpha
            characters, e.g. '101b'), language
        :return: void
        """
        self._set_ref(text_ref, 'b')
        return
def long_substring(str_a, str_b):
    """Return the longest common substring of the two strings, with any
    surrounding whitespace stripped from the result.

    :param str_a: str
    :param str_b: str

    Big Thanks to Pulkit Kathuria(@kevincobain2000) for the function
    The function is derived from jProcessing toolkit suite
    """
    longest = ''
    if str_a:
        for start in range(len(str_a)):
            for size in range(len(str_a) - start + 1):
                # Only candidates longer than the current best can win.
                candidate = str_a[start:start + size]
                if size > len(longest) and candidate in str_b:
                    longest = candidate
    return longest.strip()
def minhash(str_a, str_b):
    """Estimate the similarity of two strings as the Jaccard index of
    their character 3-shingle sets.

    :param str_a: str
    :param str_b: str
    :Sentences: should be tokenized in string
        str_a = u"There is"
        str_b = u"There was"

    Thanks to Pulkit Kathuria(@kevincobain2000) for the definition of the
    function. Returns 0.0 when neither string is long enough to produce
    a shingle (the degenerate Jaccard case).
    """
    def shingles(text):
        # All overlapping 3-character windows of the string.
        return set(text[i:i + 3] for i in range(len(text) - 2))

    set_a = shingles(str_a)
    set_b = shingles(str_b)
    union = set_a | set_b
    if not union:
        # Mirrors the original's ZeroDivisionError fallback.
        return 0.0
    return len(set_a & set_b) / float(len(union))
| 31.921429
| 160
| 0.582009
|
4a0ec6dc7067b5801df6be35e0d52b8f89263b56
| 728
|
py
|
Python
|
djcdek/serialize.py
|
raxers/django-cdek
|
3b06f32f6937266035d49fc0c1bd50e64311c356
|
[
"MIT"
] | null | null | null |
djcdek/serialize.py
|
raxers/django-cdek
|
3b06f32f6937266035d49fc0c1bd50e64311c356
|
[
"MIT"
] | null | null | null |
djcdek/serialize.py
|
raxers/django-cdek
|
3b06f32f6937266035d49fc0c1bd50e64311c356
|
[
"MIT"
] | null | null | null |
import json
import datetime
class CDEKSerializable:
    """Marker base class for objects the CDEK JSON encoder can serialize."""

    @property
    def fields(self):
        """Return the live attribute dictionary used as the JSON payload."""
        return vars(self)
class CDEKEncoder(json.JSONEncoder):
    """JSON encoder for CDEK API payloads.

    Extra cases on top of the stock encoder: CDEKSerializable objects
    (serialized from their ``fields`` dict with None values dropped),
    datetimes (rendered as ``YYYY-MM-DD``), and empty lists / bare None
    (rendered as the empty string).
    """

    def default(self, o):
        # Datetime check first: a datetime is never a CDEKSerializable,
        # so the reordering is behavior-neutral.
        # BUG FIX: the original tested isinstance(o, datetime) against the
        # *module* object (the file does ``import datetime``), which raises
        # "TypeError: isinstance() arg 2 must be a type" at runtime.
        if isinstance(o, datetime.datetime):
            return o.strftime('%Y-%m-%d')
        if isinstance(o, CDEKSerializable):
            return self._filter_none(o.fields)
        if isinstance(o, list):
            # Only the empty list is special-cased; a non-empty list falls
            # through to the base class (json handles lists natively, so
            # default() normally never sees one).
            if len(o) == 0:
                return ''
        elif o is None:
            return ''
        # Anything unhandled defers to the base class, which raises
        # TypeError for unserializable objects.
        return super(CDEKEncoder, self).default(o)

    def encode(self, o):
        # Pure pass-through, kept for interface stability.
        return super(CDEKEncoder, self).encode(o)

    def _filter_none(self, value: dict):
        """Return a copy of ``value`` without the None-valued entries."""
        return dict(filter(lambda x: x[1] is not None, value.items()))
| 26
| 70
| 0.592033
|
4a0ec7647deec0eebe4985b3fef0ec5227553588
| 1,853
|
py
|
Python
|
desktop/core/ext-py/Django-1.11/tests/gis_tests/geoapp/feeds.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
tests/gis_tests/geoapp/feeds.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
tests/gis_tests/geoapp/feeds.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from __future__ import unicode_literals
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
    """GeoRSS feed of all cities, one point geometry per item."""
    link = '/city/'
    title = 'Test GeoDjango Cities'
    def items(self):
        # One feed entry per City row.
        return City.objects.all()
    def item_link(self, item):
        return '/city/%s/' % item.pk
    def item_geometry(self, item):
        # A GEOS Point; the feed framework serializes it as <georss:point>.
        return item.point
class TestGeoRSS2(TestGeoRSS1):
    """Same city feed, but with a feed-level bounding box and tuple points."""
    def geometry(self, obj):
        # This should attach a <georss:box> element for the extent of
        # of the cities in the database. This tuple came from
        # calling `City.objects.aggregate(Extent())` -- we can't do that call
        # here because `Extent` is not implemented for MySQL/Oracle.
        return (-123.30, -41.32, 174.78, 48.46)
    def item_geometry(self, item):
        # Returning a simple tuple for the geometry.
        return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
    # Same city feed rendered as GeoAtom instead of GeoRSS.
    feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
    feed_type = feeds.GeoAtom1Feed
    def geometry(self, obj):
        # This time we'll use a 2-tuple of coordinates for the box.
        return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
    feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
    # Invalid: W3C Geo cannot express a box geometry.
    feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
    # Invalid: W3C Geo supports points only, not polygons.
    feed_type = feeds.W3CGeoFeed
    def item_geometry(self, item):
        from django.contrib.gis.geos import Polygon
        return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs: maps the URL slug to the feed class
# served at that slug.
feed_dict = {
    'rss1': TestGeoRSS1,
    'rss2': TestGeoRSS2,
    'atom1': TestGeoAtom1,
    'atom2': TestGeoAtom2,
    'w3cgeo1': TestW3CGeo1,
    'w3cgeo2': TestW3CGeo2,
    'w3cgeo3': TestW3CGeo3,
}
| 25.040541
| 77
| 0.66163
|
4a0ec7cf2b0065bead55c6adb250b165b85f329a
| 758
|
py
|
Python
|
main/home/migrations/0003_auto_20200912_0031.py
|
Shriya-Padhi/Plasma-Donation-Database-System
|
6f5a6f1fc77647cc2faaecbe55efd0e04da33437
|
[
"Apache-2.0"
] | null | null | null |
main/home/migrations/0003_auto_20200912_0031.py
|
Shriya-Padhi/Plasma-Donation-Database-System
|
6f5a6f1fc77647cc2faaecbe55efd0e04da33437
|
[
"Apache-2.0"
] | null | null | null |
main/home/migrations/0003_auto_20200912_0031.py
|
Shriya-Padhi/Plasma-Donation-Database-System
|
6f5a6f1fc77647cc2faaecbe55efd0e04da33437
|
[
"Apache-2.0"
] | 2
|
2021-04-26T10:27:31.000Z
|
2021-12-02T12:38:21.000Z
|
# Generated by Django 3.0.6 on 2020-09-11 19:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: turn donor_feedback.plasma_bank into a plain
    CharField and point hospital_feedback.plasma_bank at the user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0002_auto_20200912_0026'),
    ]
    operations = [
        migrations.AlterField(
            model_name='donor_feedback',
            name='plasma_bank',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='hospital_feedback',
            name='plasma_bank',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 28.074074
| 110
| 0.654354
|
4a0ec976ca19937c77fdc690d4f1affa7a4c4be7
| 2,836
|
py
|
Python
|
aleat3/output/colored.py
|
DiddiLeija/aleat3
|
1267570a719bf40211245a290d70e7d4d710cf82
|
[
"MIT"
] | 2
|
2021-05-04T14:15:18.000Z
|
2021-05-04T14:15:21.000Z
|
aleat3/output/colored.py
|
DiddiLeija/aleat3
|
1267570a719bf40211245a290d70e7d4d710cf82
|
[
"MIT"
] | 17
|
2021-05-06T18:40:00.000Z
|
2021-06-16T13:33:03.000Z
|
aleat3/output/colored.py
|
DiddiLeija/aleat3
|
1267570a719bf40211245a290d70e7d4d710cf82
|
[
"MIT"
] | null | null | null |
"""
New since 0.0.9: Use console-colored functions.
This file uses Colorama (http://pypi.org/project/colorama) for colored output.
"""
__all__ = ["UNABLE",
"output_red",
"output_green",
"output_yellow",
"output_blue",
"output_magenta",
"output_bright"]
error_text = """Unable to load Colorama. Some fuctions may not run correctly
without this package. You can download the package with pip:
pip install colorama
"""
try:
    from colorama import Fore, Back, Style, init
    # NOTE: despite the name, UNABLE is True when colorama *is* available
    # (module_test() prints it as "Colorama package: <flag>").
    UNABLE = True
    init(autoreset=True)
    def output_red(message: str) -> None:
        print(Fore.RED + message)
    def output_yellow(message: str) -> None:
        print(Fore.YELLOW + message)
    def output_green(message: str) -> None:
        print(Fore.GREEN + message)
    def output_blue(message: str) -> None:
        print(Fore.BLUE + message)
    def output_magenta(message: str) -> None:
        print(Fore.MAGENTA + message)
    def output_bright(message: str) -> None:
        print(Style.BRIGHT + message)
except ImportError:
    # Colorama missing: tell the user and fall back to plain printing.
    print(error_text)
    UNABLE = False
    def base(a: str) -> None:
        print(a+"\n")
    # all the "patch functions" are just aliases of base(), which keeps
    # the annotation/documentation work simple.
    output_red = base
    output_green = base
    output_yellow = base
    output_blue = base
    output_magenta = base
    output_bright = base
except Exception as e:
    import warnings
    # BUG FIX: the original interpolated the first literal with % and then
    # juxtaposed two more string literals after the expression, which is a
    # SyntaxError; concatenate the literals first, then apply % once.
    warnings.warn("An unexpected error ocurred: '%s'. "
                  "Some functions may fail without this colored functions. Report this "
                  "to <github.com/diddileija/diddiparser/issues/new>" % str(e),
                  UserWarning)
    # BUG FIX: define UNABLE here too, otherwise module_test() would crash
    # with NameError after a non-ImportError failure.
    UNABLE = False
#########################################################################################################################################
"New since 0.1.1: Module test"
def module_test() -> None:
    """Interactively demonstrate every output helper and report whether
    colorama was loaded (UNABLE is True when it was).

    Blocks on input() at the end, so this is for manual runs only.
    """
    output_magenta("----Module Test: colored.py----")
    print("Available colors demostration:")
    output_red(" -Red output")
    output_green(" -Green output")
    output_yellow(" -Yellow output")
    output_blue(" -Blue output")
    output_magenta(" -Magenta output")
    print("Some style demostration")
    output_bright(" -Bright output")
    print("\n" + "Colorama package:", UNABLE)
    if not UNABLE:
        print(error_text)
        print()
    output_green("The module is OK.")
    output_magenta("----Test finished----")
    # NOTE(review): the return value of input() is unused; it only pauses
    # the console until the user presses Enter.
    d = input("\n" + "Done")
if __name__ == '__main__':
    output_yellow("NOTE: When using this file as __main__ level, you are executing the module test. This operation may take some minutes.")
    module_test()
| 31.865169
| 140
| 0.583921
|
4a0eca0e1ac5f2a2df9ade6549f58fe0f14de6a7
| 4,113
|
py
|
Python
|
test/functional/rpc_net.py
|
MichaelHDesigns/HodlCash
|
c3ca85c436ba40afadfda11db207068a16527379
|
[
"MIT"
] | null | null | null |
test/functional/rpc_net.py
|
MichaelHDesigns/HodlCash
|
c3ca85c436ba40afadfda11db207068a16527379
|
[
"MIT"
] | null | null | null |
test/functional/rpc_net.py
|
MichaelHDesigns/HodlCash
|
c3ca85c436ba40afadfda11db207068a16527379
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from test_framework.test_framework import HodlCashTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
disconnect_nodes,
p2p_port,
wait_until,
)
class NetTest(HodlCashTestFramework):
    """Functional tests for the net-related RPCs (rpc/net.cpp)."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
    def run_test(self):
        self._test_connection_count()
        self._test_getnettotals()
        self._test_getnetworkinginfo()
        self._test_getaddednodeinfo()
        #self._test_getpeerinfo()
    def _test_connection_count(self):
        # connect_nodes_bi connects each node to the other
        assert_equal(self.nodes[0].getconnectioncount(), 2)
    def _test_getnettotals(self):
        # getnettotals totalbytesrecv and totalbytessent should be
        # consistent with getpeerinfo. Since the RPC calls are not atomic,
        # and messages might have been recvd or sent between RPC calls, call
        # getnettotals before and after and verify that the returned values
        # from getpeerinfo are bounded by those values.
        net_totals_before = self.nodes[0].getnettotals()
        peer_info = self.nodes[0].getpeerinfo()
        net_totals_after = self.nodes[0].getnettotals()
        assert_equal(len(peer_info), 2)
        peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
        peers_sent = sum([peer['bytessent'] for peer in peer_info])
        assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
        assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
        assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
        assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
        # test getnettotals and getpeerinfo by doing a ping
        # the bytes sent/received should change
        # note ping and pong are 32 bytes each
        self.nodes[0].ping()
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
        wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
        # NOTE(review): this result is never asserted against; it appears to
        # be a leftover (upstream compares it with the pre-ping peer_info).
        peer_info_after_ping = self.nodes[0].getpeerinfo()
    def _test_getnetworkinginfo(self):
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
        disconnect_nodes(self.nodes[0], 1)
        # Wait a bit for all sockets to close
        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
        connect_nodes_bi(self.nodes, 0, 1)
        assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
    def _test_getaddednodeinfo(self):
        assert_equal(self.nodes[0].getaddednodeinfo(True), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(ip_port, 'add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(True, ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, True, '1.1.1.1')
    def _test_getpeerinfo(self):
        # NOTE(review): currently disabled in run_test() above.
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
    NetTest().main()
| 43.294737
| 134
| 0.693411
|
4a0eca40f0ddad7f1f3562a9fa77e7e0b9248146
| 2,115
|
py
|
Python
|
rpython/jit/backend/ppc/field.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/backend/ppc/field.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/backend/ppc/field.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
# only a small file, but there's some hairy stuff in here!
"""
>>> f = Field('test', 16, 31)
>>> f
<Field 'test'>
>>> f.encode(65535)
65535
>>> f.encode(65536)
Traceback (most recent call last):
File \"<stdin>\", line 1, in ?
File \"field.py\", line 25, in encode
raise ValueError(\"field '%s' can't accept value %s\"
ValueError: field 'test' can't accept value 65536
>>>
"""
class Field(object):
    """One bit-field of a 32-bit PPC instruction word, spanning bit columns
    ``left``..``right`` inclusive (big-endian bit numbering: 0 is the most
    significant bit).

    NOTE(review): this module is Python 2 only -- it uses ``long`` and an
    ``L``-suffixed literal below, which are syntax errors under Python 3.
    """
    def __init__(self, name, left, right, signedness=False, valclass=int, overlap=False):
        self.name = name
        self.left = left
        self.right = right
        width = self.right - self.left + 1
        # mask applies before shift!
        self.mask = 2**width - 1
        # signedness/overlap are flag *strings*, compared literally.
        self.signed = signedness == 'signed'
        self.valclass = valclass
        self.overlap = overlap == 'overlap'
    def __repr__(self):
        return '<Field %r>'%(self.name,)
    def encode(self, value):
        """Range-check ``value`` and shift it into position within a 32-bit
        instruction word; raises ValueError on type or range violations."""
        if not issubclass(self.valclass, type(value)):
            raise ValueError("field '%s' takes '%s's, not '%s's"
                             %(self.name, self.valclass.__name__, type(value).__name__))
        if not self.signed and value < 0:
            raise ValueError("field '%s' is unsigned and can't accept value %d"
                             %(self.name, value))
        # that this does the right thing is /not/ obvious (but true!)
        if ((value >> 31) ^ value) & ~(self.mask >> self.signed):
            raise ValueError("field '%s' can't accept value %s"
                             %(self.name, value))
        value &= self.mask
        value = long(value)
        value <<= (32 - self.right - 1)
        if value & 0x80000000L:
            # yuck:
            # convert to a *negative* Python int so the result fits the
            # platform's signed 32-bit representation.
            return ~int((~value)&0xFFFFFFFFL)
        else:
            return int(value)
    def decode(self, inst):
        """Extract this field's value from the instruction word ``inst``,
        sign-extending when the field is signed."""
        mask = self.mask
        v = (inst >> 32 - self.right - 1) & mask
        if self.signed and (~mask >> 1) & mask & v:
            v = ~(~v&mask)
        return self.valclass(v)
    def r(self, v, labels, pc):
        # Disassembly hook: labels/pc are unused for plain value fields.
        return self.decode(v)
if __name__=='__main__':
    import doctest
    doctest.testmod()
| 33.571429
| 89
| 0.547991
|
4a0ecb16413a686ebb6d03d70a5dafa6a3349eaf
| 618
|
py
|
Python
|
openprocurement/auctions/core/interfaces.py
|
EBRD-ProzorroSale/openprocurement.auctions.core
|
52bd59f193f25e4997612fca0f87291decf06966
|
[
"Apache-2.0"
] | 2
|
2016-09-15T20:17:43.000Z
|
2017-01-08T03:32:43.000Z
|
openprocurement/auctions/core/interfaces.py
|
EBRD-ProzorroSale/openprocurement.auctions.core
|
52bd59f193f25e4997612fca0f87291decf06966
|
[
"Apache-2.0"
] | 183
|
2017-12-21T11:04:37.000Z
|
2019-03-27T08:14:34.000Z
|
openprocurement/auctions/core/interfaces.py
|
EBRD-ProzorroSale/openprocurement.auctions.core
|
52bd59f193f25e4997612fca0f87291decf06966
|
[
"Apache-2.0"
] | 12
|
2016-09-05T12:07:48.000Z
|
2019-02-26T09:24:17.000Z
|
# -*- coding: utf-8 -*-
from zope.interface import (
Interface,
Attribute
)
from openprocurement.api.interfaces import (
IAuction as BaseIAuction,
IContentConfigurator # noqa forwarded import
)
# Auction interfaces
class IAuction(BaseIAuction):
    """Interface for auctions"""
# Question interfaces
class IQuestion(Interface):
    """Interface for questions"""
# Manager interfaces
class IManager(Interface):
    """Interface for managers"""
# Bid interfaces
class IBid(Interface):
    """Interface for bids"""
# Declares the display name every auction implementation must provide.
class IAuctionManager(Interface):
    name = Attribute('Auction name')
| 15.45
| 51
| 0.700647
|
4a0ecb63c0aeae91991d9a9b2c7fae5de1f696ac
| 25,953
|
py
|
Python
|
sympy/core/symbol.py
|
risubaba/sympy
|
3c872145ca2a444f597c9d57b0e12fc16da7b17a
|
[
"BSD-3-Clause"
] | 1
|
2021-06-22T23:27:55.000Z
|
2021-06-22T23:27:55.000Z
|
sympy/core/symbol.py
|
QuantumNovice/sympy
|
8a7dc8ef786cccbd7dc0fe0b0e6beec4dd1f7b49
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/symbol.py
|
QuantumNovice/sympy
|
8a7dc8ef786cccbd7dc0fe0b0e6beec4dd1f7b49
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
from sympy.core.assumptions import StdFactKB, _assume_defined
from sympy.core.compatibility import is_sequence, ordered
from .basic import Basic
from .sympify import sympify
from .singleton import S
from .expr import Expr, AtomicExpr
from .cache import cacheit
from .function import FunctionClass
from sympy.core.logic import fuzzy_bool
from sympy.logic.boolalg import Boolean
from sympy.utilities.iterables import cartes, sift
from sympy.core.containers import Tuple
import string
import re as _re
import random
def _filter_assumptions(kwargs):
    """Split the given dict into assumptions and non-assumptions.
    Keys are taken as assumptions if they correspond to an
    entry in ``_assume_defined``.

    Returns the pair ``(assumptions, nonassumptions)`` as two dicts.
    """
    # sift() partitions the items by the predicate; binary=True returns the
    # (matching, non-matching) groups directly instead of a dict of groups.
    assumptions, nonassumptions = map(dict, sift(kwargs.items(),
        lambda i: i[0] in _assume_defined,
        binary=True))
    # Normalize/validate assumption values in place (raises on bad input).
    Symbol._sanitize(assumptions)
    return assumptions, nonassumptions
def _symbol(s, matching_symbol=None, **assumptions):
    """Return s if s is a Symbol, else if s is a string, return either
    the matching_symbol if the names are the same or else a new symbol
    with the same assumptions as the matching symbol (or the
    assumptions as provided).

    Raises ValueError if ``s`` is neither a string nor a Symbol.

    Examples
    ========

    >>> from sympy import Symbol, Dummy
    >>> from sympy.core.symbol import _symbol
    >>> _symbol('y')
    y
    >>> _.is_real is None
    True
    >>> _symbol('y', real=True).is_real
    True

    >>> x = Symbol('x')
    >>> _symbol(x, real=True)
    x
    >>> _.is_real is None  # ignore attribute if s is a Symbol
    True

    Below, the variable sym has the name 'foo':

    >>> sym = Symbol('foo', real=True)

    Since 'x' is not the same as sym's name, a new symbol is created:

    >>> _symbol('x', sym).name
    'x'

    It will acquire any assumptions give:

    >>> _symbol('x', sym, real=False).is_real
    False

    Since 'foo' is the same as sym's name, sym is returned

    >>> _symbol('foo', sym)
    foo

    Any assumptions given are ignored:

    >>> _symbol('foo', sym, real=False).is_real
    True

    NB: the symbol here may not be the same as a symbol with the same
    name defined elsewhere as a result of different assumptions.

    See Also
    ========

    sympy.core.symbol.Symbol

    """
    if isinstance(s, str):
        # Reuse the provided symbol only when its name matches exactly;
        # the supplied assumptions are ignored in that case.
        if matching_symbol and matching_symbol.name == s:
            return matching_symbol
        return Symbol(s, **assumptions)
    elif isinstance(s, Symbol):
        return s
    else:
        raise ValueError('symbol must be string for symbol name or Symbol')
def _uniquely_named_symbol(xname, exprs=(), compare=str, modify=None, **assumptions):
    """Return a symbol which, when printed, will have a name unique
    from any other already in the expressions given. The name is made
    unique by prepending underscores (default) but this can be
    customized with the keyword 'modify'.

    Parameters
    ==========

    xname : a string or a Symbol (when symbol xname <- str(xname));
        may also be a 2-sequence ``(name, default_symbol)``, in which case
        the default symbol is reused when its name already matches
    compare : a single arg function that takes a symbol and returns
        a string to be compared with xname (the default is the str
        function which indicates how the name will look when it
        is printed, e.g. this includes underscores that appear on
        Dummy symbols)
    modify : a single arg function that changes its string argument
        in some way (the default is to prepend underscores)

    Examples
    ========

    >>> from sympy.core.symbol import _uniquely_named_symbol as usym, Dummy
    >>> from sympy.abc import x
    >>> usym('x', x)
    _x
    """
    default = None
    if is_sequence(xname):
        # Unpack the (name, default_symbol) form.
        xname, default = xname
    x = str(xname)
    if not exprs:
        return _symbol(x, default, **assumptions)
    if not is_sequence(exprs):
        exprs = [exprs]
    # All symbols the candidate name must not collide with.
    syms = set().union(*[e.free_symbols for e in exprs])
    if modify is None:
        modify = lambda s: '_' + s
    # Keep modifying until no printed name in scope matches.
    while any(x == compare(s) for s in syms):
        x = modify(x)
    return _symbol(x, default, **assumptions)
class Symbol(AtomicExpr, Boolean):
"""
Assumptions:
commutative = True
You can override the default assumptions in the constructor:
>>> from sympy import symbols
>>> A,B = symbols('A,B', commutative = False)
>>> bool(A*B != B*A)
True
>>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative
True
"""
    # A bare symbol has no known numeric value, so it is not comparable.
    is_comparable = False
    # Only the name is stored per instance (assumptions live in
    # ``_assumptions``); __slots__ keeps the many Symbol instances small.
    __slots__ = ('name',)
    is_Symbol = True
    is_symbol = True
    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Symbols.

        Examples
        ========

        >>> from sympy import Symbol
        >>> x = Symbol('x')
        >>> x._diff_wrt
        True
        """
        return True
    @staticmethod
    def _sanitize(assumptions, obj=None):
        """Remove None, covert values to bool, check commutativity *in place*.

        ``obj`` is only used to name the offending class in error messages.
        Raises ValueError when 'commutative' cannot be resolved to a bool.
        """
        # be strict about commutativity: cannot be None
        is_commutative = fuzzy_bool(assumptions.get('commutative', True))
        if is_commutative is None:
            whose = '%s ' % obj.__name__ if obj else ''
            raise ValueError(
                '%scommutativity must be True or False.' % whose)
        # sanitize other assumptions so 1 -> True and 0 -> False
        for key in list(assumptions.keys()):
            # NOTE(review): the imports and keymap construction below are
            # re-executed on every iteration; they look hoistable, but are
            # left untouched here to preserve the code byte-for-byte.
            from collections import defaultdict
            from sympy.utilities.exceptions import SymPyDeprecationWarning
            keymap = defaultdict(lambda: None)
            keymap.update({'bounded': 'finite', 'unbounded': 'infinite', 'infinitesimal': 'zero'})
            if keymap[key]:
                # Translate deprecated assumption names and warn the caller.
                SymPyDeprecationWarning(
                    feature="%s assumption" % key,
                    useinstead="%s" % keymap[key],
                    issue=8071,
                    deprecated_since_version="0.7.6").warn()
                assumptions[keymap[key]] = assumptions[key]
                assumptions.pop(key)
                key = keymap[key]
            v = assumptions[key]
            if v is None:
                assumptions.pop(key)
                continue
            assumptions[key] = bool(v)
    def _merge(self, assumptions):
        """Return this symbol's assumptions updated with ``assumptions``,
        raising ValueError on any conflicting key."""
        base = self.assumptions0
        # Every key present on both sides must agree before merging.
        for k in set(assumptions) & set(base):
            if assumptions[k] != base[k]:
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent('''
                    non-matching assumptions for %s: existing value
                    is %s and new value is %s''' % (
                    k, base[k], assumptions[k])))
        base.update(assumptions)
        return base
def __new__(cls, name, **assumptions):
"""Symbols are identified by name and assumptions::
>>> from sympy import Symbol
>>> Symbol("x") == Symbol("x")
True
>>> Symbol("x", real=True) == Symbol("x", real=False)
False
"""
cls._sanitize(assumptions, cls)
return Symbol.__xnew_cached_(cls, name, **assumptions)
def __new_stage2__(cls, name, **assumptions):
    """Actual constructor; assumes ``assumptions`` are already sanitized."""
    if not isinstance(name, str):
        raise TypeError("name should be a string, not %s" % repr(type(name)))

    obj = Expr.__new__(cls)
    obj.name = name

    # TODO: Issue #8873: Forcing the commutative assumption here means
    # later code such as ``srepr()`` cannot tell whether the user
    # specified ``commutative=True`` or omitted it.  To work around this,
    # keep a copy of the assumptions dict, build the StdFactKB, then
    # overwrite its ``._generator`` with the copy.  This is a hack: it
    # assumes StdFactKB merely stores the given dict as ``._generator``.
    requested = assumptions.copy()

    # be strict about commutativity
    assumptions['commutative'] = fuzzy_bool(
        assumptions.get('commutative', True))
    obj._assumptions = StdFactKB(assumptions)
    obj._assumptions._generator = requested  # Issue #8873
    return obj

__xnew__ = staticmethod(
    __new_stage2__)  # never cached (e.g. dummy)
__xnew_cached_ = staticmethod(
    cacheit(__new_stage2__))  # symbols are always cached
def __getnewargs__(self):
    # Pickle support: recreate via the one-argument constructor; the
    # assumptions travel separately through __getstate__.
    return (self.name,)
def __getstate__(self):
    # Only the assumptions need to be pickled; the name is restored by
    # __getnewargs__.
    return {'_assumptions': self._assumptions}
def _hashable_content(self):
    # Note: user-specified assumptions are not hashed, just derived ones.
    derived = sorted(self.assumptions0.items())
    return (self.name,) + tuple(derived)
def _eval_subs(self, old, new):
    from sympy.core.power import Pow
    if old.is_Pow:
        # Rewrite x as x**1 (unevaluated) so Pow's substitution logic can
        # handle patterns like x.subs(x**2, y).
        return Pow(self, S.One, evaluate=False)._eval_subs(old, new)
@property
def assumptions0(self):
    """Return the known (non-None) assumptions as a plain dict."""
    return {key: value
            for key, value in self._assumptions.items()
            if value is not None}
@cacheit
def sort_key(self, order=None):
    """Canonical sort key: class key, then the symbol's printed name."""
    args = (str(self),)
    return self.class_key(), (1, args), S.One.sort_key(), S.One
def as_dummy(self):
    # Only the name survives the conversion; assumptions are dropped.
    return Dummy(self.name)
def as_real_imag(self, deep=True, **hints):
    if hints.get('ignore') == self:
        return None
    from sympy import im, re
    return (re(self), im(self))
def _sage_(self):
    """Conversion hook used by Sage's SymPy interface."""
    import sage.all as sage
    return sage.var(self.name)
def is_constant(self, *wrt, **flags):
    """Return whether self is constant with respect to ``wrt``.

    With no ``wrt`` given a Symbol is never constant; otherwise it is
    constant exactly when it is not one of the given symbols.
    """
    if not wrt:
        return False
    # Idiom fix: was ``not self in wrt``.
    return self not in wrt
@property
def free_symbols(self):
    """A Symbol is its own (only) free symbol."""
    return {self}

binary_symbols = free_symbols  # in this case, not always
def as_set(self):
    # An unconstrained symbol may take any value at all.
    return S.UniversalSet
class Dummy(Symbol):
    """Dummy symbols are each unique, even if they have the same name:

    >>> from sympy import Dummy
    >>> Dummy("x") == Dummy("x")
    False

    If a name is not supplied then a string value of an internal count will be
    used. This is useful when a temporary variable is needed and the name
    of the variable used in the expression is not important.

    >>> Dummy() #doctest: +SKIP
    _Dummy_10
    """

    # In the rare event that a Dummy object needs to be recreated, both the
    # ``name`` and ``dummy_index`` should be passed; ``srepr`` relies on this:
    #   d1 = Dummy(); d2 = eval(srepr(d1)); d2 == d1  -> True
    # If a new session is started between ``srepr`` and ``eval``, there is a
    # very small chance that d2 will collide with a previously created Dummy.
    _count = 0
    _prng = random.Random()
    # Random per-process base so indices rarely collide across sessions.
    _base_dummy_index = _prng.randint(10**6, 9*10**6)

    __slots__ = ('dummy_index',)

    is_Dummy = True

    def __new__(cls, name=None, dummy_index=None, **assumptions):
        if dummy_index is not None:
            assert name is not None, "If you specify a dummy_index, you must also provide a name"

        if name is None:
            name = "Dummy_" + str(Dummy._count)

        if dummy_index is None:
            dummy_index = Dummy._base_dummy_index + Dummy._count
            Dummy._count += 1

        cls._sanitize(assumptions, cls)
        obj = Symbol.__xnew__(cls, name, **assumptions)
        obj.dummy_index = dummy_index
        return obj

    def __getstate__(self):
        # The index must round-trip through pickling to preserve identity.
        return {'_assumptions': self._assumptions,
                'dummy_index': self.dummy_index}

    @cacheit
    def sort_key(self, order=None):
        key = (str(self), self.dummy_index)
        return self.class_key(), (2, key), S.One.sort_key(), S.One

    def _hashable_content(self):
        return Symbol._hashable_content(self) + (self.dummy_index,)
class Wild(Symbol):
    """
    A Wild symbol matches anything, or anything
    without whatever is explicitly excluded.

    Parameters
    ==========

    name : str
        Name of the Wild instance.
    exclude : iterable, optional
        Instances in ``exclude`` will not be matched.
    properties : iterable of functions, optional
        Functions, each taking an expressions as input
        and returns a ``bool``. All functions in ``properties``
        need to return ``True`` in order for the Wild instance
        to match the expression.

    Examples
    ========

    >>> from sympy import Wild, WildFunction, cos, pi
    >>> from sympy.abc import x, y, z
    >>> a = Wild('a')
    >>> x.match(a)
    {a_: x}
    >>> pi.match(a)
    {a_: pi}
    >>> (3*x**2).match(a*x)
    {a_: 3*x}
    >>> cos(x).match(a)
    {a_: cos(x)}
    >>> b = Wild('b', exclude=[x])
    >>> (3*x**2).match(b*x)
    >>> b.match(a)
    {a_: b_}
    >>> A = WildFunction('A')
    >>> A.match(a)
    {a_: A_}

    Tips
    ====

    When using Wild, be sure to use the exclude
    keyword to make the pattern more precise.
    Without the exclude pattern, you may get matches
    that are technically correct, but not what you
    wanted. For example, using the above without
    exclude:

    >>> from sympy import symbols
    >>> a, b = symbols('a b', cls=Wild)
    >>> (2 + 3*y).match(a*x + b*y)
    {a_: 2/x, b_: 3}

    This is technically correct, because
    (2/x)*x + 3*y == 2 + 3*y, but you probably
    wanted it to not match at all. The issue is that
    you really didn't want a and b to include x and y,
    and the exclude parameter lets you specify exactly
    this. With the exclude parameter, the pattern will
    not match.

    >>> a = Wild('a', exclude=[x, y])
    >>> b = Wild('b', exclude=[x, y])
    >>> (2 + 3*y).match(a*x + b*y)

    Exclude also helps remove ambiguity from matches.

    >>> E = 2*x**3*y*z
    >>> a, b = symbols('a b', cls=Wild)
    >>> E.match(a*b)
    {a_: 2*y*z, b_: x**3}
    >>> a = Wild('a', exclude=[x, y])
    >>> E.match(a*b)
    {a_: z, b_: 2*x**3*y}
    >>> a = Wild('a', exclude=[x, y, z])
    >>> E.match(a*b)
    {a_: 2, b_: x**3*y*z}

    Wild also accepts a ``properties`` parameter:

    >>> a = Wild('a', properties=[lambda k: k.is_Integer])
    >>> E.match(a*b)
    {a_: 2, b_: x**3*y*z}
    """
    is_Wild = True

    __slots__ = ('exclude', 'properties')

    def __new__(cls, name, exclude=(), properties=(), **assumptions):
        exclude = tuple([sympify(x) for x in exclude])
        properties = tuple(properties)
        cls._sanitize(assumptions, cls)
        return Wild.__xnew__(cls, name, exclude, properties, **assumptions)

    def __getnewargs__(self):
        return (self.name, self.exclude, self.properties)

    @staticmethod
    @cacheit
    def __xnew__(cls, name, exclude, properties, **assumptions):
        obj = Symbol.__xnew__(cls, name, **assumptions)
        obj.exclude = exclude
        obj.properties = properties
        return obj

    def _hashable_content(self):
        return super(Wild, self)._hashable_content() + (self.exclude, self.properties)

    # TODO add check against another Wild
    def matches(self, expr, repl_dict=None, old=False):
        """Return ``repl_dict`` extended with ``{self: expr}`` if the match
        succeeds, else None.

        BUGFIX: the default used to be a mutable ``{}``; use None as the
        sentinel so a shared default dict can never be mutated between calls.
        """
        if any(expr.has(x) for x in self.exclude):
            return None
        if any(not f(expr) for f in self.properties):
            return None
        # Copy so the caller's mapping is never mutated.
        repl_dict = {} if repl_dict is None else repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
# Matches one range token inside a symbols() name: numeric endpoints
# ('5:10', ':10') or single-letter endpoints ('a:d', ':c') around a colon.
_range = _re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])')
def symbols(names, **args):
    r"""
Transform strings into instances of :class:`Symbol` class.
:func:`symbols` function returns a sequence of symbols with names taken
from ``names`` argument, which can be a comma or whitespace delimited
string, or a sequence of strings::
>>> from sympy import symbols, Function
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of output is dependent on the properties of input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols({'a', 'b', 'c'})
{a, b, c}
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols.
Ranges are indicated by a colon and the type of range is determined by
the character to the right of the colon. If the character is a digit
then all contiguous digits to the left are taken as the nonnegative
starting value (or 0 if there is no digit left of the colon) and all
contiguous digits to the right are taken as 1 greater than the ending
value::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5(:2)')
(x50, x51)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
If the character to the right of the colon is a letter, then the single
letter to the left (or 'a' if there is none) is taken as the start
and all characters in the lexicographic range *through* the letter to
the right are used as the range::
>>> symbols('x:z')
(x, y, z)
>>> symbols('x:c') # null range
()
>>> symbols('x(:c)')
(xa, xb, xc)
>>> symbols(':c')
(a, b, c)
>>> symbols('a:d, x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
Multiple ranges are supported; contiguous numerical ranges should be
separated by parentheses to disambiguate the ending number of one
range from the starting number of the next::
>>> symbols('x:2(1:3)')
(x01, x02, x11, x12)
>>> symbols(':3:2') # parsing is from left to right
(00, 01, 10, 11, 20, 21)
Only one pair of parentheses surrounding ranges are removed, so to
include parentheses around ranges, double them. And to include spaces,
commas, or colons, escape them with a backslash::
>>> symbols('x((a:b))')
(x(a), x(b))
>>> symbols(r'x(:1\,:2)') # or r'x((:1)\,(:2))'
(x(0,0), x(0,1))
All newly created symbols have assumptions set according to ``args``::
>>> a = symbols('a', integer=True)
>>> a.is_integer
True
>>> x, y, z = symbols('x,y,z', real=True)
>>> x.is_real and y.is_real and z.is_real
True
Despite its name, :func:`symbols` can create symbol-like objects like
instances of Function or Wild classes. To achieve this, set ``cls``
keyword argument to the desired type::
>>> symbols('f,g,h', cls=Function)
(f, g, h)
>>> type(_[0])
<class 'sympy.core.function.UndefinedFunction'>
"""
# Created symbols accumulate here; string input is parsed below, any
# other iterable recurses element-wise at the bottom of the function.
result = []
if isinstance(names, str):
marker = 0
# Backslash-escaped ',', ':' and ' ' are temporarily replaced with
# unused control characters so that splitting is safe; ``literal``
# undoes the substitution when the final names are created.
literals = [r'\,', r'\:', r'\ ']
for i in range(len(literals)):
lit = literals.pop(0)
if lit in names:
while chr(marker) in names:
marker += 1
lit_char = chr(marker)
marker += 1
names = names.replace(lit, lit_char)
literals.append((lit_char, lit[1:]))
def literal(s):
if literals:
for c, l in literals:
s = s.replace(c, l)
return s
names = names.strip()
as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip()
if not names:
raise ValueError('no symbols given')
# split on commas
names = [n.strip() for n in names.split(',')]
if not all(n for n in names):
raise ValueError('missing symbol between commas')
# split on spaces
for i in range(len(names) - 1, -1, -1):
names[i: i + 1] = names[i].split()
cls = args.pop('cls', Symbol)
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing symbol')
if ':' not in name:
symbol = cls(literal(name), **args)
result.append(symbol)
continue
# Expand range syntax such as 'x5:10' or 'a:d' (see docstring).
split = _range.split(name)
# remove 1 layer of bounding parentheses around ranges
for i in range(len(split) - 1):
if i and ':' in split[i] and split[i] != ':' and \
split[i - 1].endswith('(') and \
split[i + 1].startswith(')'):
split[i - 1] = split[i - 1][:-1]
split[i + 1] = split[i + 1][1:]
for i, s in enumerate(split):
if ':' in s:
if s[-1].endswith(':'):
raise ValueError('missing end range')
a, b = s.split(':')
if b[-1] in string.digits:
a = 0 if not a else int(a)
b = int(b)
split[i] = [str(c) for c in range(a, b)]
else:
a = a or 'a'
split[i] = [string.ascii_letters[c] for c in range(
string.ascii_letters.index(a),
string.ascii_letters.index(b) + 1)] # inclusive
if not split[i]:
break
else:
split[i] = [s]
else:
seq = True
if len(split) == 1:
names = split[0]
else:
names = [''.join(s) for s in cartes(*split)]
if literals:
result.extend([cls(literal(s), **args) for s in names])
else:
result.extend([cls(s, **args) for s in names])
if not seq and len(result) <= 1:
if not result:
return ()
return result[0]
return tuple(result)
else:
# Non-string input: recurse per element, preserving the container type.
for name in names:
result.append(symbols(name, **args))
return type(names)(result)
def var(names, **args):
    """
Create symbols and inject them into the global namespace.
This calls :func:`symbols` with the same arguments and puts the results
into the *global* namespace. It's recommended not to use :func:`var` in
library code, where :func:`symbols` has to be used::
Examples
========
>>> from sympy import var
>>> var('x')
x
>>> x
x
>>> var('a,ab,abc')
(a, ab, abc)
>>> abc
abc
>>> var('x,y', real=True)
(x, y)
>>> x.is_real and y.is_real
True
See :func:`symbols` documentation for more details on what kinds of
arguments can be passed to :func:`var`.
"""
# Walk nested containers of created objects, binding each by name in
# the caller's global namespace.
def traverse(symbols, frame):
"""Recursively inject symbols to the global namespace. """
for symbol in symbols:
if isinstance(symbol, Basic):
frame.f_globals[symbol.name] = symbol
elif isinstance(symbol, FunctionClass):
frame.f_globals[symbol.__name__] = symbol
else:
traverse(symbol, frame)
from inspect import currentframe
# The caller's frame: its globals receive the new names.
frame = currentframe().f_back
try:
syms = symbols(names, **args)
if syms is not None:
if isinstance(syms, Basic):
frame.f_globals[syms.name] = syms
elif isinstance(syms, FunctionClass):
frame.f_globals[syms.__name__] = syms
else:
traverse(syms, frame)
finally:
del frame # break cyclic dependencies as stated in inspect docs
return syms
def disambiguate(*iter):
    """
Return a Tuple containing the passed expressions with symbols
that appear the same when printed replaced with numerically
subscripted symbols, and all Dummy symbols replaced with Symbols.
Parameters
==========
iter: list of symbols or expressions.
Examples
========
>>> from sympy.core.symbol import disambiguate
>>> from sympy import Dummy, Symbol, Tuple
>>> from sympy.abc import y
>>> tup = Symbol('_x'), Dummy('x'), Dummy('x')
>>> disambiguate(*tup)
(x_2, x, x_1)
>>> eqs = Tuple(Symbol('x')/y, Dummy('x')/y)
>>> disambiguate(*eqs)
(x_1/y, x/y)
>>> ix = Symbol('x', integer=True)
>>> vx = Symbol('x')
>>> disambiguate(vx + ix)
(x + x_1,)
To make your own mapping of symbols to use, pass only the free symbols
of the expressions and create a dictionary:
>>> free = eqs.free_symbols
>>> mapping = dict(zip(free, disambiguate(*free)))
>>> eqs.xreplace(mapping)
(x_1/y, x/y)
"""
new_iter = Tuple(*iter)
# Order symbols deterministically by their assumptions so the subscript
# assignment below is reproducible.
key = lambda x:tuple(sorted(x.assumptions0.items()))
syms = ordered(new_iter.free_symbols, keys=key)
# Group symbols by their printed name (leading underscores stripped).
mapping = {}
for s in syms:
mapping.setdefault(str(s).lstrip('_'), []).append(s)
reps = {}
for k in mapping:
# the first or only symbol doesn't get subscripted but make
# sure that it's a Symbol, not a Dummy
mapk0 = Symbol("%s" % (k), **mapping[k][0].assumptions0)
if mapping[k][0] != mapk0:
reps[mapping[k][0]] = mapk0
# the others get subscripts (and are made into Symbols)
skip = 0
for i in range(1, len(mapping[k])):
while True:
name = "%s_%i" % (k, i + skip)
if name not in mapping:
break
skip += 1
ki = mapping[k][i]
reps[ki] = Symbol(name, **ki.assumptions0)
return new_iter.xreplace(reps)
| 30.713609
| 98
| 0.567449
|
4a0ecd4da18f0ecff244dbd7d62216e62af7b69b
| 5,263
|
py
|
Python
|
keras_bert/loader.py
|
Jie-Yuan/keras-bert
|
c3181db8fb292691335e38610628ff75f3762b54
|
[
"MIT"
] | null | null | null |
keras_bert/loader.py
|
Jie-Yuan/keras-bert
|
c3181db8fb292691335e38610628ff75f3762b54
|
[
"MIT"
] | null | null | null |
keras_bert/loader.py
|
Jie-Yuan/keras-bert
|
c3181db8fb292691335e38610628ff75f3762b54
|
[
"MIT"
] | null | null | null |
import json
import keras
import numpy as np
import tensorflow as tf
from .bert import get_model
def load_trained_model_from_checkpoint(config_file, checkpoint_file, training=False):
    """Build a BERT model and initialize its weights from a TF checkpoint.

    :param config_file: Path to the ``bert_config.json`` shipped with the
        pretrained checkpoint.
    :param checkpoint_file: Path (prefix) of the TF checkpoint.
    :param training: If True, return the full pre-training model including
        the MLM and NSP heads; otherwise return a frozen feature-extraction
        model.
    :return: A Keras model with the pretrained weights loaded.
    """
    with open(config_file, 'r') as reader:
        config = json.loads(reader.read())
    model = get_model(
        token_num=config['vocab_size'],
        pos_num=config['max_position_embeddings'],
        seq_len=config['max_position_embeddings'],
        embed_dim=config['hidden_size'],
        transformer_num=config['num_hidden_layers'],
        head_num=config['num_attention_heads'],
        feed_forward_dim=config['intermediate_size'],
        training=training,
    )
    if not training:
        # In inference mode ``get_model`` returns (inputs, outputs); wrap
        # them in a Model so weights can be assigned by layer name.
        inputs, outputs = model
        model = keras.models.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer='adam', loss='mse', metrics={})

    def _load(name):
        # Read a single variable from the checkpoint.
        return tf.train.load_variable(checkpoint_file, name)

    # Embedding layers.
    model.get_layer(name='Embedding-Token').set_weights([
        _load('bert/embeddings/word_embeddings'),
    ])
    model.get_layer(name='Embedding-Position').set_weights([
        _load('bert/embeddings/position_embeddings'),
    ])
    model.get_layer(name='Embedding-Segment').set_weights([
        _load('bert/embeddings/token_type_embeddings'),
    ])
    model.get_layer(name='Embedding-Norm').set_weights([
        _load('bert/embeddings/LayerNorm/gamma'),
        _load('bert/embeddings/LayerNorm/beta'),
    ])

    # Transformer encoder layers.
    for i in range(config['num_hidden_layers']):
        model.get_layer(name='Encoder-%d-MultiHeadSelfAttention' % (i + 1)).set_weights([
            _load('bert/encoder/layer_%d/attention/self/query/kernel' % i),
            _load('bert/encoder/layer_%d/attention/self/query/bias' % i),
            _load('bert/encoder/layer_%d/attention/self/key/kernel' % i),
            _load('bert/encoder/layer_%d/attention/self/key/bias' % i),
            _load('bert/encoder/layer_%d/attention/self/value/kernel' % i),
            _load('bert/encoder/layer_%d/attention/self/value/bias' % i),
            _load('bert/encoder/layer_%d/attention/output/dense/kernel' % i),
            _load('bert/encoder/layer_%d/attention/output/dense/bias' % i),
        ])
        # BUGFIX: the original assigned these LayerNorm weights twice in a
        # row with identical values; the redundant duplicate was removed.
        model.get_layer(name='Encoder-%d-MultiHeadSelfAttention-Norm' % (i + 1)).set_weights([
            _load('bert/encoder/layer_%d/attention/output/LayerNorm/gamma' % i),
            _load('bert/encoder/layer_%d/attention/output/LayerNorm/beta' % i),
        ])
        model.get_layer(name='Encoder-%d-FeedForward' % (i + 1)).set_weights([
            _load('bert/encoder/layer_%d/intermediate/dense/kernel' % i),
            _load('bert/encoder/layer_%d/intermediate/dense/bias' % i),
            _load('bert/encoder/layer_%d/output/dense/kernel' % i),
            _load('bert/encoder/layer_%d/output/dense/bias' % i),
        ])
        model.get_layer(name='Encoder-%d-FeedForward-Norm' % (i + 1)).set_weights([
            _load('bert/encoder/layer_%d/output/LayerNorm/gamma' % i),
            _load('bert/encoder/layer_%d/output/LayerNorm/beta' % i),
        ])

    if training:
        # Pre-training heads: masked-LM and next-sentence prediction.
        model.get_layer(name='MLM-Dense').set_weights([
            _load('cls/predictions/transform/dense/kernel'),
            _load('cls/predictions/transform/dense/bias'),
        ])
        model.get_layer(name='MLM-Norm').set_weights([
            _load('cls/predictions/transform/LayerNorm/gamma'),
            _load('cls/predictions/transform/LayerNorm/beta'),
        ])
        model.get_layer(name='MLM-Sim').set_weights([
            _load('cls/predictions/output_bias'),
        ])
        model.get_layer(name='NSP-Dense').set_weights([
            _load('bert/pooler/dense/kernel'),
            _load('bert/pooler/dense/bias'),
        ])
        model.get_layer(name='NSP').set_weights([
            # The checkpoint stores the NSP weights transposed relative to
            # a Keras Dense kernel.
            np.transpose(_load('cls/seq_relationship/output_weights')),
            _load('cls/seq_relationship/output_bias'),
        ])
    else:
        model.trainable = False
    return model
| 58.477778
| 114
| 0.687631
|
4a0ecd7e0e380de9f8fb54f6ea86bb1818eda3ca
| 877
|
py
|
Python
|
pynecone/pynecone.py
|
markolaban/pycli
|
e9c7b660a0ceb2ed61a6d3f87558a164bb0d2792
|
[
"BSD-3-Clause"
] | null | null | null |
pynecone/pynecone.py
|
markolaban/pycli
|
e9c7b660a0ceb2ed61a6d3f87558a164bb0d2792
|
[
"BSD-3-Clause"
] | null | null | null |
pynecone/pynecone.py
|
markolaban/pycli
|
e9c7b660a0ceb2ed61a6d3f87558a164bb0d2792
|
[
"BSD-3-Clause"
] | null | null | null |
from .shell import Shell
from .gen import Gen
from .env import Env
from .broker import Broker
from .folder import Folder
from .job import Job
from .mount import Mount
from .task import Task
from .topic import Topic
from .repl import Repl
from .api import Api
from .rest import Rest
from .config import Config
from .test import Test
class Pynecone(Shell):
    """Top-level command shell for the pynecone CLI."""

    def __init__(self):
        super().__init__('pynecone')

    def get_commands(self):
        # Built-in commands first, then any commands contributed by the
        # active configuration.
        builtin = [
            Gen(), Env(), Api(), Broker(), Folder(), Job(),
            Mount(), Task(), Topic(), Test(), Repl(), Rest(),
        ]
        return builtin + Config.init().list_commands()

    def add_arguments(self, parser):
        # The root shell declares no arguments of its own.
        pass

    def get_help(self):
        return 'pynecone shell'
| 21.925
| 55
| 0.54504
|
4a0ecddb44135185b16de1464509c22c6d937d52
| 2,401
|
py
|
Python
|
photo/migrations/0001_initial.py
|
Mukantwarivirginie/Instagram
|
91cf561e696638ec0ef410cc6410b844cf9bc2e7
|
[
"MIT"
] | null | null | null |
photo/migrations/0001_initial.py
|
Mukantwarivirginie/Instagram
|
91cf561e696638ec0ef410cc6410b844cf9bc2e7
|
[
"MIT"
] | null | null | null |
photo/migrations/0001_initial.py
|
Mukantwarivirginie/Instagram
|
91cf561e696638ec0ef410cc6410b844cf9bc2e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-28 16:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the photo app.

    Fix over the generated version: ``Comments.name`` used the abstract
    ``models.Field`` base class, which is not a concrete database field;
    it is now a ``CharField`` matching the equivalent fields on the
    sibling models.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # BUGFIX: was ``models.Field(max_length=30)`` (abstract base).
                ('name', models.CharField(max_length=30)),
                ('bio', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('bio', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image_name', models.CharField(max_length=30)),
                ('image', models.ImageField(upload_to='photo/')),
                ('like', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='NewArticleForm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.CharField(max_length=30)),
                ('picture', models.ImageField(upload_to='photo/')),
                ('like', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='photo/')),
                ('name', models.CharField(max_length=30)),
                ('bio', models.CharField(max_length=30)),
                ('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 37.515625
| 120
| 0.558934
|
4a0ece2d14b9cdbfdde6aa3123d6e56ebe08a4ff
| 1,100
|
py
|
Python
|
sources/openjpeg/large_image_source_openjpeg/girder_source.py
|
naglepuff/large_image
|
4e928166f228fe894c38e4b01af5370e72f7229c
|
[
"Apache-2.0"
] | 85
|
2017-03-10T09:48:17.000Z
|
2022-03-31T18:55:58.000Z
|
sources/openjpeg/large_image_source_openjpeg/girder_source.py
|
naglepuff/large_image
|
4e928166f228fe894c38e4b01af5370e72f7229c
|
[
"Apache-2.0"
] | 248
|
2017-01-27T16:11:13.000Z
|
2022-03-31T14:05:18.000Z
|
sources/openjpeg/large_image_source_openjpeg/girder_source.py
|
naglepuff/large_image
|
4e928166f228fe894c38e4b01af5370e72f7229c
|
[
"Apache-2.0"
] | 33
|
2017-03-10T14:06:35.000Z
|
2022-03-19T08:32:06.000Z
|
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from girder_large_image.girder_tilesource import GirderTileSource
from . import OpenjpegFileTileSource
class OpenjpegGirderTileSource(OpenjpegFileTileSource, GirderTileSource):
"""
Provides tile access to Girder items with a jp2 file or other files that
the openjpeg library can read.
"""
cacheName = 'tilesource'
name = 'openjpeg'
| 36.666667
| 78
| 0.645455
|
4a0eceedf6c81081432c6bdf1999c850ed516591
| 632
|
py
|
Python
|
tests/const.py
|
dbarvik/thermal_comfort
|
d09c72d1b23ed6e1f5079212a20b1c8002b6237b
|
[
"MIT"
] | null | null | null |
tests/const.py
|
dbarvik/thermal_comfort
|
d09c72d1b23ed6e1f5079212a20b1c8002b6237b
|
[
"MIT"
] | null | null | null |
tests/const.py
|
dbarvik/thermal_comfort
|
d09c72d1b23ed6e1f5079212a20b1c8002b6237b
|
[
"MIT"
] | null | null | null |
"""General test constants."""
from homeassistant.const import CONF_NAME
from custom_components.thermal_comfort.const import (
CONF_HUMIDITY_SENSOR,
CONF_POLL,
CONF_TEMPERATURE_SENSOR,
)
from custom_components.thermal_comfort.sensor import CONF_CUSTOM_ICONS, SensorType
# Baseline config-flow input shared by the tests.
USER_INPUT = {
    CONF_NAME: "test_thermal_comfort",
    CONF_TEMPERATURE_SENSOR: "sensor.test_temperature_sensor",
    CONF_HUMIDITY_SENSOR: "sensor.test_humidity_sensor",
    CONF_POLL: False,
    CONF_CUSTOM_ICONS: False,
}

# Variant with a different name. Snapshotted *before* the sensor-type
# flags are added below, so it intentionally lacks them.
USER_NEW_INPUT = {**USER_INPUT, CONF_NAME: "New name"}

# Enable every sensor type on the baseline input.
USER_INPUT.update({sensor_type: True for sensor_type in SensorType})
| 26.333333
| 82
| 0.780063
|
4a0ecf0a3c965578a76e4e3a0693cd2857cd266e
| 2,042
|
py
|
Python
|
privacy/tests/test_settings.py
|
bitlabstudio/django-privacy
|
2dda89b411794d35c623ef060c18f45faba32986
|
[
"MIT"
] | 2
|
2017-11-06T21:52:07.000Z
|
2019-07-15T19:18:05.000Z
|
privacy/tests/test_settings.py
|
bitlabstudio/django-privacy
|
2dda89b411794d35c623ef060c18f45faba32986
|
[
"MIT"
] | null | null | null |
privacy/tests/test_settings.py
|
bitlabstudio/django-privacy
|
2dda89b411794d35c623ef060c18f45faba32986
|
[
"MIT"
] | null | null | null |
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
PRIVACY_CLEARANCE_LEVEL_FUNCTION = \
'test_app.privacy_settings.get_clearance_level'
SITE_ID = 1
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = 'privacy.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(__file__, '../../static/')
STATICFILES_DIRS = (
os.path.join(__file__, 'test_static'),
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), '../templates'),
)
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
os.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.core.context_processors.request',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'django_nose',
'hvad',
'django_libs',
]
INTERNAL_APPS = [
'privacy.tests.test_app',
'privacy',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
SECRET_KEY = 'mn4fjcj5=(cj36$_8a!6ar0u!0ko!b24kns&gz7u*k*@a5t(ob'
| 24.309524
| 65
| 0.703722
|
4a0ed0070c39853c8024c8eb85dfa31e3d9d7f24
| 3,015
|
py
|
Python
|
tools/perry.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
tools/perry.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perry.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all permutations of pairs of tests in a gtest binary to attempt to
detect state leakage between tests.
Example invocation:
gn gen out/asan --args='is_asan=true enable_nacl=false is_debug=false'
ninja -C out/asan base_unittests
tools/perry.py out/asan/base_unittests > perry.log &
tail -f perry.log
You might want to run it in `screen` as it'll take a while.
"""
from __future__ import print_function
import argparse
import os
import multiprocessing
import subprocess
import sys
def _GetTestList(path_to_binary):
"""Returns a set of full test names.
Each test will be of the form "Case.Test". There will be a separate line
for each combination of Case/Test (there are often multiple tests in each
case).
"""
raw_output = subprocess.check_output([path_to_binary, "--gtest_list_tests"])
input_lines = raw_output.splitlines()
# The format of the gtest_list_tests output is:
# "Case1."
# " Test1 # <Optional extra stuff>"
# " Test2"
# "Case2."
# " Test1"
case_name = '' # Includes trailing dot.
test_set = set()
for line in input_lines:
if len(line) > 1:
if '#' in line:
line = line[:line.find('#')]
if line[0] == ' ':
# Indented means a test in previous case.
test_set.add(case_name + line.strip())
else:
# New test case.
case_name = line.strip()
return test_set
def _CheckForFailure(data):
    """Run one pair of tests together; return (test0, test1, output) on failure.

    ``data`` is a (test_binary, test0, test1) tuple so this can be mapped
    over a multiprocessing pool. Returns None when the pair passes.
    """
    test_binary, first, second = data
    proc = subprocess.Popen(
        [test_binary, '--gtest_repeat=5', '--gtest_shuffle',
         '--gtest_filter=' + first + ':' + second],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode == 0:
        return None
    # NOTE(review): ``out`` is bytes on Python 3; it is only ever printed,
    # so this was presumably written for Python 2 — confirm before porting.
    return (first, second, out)
def _PrintStatus(i, total, failed):
status = '%d of %d tested (%d failures)' % (i+1, total, failed)
print('\r%s%s' % (status, '\x1B[K'), end=' ')
sys.stdout.flush()
def main():
  """Enumerates all tests in the given gtest binary, then runs every
  ordered pair of tests together in a process pool and reports pairs whose
  combined run fails — a hint that state leaks from one test to the other.
  """
  parser = argparse.ArgumentParser(description="Find failing pairs of tests.")
  parser.add_argument('binary', help='Path to gtest binary or wrapper script.')
  args = parser.parse_args()
  print('Getting test list...')
  all_tests = _GetTestList(args.binary)
  # All ordered pairs, including (x, x): N^2 jobs for N tests.
  permuted = [(args.binary, x, y) for x in all_tests for y in all_tests]
  failed = []
  pool = multiprocessing.Pool()
  total_count = len(permuted)
  # chunksize=1 so results stream back as soon as each pair finishes.
  for i, result in enumerate(pool.imap_unordered(
      _CheckForFailure, permuted, 1)):
    if result:
      print('\n--gtest_filter=%s:%s failed\n\n%s\n\n' % (result[0], result[1],
                                                         result[2]))
      failed.append(result)
    _PrintStatus(i, total_count, len(failed))
  pool.terminate()
  pool.join()
  if failed:
    print('Failed pairs:')
    for f in failed:
      print(f[0], f[1])
  return 0
if __name__ == '__main__':
  sys.exit(main())
| 27.409091
| 79
| 0.658375
|
4a0ed1db0e99ef1b9ba19ea3e9a8fb4bf695a907
| 920
|
py
|
Python
|
moviepy/video/fx/freeze_at_end.py
|
theContentMint/moviepy
|
a2f74d1152563b4a6dafc6c9cdb688c7a5244965
|
[
"MIT"
] | 1
|
2019-10-08T23:12:16.000Z
|
2019-10-08T23:12:16.000Z
|
moviepy/video/fx/freeze_at_end.py
|
theContentMint/moviepy
|
a2f74d1152563b4a6dafc6c9cdb688c7a5244965
|
[
"MIT"
] | null | null | null |
moviepy/video/fx/freeze_at_end.py
|
theContentMint/moviepy
|
a2f74d1152563b4a6dafc6c9cdb688c7a5244965
|
[
"MIT"
] | null | null | null |
from moviepy.decorators import requires_duration
from moviepy.video.VideoClip import ImageClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
@requires_duration
def freeze_at_end(clip, freeze_duration=None, total_duration=None):
    """
    Makes the clip freeze on its last frame. With ``freeze_duration`` you
    can specify the duration of the freeze. With ``total_duration`` you can
    specify the total duration of the clip and the freeze (i.e. the
    duration of the freeze is automatically calculated). If neither
    is provided, the freeze will have an infinite length.
    """
    # Still image built from the clip's final frame.
    freezed_clip = ImageClip(clip.get_frame(clip.end))
    if total_duration:
        freeze_duration = total_duration - clip.duration
    if freeze_duration:
        freezed_clip = freezed_clip.set_duration(freeze_duration)
    # The frozen still starts exactly where the original clip ends.
    return CompositeVideoClip([clip,freezed_clip.set_start(clip.end)])
| 40
| 75
| 0.757609
|
4a0ed20b783a9b75dbe57dd4d55c6ac7b4a41da2
| 1,500
|
py
|
Python
|
filter_events.py
|
maua-maua-maua/RAVE
|
c5a2bdf45970994150b2b723afa0d869cbf21b45
|
[
"MIT"
] | null | null | null |
filter_events.py
|
maua-maua-maua/RAVE
|
c5a2bdf45970994150b2b723afa0d869cbf21b45
|
[
"MIT"
] | null | null | null |
filter_events.py
|
maua-maua-maua/RAVE
|
c5a2bdf45970994150b2b723afa0d869cbf21b45
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.compat.v1.summary import FileWriter
from tensorflow.python.summary.summary_iterator import summary_iterator
from tqdm import tqdm
def parse_arguments():
    """Parse and return the command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser(description="")
    arg_parser.add_argument("--event", help="event file", required=True)
    return arg_parser.parse_args()
def main(args):
    """Copy the events from ``args.event`` into a new event file under a
    sibling ``filtered_events/`` directory, dropping non-scalar summary
    values except on every 50th step (shrinks the log while keeping the
    scalar curves intact)."""
    with tf.compat.v1.Graph().as_default():
        out_path = os.path.join(os.path.dirname(args.event), "filtered_events")
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        writer = FileWriter(out_path)
        for event in summary_iterator(args.event):
            event_type = event.WhichOneof("what")
            if event_type != "summary":
                # Non-summary events (file_version, graph, ...) pass through.
                writer.add_event(event)
            else:
                wall_time = event.wall_time
                step = event.step
                # Keep scalar (simple_value) entries always; keep everything
                # else (images, histograms, ...) only every 50 steps.
                filtered_values = [
                    value for value in event.summary.value if value.HasField("simple_value") or step % 50 == 0
                ]
                summary = tf.compat.v1.Summary(value=filtered_values)
                filtered_event = tf.compat.v1.summary.Event(summary=summary, wall_time=wall_time, step=step)
                writer.add_event(filtered_event)
        writer.close()
    return 0
if __name__ == "__main__":
    args = parse_arguments()
    sys.exit(main(args))
| 28.301887
| 110
| 0.638
|
4a0ed286229b0d3a1e0b329dfb4131f6c65c0d37
| 2,382
|
py
|
Python
|
imageops/imageops-deform.py
|
martinmcbride/python-imaging-book-examples
|
37e4ccf9b7b2fc3ff75b1fdb9f772de452a843b2
|
[
"MIT"
] | 1
|
2021-08-22T17:09:44.000Z
|
2021-08-22T17:09:44.000Z
|
imageops/imageops-deform.py
|
sthagen/python-imaging-book-examples
|
2a079c5271f9849bc90a33bed6f3288142035ea7
|
[
"MIT"
] | null | null | null |
imageops/imageops-deform.py
|
sthagen/python-imaging-book-examples
|
2a079c5271f9849bc90a33bed6f3288142035ea7
|
[
"MIT"
] | 1
|
2021-08-22T17:09:48.000Z
|
2021-08-22T17:09:48.000Z
|
# Author: Martin McBride
# Created: 2021-07-19
# Copyright (C) 2021, Martin McBride
# License: MIT
# deform an image
from PIL import Image, ImageOps, ImageDraw
import math
class SingleDeformer:
    """Deformer mapping one fixed target rectangle onto a fixed source quad."""
    def getmesh(self, img):
        """Return the single-entry mesh consumed by ImageOps.deform."""
        w, h = img.size
        # One mesh entry: target rectangle, then the corresponding source
        # quadrilateral given as NW, SW, SE, NE corners (method=QUAD order).
        target_box = (200, 100, 300, 200)
        source_quad = (0, 0, 0, 100, 100, 200, 100, 0)
        return [(target_box, source_quad)]
class WaveDeformer:
    """Deformer that displaces a regular grid along a horizontal sine wave."""
    def transform(self, x, y):
        """Shift a point vertically by a sine of its x position."""
        return x, y + 10 * math.sin(x / 40)
    def transform_rectangle(self, x0, y0, x1, y1):
        """Transform a rectangle's corners (NW, SW, SE, NE) into a flat 8-tuple."""
        corners = ((x0, y0), (x0, y1), (x1, y1), (x1, y0))
        flat = []
        for cx, cy in corners:
            flat.extend(self.transform(cx, cy))
        return tuple(flat)
    def getmesh(self, img):
        """Build the (target, source) mesh over a regular grid of squares."""
        self.w, self.h = img.size
        gridspace = 20
        # Target grid is axis-aligned squares; the source grid is the same
        # squares pushed through the wave transform.
        target_grid = [
            (x, y, x + gridspace, y + gridspace)
            for x in range(0, self.w, gridspace)
            for y in range(0, self.h, gridspace)
        ]
        source_grid = [self.transform_rectangle(*rect) for rect in target_grid]
        return list(zip(target_grid, source_grid))
# Apply the one-rectangle deformation and the whole-image wave deformation.
image = Image.open('boat-small.jpg')
result_image = ImageOps.deform(image, SingleDeformer())
result_image.save('imageops-deform.jpg')
result_image = ImageOps.deform(image, WaveDeformer())
result_image.save('imageops-wavedeform.jpg')
# Visualise the wave source grid: a dot at each deformed cell's first corner.
grid = Image.new('RGB', image.size, 'grey')
mesh = WaveDeformer().getmesh(grid)
draw = ImageDraw.Draw(grid)
for target, source in mesh:
    x, y = source[0], source[1]
    draw.ellipse((x-2, y-2, x+2, y+2), fill='black')
grid.save('imageops-sourcegrid.png')
# Visualise the single deformer's source quadrilateral on the photo itself:
# mark all four corners (the 8-tuple is NW, SW, SE, NE coordinate pairs).
grid = Image.open('boat-small.jpg')
mesh = SingleDeformer().getmesh(grid)
draw = ImageDraw.Draw(grid)
for target, source in mesh:
    x, y = source[0], source[1]
    draw.ellipse((x-2, y-2, x+2, y+2), fill='black')
    x, y = source[2], source[3]
    draw.ellipse((x - 2, y - 2, x + 2, y + 2), fill='black')
    x, y = source[4], source[5]
    draw.ellipse((x - 2, y - 2, x + 2, y + 2), fill='black')
    x, y = source[6], source[7]
    draw.ellipse((x - 2, y - 2, x + 2, y + 2), fill='black')
grid.save('imageops-boatsourcegrid.png')
| 30.151899
| 79
| 0.584803
|
4a0ed2b4af045b5d8054d22e357a8889352e7c83
| 2,317
|
py
|
Python
|
apps/volontulo/tests/models/test_userprofile.py
|
fredfalcon/owasp-volunteer-portal
|
9b90455deb7817e0b537a39b43533e384ce51a6f
|
[
"MIT"
] | 4
|
2018-05-19T23:34:08.000Z
|
2022-03-31T23:26:41.000Z
|
apps/volontulo/tests/models/test_userprofile.py
|
fredfalcon/owasp-volunteer-portal
|
9b90455deb7817e0b537a39b43533e384ce51a6f
|
[
"MIT"
] | null | null | null |
apps/volontulo/tests/models/test_userprofile.py
|
fredfalcon/owasp-volunteer-portal
|
9b90455deb7817e0b537a39b43533e384ce51a6f
|
[
"MIT"
] | 1
|
2021-01-28T12:12:28.000Z
|
2021-01-28T12:12:28.000Z
|
# -*- coding: utf-8 -*-
u"""
.. module:: test_userprofile
"""
from __future__ import unicode_literals
from django.test import TestCase
from apps.volontulo.models import Organization
from apps.volontulo.models import User
from apps.volontulo.models import UserProfile
class TestUserProfile(TestCase):
    u"""Class responsible for testing UserProfile model."""
    def setUp(self):
        u"""Set up each test.

        Creates three profiles: a plain volunteer, a user attached to an
        organization, and an administrator.
        """
        # volunteer user
        self.volunteer_user = UserProfile.objects.create(
            user=User.objects.create_user(
                username='volunteer@example.com',
                email='volunteer@example.com',
                password='volunteer',
            ),
            is_administrator=False,
        )
        # organization user
        self.organization_user = UserProfile.objects.create(
            user=User.objects.create_user(
                username='organization@example.com',
                email='organization@example.com',
                password='organization',
            ),
            is_administrator=False,
        )
        self.organization_user.organizations.add(
            Organization.objects.create(name=u'Organization')
        )
        # administrator user
        self.administrator_user = UserProfile.objects.create(
            user=User.objects.create_user(
                username='administrator@example.com',
                email='administrator@example.com',
                password='administrator',
            ),
            is_administrator=True
        )
    def test__string_reprezentation(self):
        u"""String representation of a userprofile is the user's email."""
        self.assertEqual(
            str(self.volunteer_user),
            u'volunteer@example.com',
        )
        self.assertEqual(
            str(self.organization_user),
            u'organization@example.com'
        )
        self.assertEqual(
            str(self.administrator_user),
            u'administrator@example.com'
        )
    def test__is_admin_or_volunteer(self):
        """Check if specified user has enabled/disabled administrator flag."""
        self.assertTrue(self.administrator_user.is_administrator)
        self.assertFalse(self.volunteer_user.is_administrator)
        self.assertFalse(self.organization_user.is_administrator)
| 31.310811
| 78
| 0.617609
|
4a0ed2bb155cf8e9b4c234239ece0f4c510e92d8
| 768
|
py
|
Python
|
vdaoGovToken/scripts/py/includes/config.py
|
harmony-one-vdao/vdao-gov-token
|
dd1adebab7e2d2e8dfcdcbebcace0d0431df4d8b
|
[
"MIT"
] | null | null | null |
vdaoGovToken/scripts/py/includes/config.py
|
harmony-one-vdao/vdao-gov-token
|
dd1adebab7e2d2e8dfcdcbebcace0d0431df4d8b
|
[
"MIT"
] | null | null | null |
vdaoGovToken/scripts/py/includes/config.py
|
harmony-one-vdao/vdao-gov-token
|
dd1adebab7e2d2e8dfcdcbebcace0d0431df4d8b
|
[
"MIT"
] | null | null | null |
import logging, os, sys
import pathlib
# Directory containing this module; data/log paths are resolved relative to it.
file_path = pathlib.Path(__file__).parent.resolve()
# 10**18 — presumably the token's base-unit divisor (18 decimals); TODO confirm.
places = 1000000000000000000
# Page count used by consumers of this config — verify against callers.
pages = 10
# JSON-RPC endpoint for Harmony chain queries.
harmony_api = "https://rpc.hermesdefi.io/"
def create_data_path(pth: str, data_path: str = "data") -> os.path:
    """Ensure that ``<file_path>/../<data_path>/<pth>`` exists and return it.

    :param pth: directory name to create inside the data directory
    :param data_path: name of the parent data directory (default ``"data"``)
    :return: the (possibly newly created) directory path
    """
    p = os.path.join(file_path, '..', data_path, pth)
    # makedirs with exist_ok is race-free and also creates any missing
    # parent directories, unlike the previous single-level os.mkdir.
    os.makedirs(p, exist_ok=True)
    return p
create_data_path((""))
create_data_path(("logs"), "")
file_handler = logging.FileHandler(
filename=os.path.join(file_path, '..', "logs", "rpc_data.log"))
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
logging.basicConfig(level=logging.INFO,
format="%(message)s",
handlers=handlers)
log = logging.getLogger()
| 26.482759
| 67
| 0.670573
|
4a0ed313aff7c7a8c004922cb0e995c82fd49097
| 525
|
py
|
Python
|
fastest/file_handler/ignore_paths.py
|
ltbringer/fastest
|
4026cfbe055baa07f462dc90b23d4368c5aaea7f
|
[
"MIT"
] | 1
|
2020-04-25T10:44:03.000Z
|
2020-04-25T10:44:03.000Z
|
fastest/file_handler/ignore_paths.py
|
ltbringer/fastest
|
4026cfbe055baa07f462dc90b23d4368c5aaea7f
|
[
"MIT"
] | null | null | null |
fastest/file_handler/ignore_paths.py
|
ltbringer/fastest
|
4026cfbe055baa07f462dc90b23d4368c5aaea7f
|
[
"MIT"
] | null | null | null |
import fnmatch
def is_path_to_be_ignored(event_path, report_path, ignore_patterns):
    """
    answers: Is event_path one among the ignore_patterns?

    Strips the ``report_path`` prefix from ``event_path`` once, then matches
    the remainder against each glob-style pattern with :mod:`fnmatch`.

    :param event_path: str - path of the file-system event
    :param report_path: str - root path to strip from ``event_path``
    :param ignore_patterns: list - glob patterns (fnmatch syntax)
    :return: bool - True if the relative path matches any pattern
    """
    # The prefix strip does not depend on the pattern, so do it once instead
    # of once per pattern. partition() also tolerates report_path being
    # absent or occurring again deeper in the path, where the original
    # two-way unpack of split() raised ValueError.
    _, _, current_file_path = event_path.partition(report_path + '/')
    return any(
        fnmatch.fnmatch(current_file_path, ignored_pattern)
        for ignored_pattern in ignore_patterns
    )
| 29.166667
| 68
| 0.704762
|
4a0ed38b7df4fa5f905b075bdd460992b3825943
| 22
|
py
|
Python
|
python/testData/inspections/PyRedeclarationInspection/qualifiedTarget.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyRedeclarationInspection/qualifiedTarget.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyRedeclarationInspection/qualifiedTarget.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# IDE-inspection fixture for PyRedeclarationInspection: exercises a qualified
# assignment target (a.b = ...) after plain assignments to the same names.
a = 10
b = 10
a.b = 10
| 7.333333
| 8
| 0.454545
|
4a0ed543b6daab88212714641ffb3d4152ea31ef
| 5,337
|
py
|
Python
|
examples/benchmark_quality/hetero_sbt/fate-sbt.py
|
qixiuai/FATE
|
6d50af65b96b5b226afda30dfa8e4a1e5746952d
|
[
"Apache-2.0"
] | null | null | null |
examples/benchmark_quality/hetero_sbt/fate-sbt.py
|
qixiuai/FATE
|
6d50af65b96b5b226afda30dfa8e4a1e5746952d
|
[
"Apache-2.0"
] | null | null | null |
examples/benchmark_quality/hetero_sbt/fate-sbt.py
|
qixiuai/FATE
|
6d50af65b96b5b226afda30dfa8e4a1e5746952d
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component.dataio import DataIO
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.intersection import Intersection
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from pipeline.component.evaluation import Evaluation
from pipeline.interface.model import Model
from pipeline.utils.tools import load_job_config
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./xgb_config_binary.yaml", namespace=""):
    """Build and fit a hetero SecureBoost benchmark pipeline.

    :param config: path to the job config YAML, or an already-loaded config
    :param param: path to the algorithm parameter YAML, or a loaded mapping
    :param namespace: suffix appended to the experiment data namespaces
    :return: an empty metrics dict plus the evaluation component's summary
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    if isinstance(param, str):
        param = JobConfig.load_from_file(param)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    backend = config.backend
    work_mode = config.work_mode
    # data sets: reader_0 feeds training, reader_1 feeds validation
    guest_train_data = {"name": param['data_guest_train'], "namespace": f"experiment{namespace}"}
    host_train_data = {"name": param['data_host_train'], "namespace": f"experiment{namespace}"}
    guest_validate_data = {"name": param['data_guest_val'], "namespace": f"experiment{namespace}"}
    host_validate_data = {"name": param['data_host_val'], "namespace": f"experiment{namespace}"}
    # init pipeline: guest initiates, one host participates
    pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
    # set data reader and data-io
    reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
    reader_0.get_party_instance(role="guest", party_id=guest).algorithm_param(table=guest_train_data)
    reader_0.get_party_instance(role="host", party_id=host).algorithm_param(table=host_train_data)
    reader_1.get_party_instance(role="guest", party_id=guest).algorithm_param(table=guest_validate_data)
    reader_1.get_party_instance(role="host", party_id=host).algorithm_param(table=host_validate_data)
    # only the guest side carries labels
    dataio_0, dataio_1 = DataIO(name="dataio_0"), DataIO(name="dataio_1")
    dataio_0.get_party_instance(role="guest", party_id=guest).algorithm_param(with_label=True, output_format="dense")
    dataio_0.get_party_instance(role="host", party_id=host).algorithm_param(with_label=False)
    dataio_1.get_party_instance(role="guest", party_id=guest).algorithm_param(with_label=True, output_format="dense")
    dataio_1.get_party_instance(role="host", party_id=host).algorithm_param(with_label=False)
    # data intersect component
    intersect_0 = Intersection(name="intersection_0")
    intersect_1 = Intersection(name="intersection_1")
    # secure boost component
    hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
                                              num_trees=param['tree_num'],
                                              task_type=param['task_type'],
                                              objective_param={"objective": param['loss_func']},
                                              encrypt_param={"method": "iterativeAffine"},
                                              tree_param={"max_depth": param['tree_depth']},
                                              validation_freqs=1,
                                              learning_rate=param['learning_rate']
                                              )
    # evaluation component
    evaluation_0 = Evaluation(name="evaluation_0", eval_type=param['eval_type'])
    # wire components: reader -> dataio -> intersection -> sbt -> evaluation;
    # dataio_1 reuses dataio_0's fitted model for consistent preprocessing
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(dataio_1, data=Data(data=reader_1.output.data), model=Model(dataio_0.output.model))
    pipeline.add_component(intersect_0, data=Data(data=dataio_0.output.data))
    pipeline.add_component(intersect_1, data=Data(data=dataio_1.output.data))
    pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
                                                            validate_data=intersect_1.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
    pipeline.compile()
    pipeline.fit(backend=backend, work_mode=work_mode)
    return {}, pipeline.get_component("evaluation_0").get_summary()
if __name__ == "__main__":
    parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
    parser.add_argument("-config", type=str,
                        help="config file")
    parser.add_argument("-param", type=str,
                        help="config file for params")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config, args.param)
    else:
        main()
| 46.408696
| 117
| 0.691587
|
4a0ed680c39c4c74a6dda7b89e479eb836a9c798
| 12,620
|
py
|
Python
|
federatedml/feature/one_hot_encoder.py
|
fqiang/FATE
|
36a5a41848f78df7be1e520ae804e64bc67d72fb
|
[
"Apache-2.0"
] | 1
|
2019-12-11T06:27:09.000Z
|
2019-12-11T06:27:09.000Z
|
federatedml/feature/one_hot_encoder.py
|
fqiang/FATE
|
36a5a41848f78df7be1e520ae804e64bc67d72fb
|
[
"Apache-2.0"
] | null | null | null |
federatedml/feature/one_hot_encoder.py
|
fqiang/FATE
|
36a5a41848f78df7be1e520ae804e64bc67d72fb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
import math
from arch.api.utils import log_utils
from federatedml.model_base import ModelBase
from federatedml.param.onehot_encoder_param import OneHotEncoderParam
from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2
from federatedml.statistic.data_overview import get_header
from federatedml.util import consts
LOGGER = log_utils.getLogger()
MODEL_PARAM_NAME = 'OneHotParam'
MODEL_META_NAME = 'OneHotMeta'
MODEL_NAME = 'OneHotEncoder'
class OneHotInnerParam(object):
    """Bookkeeping for which columns one-hot encoding should transform.

    Tracks the original header, a name->index map, the selected columns
    (as both indexes and names) and the post-transform result header.
    """
    def __init__(self):
        self.col_name_maps = {}
        self.header = []
        self.transform_indexes = []
        self.transform_names = []
        self.result_header = []
    def set_header(self, header):
        """Store the header and extend the column-name -> index map."""
        self.header = header
        for pos, name in enumerate(header):
            self.col_name_maps[name] = pos
    def set_result_header(self, result_header: list or tuple):
        """Keep a private copy of the header produced by the encoding."""
        self.result_header = result_header.copy()
    def set_transform_all(self):
        """Mark every column for transformation."""
        self.transform_indexes = list(range(len(self.header)))
        self.transform_names = self.header
    def add_transform_indexes(self, transform_indexes):
        """Register columns by position, skipping out-of-range and duplicates."""
        for pos in (transform_indexes or []):
            if pos >= len(self.header):
                LOGGER.warning("Adding a index that out of header's bound")
            elif pos not in self.transform_indexes:
                self.transform_indexes.append(pos)
                self.transform_names.append(self.header[pos])
    def add_transform_names(self, transform_names):
        """Register columns by name, skipping unknown names and duplicates."""
        for name in (transform_names or []):
            pos = self.col_name_maps.get(name)
            if pos is None:
                LOGGER.warning("Adding a col_name that is not exist in header")
            elif pos not in self.transform_indexes:
                self.transform_indexes.append(pos)
                self.transform_names.append(self.header[pos])
class TransferPair(object):
    """Tracks the distinct values seen in one column and the one-hot header
    name generated for each of those values."""
    def __init__(self, name):
        self.name = name
        self._values = set()
        self._transformed_headers = {}
    def add_value(self, value):
        """Record a value, generating its encoded header name on first sight."""
        if value in self._values:
            return
        self._values.add(value)
        # Guard against exploding the feature space.
        if len(self._values) > consts.ONE_HOT_LIMIT:
            raise ValueError("Input data should not have more than {} possible value when doing one-hot encode"
                             .format(consts.ONE_HOT_LIMIT))
        self._transformed_headers[value] = self.__encode_new_header(value)
    @property
    def values(self):
        """Distinct values observed so far, as a list."""
        return list(self._values)
    @property
    def transformed_headers(self):
        """Encoded header names, aligned with :attr:`values` order."""
        return [self._transformed_headers[v] for v in self.values]
    def query_name_by_value(self, value):
        """Return the encoded header for ``value``, or None if unseen."""
        return self._transformed_headers.get(value) if value in self._values else None
    def __encode_new_header(self, value):
        # e.g. column "x1" with value 3 -> "x1_3"
        return '{}_{}'.format(self.name, value)
class OneHotEncoder(ModelBase):
    """One-hot encodes the selected integer-valued columns of a DTable.

    ``fit`` scans the data to learn each selected column's distinct values,
    then ``transform`` replaces every such column with one 0/1 column per
    observed value. ``export_model``/``load_model`` round-trip the mapping.
    """
    def __init__(self):
        super(OneHotEncoder, self).__init__()
        self.col_maps = {}
        self.schema = {}
        self.output_data = None
        self.model_param = OneHotEncoderParam()
        self.inner_param: OneHotInnerParam = None
    def _init_model(self, model_param):
        # Columns are resolved lazily in _init_params, once the header is known.
        self.model_param = model_param
        # self.cols_index = model_param.cols
    def fit(self, data_instances):
        """Learn the value -> column mapping from the data, then transform it."""
        self._init_params(data_instances)
        f1 = functools.partial(self.record_new_header,
                               inner_param=self.inner_param)
        # Collect per-partition value sets and merge them into one mapping.
        self.col_maps = data_instances.mapPartitions(f1).reduce(self.merge_col_maps)
        LOGGER.debug("Before set_schema in fit, schema is : {}, header: {}".format(self.schema,
                                                                                   self.inner_param.header))
        self._transform_schema()
        data_instances = self.transform(data_instances)
        LOGGER.debug("After transform in fit, schema is : {}, header: {}".format(self.schema,
                                                                                 self.inner_param.header))
        return data_instances
    def transform(self, data_instances):
        """Apply the learned one-hot mapping to every instance."""
        self._init_params(data_instances)
        LOGGER.debug("In Onehot transform, ori_header: {}, transfered_header: {}".format(
            self.inner_param.header, self.inner_param.result_header
        ))
        one_data = data_instances.first()[1].features
        LOGGER.debug("Before transform, data is : {}".format(one_data))
        f = functools.partial(self.transfer_one_instance,
                              col_maps=self.col_maps,
                              inner_param=self.inner_param)
        new_data = data_instances.mapValues(f)
        self.set_schema(new_data)
        one_data = new_data.first()[1].features
        LOGGER.debug("transfered data is : {}".format(one_data))
        return new_data
    def _transform_schema(self):
        """Expand the header: an encoded column becomes one entry per observed
        value; untouched columns keep their original name."""
        header = self.inner_param.header.copy()
        LOGGER.debug("[Result][OneHotEncoder]Before one-hot, "
                     "data_instances schema is : {}".format(self.inner_param.header))
        result_header = []
        for col_name in header:
            if col_name not in self.col_maps:
                result_header.append(col_name)
                continue
            pair_obj = self.col_maps[col_name]
            new_headers = pair_obj.transformed_headers
            result_header.extend(new_headers)
        self.inner_param.set_result_header(result_header)
        LOGGER.debug("[Result][OneHotEncoder]After one-hot, data_instances schema is : {}".format(header))
    def _init_params(self, data_instances):
        """Resolve the transform columns against the data header (idempotent:
        returns early when inner_param is already initialised)."""
        if len(self.schema) == 0:
            self.schema = data_instances.schema
        if self.inner_param is not None:
            return
        self.inner_param = OneHotInnerParam()
        # self.schema = data_instances.schema
        LOGGER.debug("In _init_params, schema is : {}".format(self.schema))
        header = get_header(data_instances)
        self.inner_param.set_header(header)
        if self.model_param.transform_col_indexes == -1:
            # -1 is the "transform all columns" sentinel.
            self.inner_param.set_transform_all()
        else:
            self.inner_param.add_transform_indexes(self.model_param.transform_col_indexes)
            self.inner_param.add_transform_names(self.model_param.transform_col_names)
    @staticmethod
    def record_new_header(data, inner_param: OneHotInnerParam):
        """
        Generate a new schema based on data value. Each new value will generate a new header.
        Returns
        -------
        col_maps: a dict in which keys are original header, values are dicts. The dicts in value
        e.g.
        cols_map = {"x1": {1 : "x1_1"},
                    ...}
        """
        col_maps = {}
        for col_name in inner_param.transform_names:
            col_maps[col_name] = TransferPair(col_name)
        for _, instance in data:
            feature = instance.features
            for col_idx, col_name in zip(inner_param.transform_indexes, inner_param.transform_names):
                pair_obj = col_maps.get(col_name)
                # One-hot supports whole numbers only; reject fractional input.
                int_feature = math.ceil(feature[col_idx])
                if int_feature != feature[col_idx]:
                    raise ValueError("Onehot input data support integer only")
                feature_value = int_feature
                pair_obj.add_value(feature_value)
        return col_maps
    @staticmethod
    def encode_new_header(col_name, feature_value):
        # e.g. ("x1", 3) -> "x1_3"
        return '_'.join([str(x) for x in [col_name, feature_value]])
    @staticmethod
    def merge_col_maps(col_map1, col_map2):
        """Union two partition-level col_maps (used as the reduce function)."""
        if col_map1 is None and col_map2 is None:
            return None
        if col_map1 is None:
            return col_map2
        if col_map2 is None:
            return col_map1
        for col_name, pair_obj in col_map2.items():
            if col_name not in col_map1:
                col_map1[col_name] = pair_obj
                continue
            else:
                col_1_obj = col_map1[col_name]
                for value in pair_obj.values:
                    col_1_obj.add_value(value)
        return col_map1
    @staticmethod
    def transfer_one_instance(instance, col_maps, inner_param):
        """Rewrite one instance's feature vector into the expanded header
        layout: pass-through columns keep their value, encoded columns get a
        1 in the column matching their value and 0 everywhere else."""
        feature = instance.features
        result_header = inner_param.result_header
        # new_feature = [0 for _ in result_header]
        _transformed_value = {}
        for idx, col_name in enumerate(inner_param.header):
            value = feature[idx]
            if col_name in result_header:
                # Column was not one-hot encoded: copy the value unchanged.
                _transformed_value[col_name] = value
            elif col_name not in col_maps:
                continue
            else:
                pair_obj = col_maps.get(col_name)
                new_col_name = pair_obj.query_name_by_value(value)
                if new_col_name is None:
                    # Value unseen during fit: all its columns stay 0.
                    continue
                _transformed_value[new_col_name] = 1
        new_feature = [_transformed_value[x] if x in _transformed_value else 0 for x in result_header]
        feature_array = np.array(new_feature)
        instance.features = feature_array
        return instance
    def set_schema(self, data_instance):
        """Attach the expanded header to the transformed DTable."""
        self.schema['header'] = self.inner_param.result_header
        data_instance.schema = self.schema
    def _get_meta(self):
        # Protobuf meta: which columns were transformed, over which header.
        meta_protobuf_obj = onehot_meta_pb2.OneHotMeta(transform_col_names=self.inner_param.transform_names,
                                                       header=self.inner_param.header,
                                                       need_run=self.need_run)
        return meta_protobuf_obj
    def _get_param(self):
        # Protobuf param: per-column value list and generated header names.
        pb_dict = {}
        for col_name, pair_obj in self.col_maps.items():
            values = [str(x) for x in pair_obj.values]
            value_dict_obj = onehot_param_pb2.ColsMap(values=values,
                                                      transformed_headers=pair_obj.transformed_headers)
            pb_dict[col_name] = value_dict_obj
        result_obj = onehot_param_pb2.OneHotParam(col_map=pb_dict,
                                                  result_header=self.inner_param.result_header)
        return result_obj
    def export_model(self):
        """Serialise the fitted mapping as a {meta, param} protobuf dict."""
        if self.model_output is not None:
            LOGGER.debug("Model output is : {}".format(self.model_output))
            return self.model_output
        if self.inner_param is None:
            self.inner_param = OneHotInnerParam()
        meta_obj = self._get_meta()
        param_obj = self._get_param()
        result = {
            MODEL_META_NAME: meta_obj,
            MODEL_PARAM_NAME: param_obj
        }
        return result
    def load_model(self, model_dict):
        """Rebuild inner_param and col_maps from a previously exported model."""
        self._parse_need_run(model_dict, MODEL_META_NAME)
        model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME)
        model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME)
        self.model_output = {
            MODEL_META_NAME: model_meta,
            MODEL_PARAM_NAME: model_param
        }
        self.inner_param = OneHotInnerParam()
        self.inner_param.set_header(list(model_meta.header))
        self.inner_param.add_transform_names(list(model_meta.transform_col_names))
        col_maps = dict(model_param.col_map)
        self.col_maps = {}
        for col_name, cols_map_obj in col_maps.items():
            if col_name not in self.col_maps:
                self.col_maps[col_name] = TransferPair(col_name)
            pair_obj = self.col_maps[col_name]
            for feature_value in list(cols_map_obj.values):
                # NOTE(review): eval() restores the original value type from its
                # string form — this trusts the stored model content entirely.
                pair_obj.add_value(eval(feature_value))
        self.inner_param.set_result_header(list(model_param.result_header))
| 37.337278
| 111
| 0.626307
|
4a0ed77e730ad96c8d97b9e9d3b39a5017f6a06f
| 12,290
|
py
|
Python
|
preprocessing/data_type_detector.py
|
KI-labs/ML-Navigator
|
c5371da7411e1522a5f423757ae6bd99135e9dbf
|
[
"Apache-2.0"
] | 18
|
2019-09-13T11:34:38.000Z
|
2020-12-09T01:48:07.000Z
|
preprocessing/data_type_detector.py
|
KI-labs/ML-Navigator
|
c5371da7411e1522a5f423757ae6bd99135e9dbf
|
[
"Apache-2.0"
] | 25
|
2019-09-18T07:49:05.000Z
|
2022-03-12T00:03:56.000Z
|
preprocessing/data_type_detector.py
|
KI-labs/ML-Navigator
|
c5371da7411e1522a5f423757ae6bd99135e9dbf
|
[
"Apache-2.0"
] | 3
|
2019-10-07T14:13:17.000Z
|
2021-01-07T02:25:23.000Z
|
import logging
import os
from IPython.display import display
import pandas as pd
from preprocessing.json_preprocessor import feature_with_json_detector
from preprocessing.data_explorer import outliers_detector
logger = logging.getLogger(__name__)
formatting = (
"%(asctime)s: %(levelname)s: File:%(filename)s Function:%(funcName)s Line:%(lineno)d "
"message:%(message)s"
)
logging.basicConfig(
filename=os.path.join(os.path.dirname(os.path.dirname(__file__)), "logs/logs.log"),
level=logging.INFO,
format=formatting,
)
pd.set_option('display.max_rows', None)
class ColumnDataFormat:
""" Data type detector
This class contains methods that help to detect the type of the data in each column of the given dataframe. The
supported data types for now are: date, categorical feature with string values, numeric features with integer
values (categorical), numeric with continuous values, and nested JSON data format.
:param: pd.DataFrame dataframe: A pandas dataframe that contains the dataset e.g. train_dataframe.
:methods: - `find_date_columns` - Date data type finder
- `number_or_string` - Numeric-string finder
- `json_detector`- Valid JSON data finder
- `categorical_or_numeric` - Numeric continuous-discrete finder
"""
    def __init__(self, dataframe: pd.DataFrame):
        """
        :param pd.DataFrame dataframe: A pandas dataframe that contains the dataset e.g. train_dataframe
        """
        # Kept by reference; detection methods may modify it in place
        # (see find_date_columns, which converts parseable object columns).
        self.dataframe = dataframe
    def find_date_columns(self) -> list:
        """ Date data type finder

        This method finds date columns automatically.

        Note: object-dtype columns that parse successfully are converted to
        datetime in place (``self.dataframe`` is mutated).

        :return:
                date_columns: list of columns that contain date format data
        """
        logger.info("Looking for date columns")
        def look_for_date(column_i: pd.Series):
            # Parse each distinct value once, then map back over the column —
            # avoids re-parsing repeated values.
            dates = {date: pd.to_datetime(date) for date in column_i.unique()}
            return column_i.apply(lambda x: dates[x])
        date_columns = []
        # Columns already stored with a native datetime dtype.
        possible_date = list(self.dataframe.select_dtypes(include=["datetime"]).columns)
        if possible_date:
            logger.info("Date columns with native date format was found")
            logger.debug(
                f"there are {len(possible_date)} date column with native format (datetime)"
            )
            date_columns = [x for x in possible_date]
            logger.debug(
                f"the columns that contain native date format are {date_columns}"
            )
        # Try to parse every object (string) column as dates.
        for col in self.dataframe.select_dtypes(include=["object"]).columns:
            try:
                self.dataframe[col] = look_for_date(self.dataframe[col])
                date_columns.append(col)
                logger.info(f"column {col} has date data type")
            except ValueError:
                # Not parseable as dates; leave the column untouched.
                # NOTE(review): pd.to_datetime can also raise TypeError on
                # mixed-type columns, which would propagate here — confirm.
                logger.debug(f"{col} has no date data type")
                pass
        return date_columns
    def number_or_string(self, date_columns: list):
        """ Numeric-string finder

        The function extracts which columns in the pandas dataframe contain numeric values and which have string values.
        It returns three lists of strings

        :param list date_columns: contains the name of the columns that have date format data to exclude those
                columns from the search.
        :return:
                - string_columns - A list contains the column names that contain string type data.
                - numeric_columns - contains the list of columns that have numbers.
                - other_columns - contains the name of columns that have unknown type of data if they exist
        """
        string_columns = []
        numeric_columns = []
        other_columns = []
        columns_to_consider = [
            x for x in self.dataframe.columns if x not in date_columns
        ]
        # NOTE(review): every quantifier in this regex is optional, so it also
        # matches the empty string — the str.contains check below is then
        # effectively always True; confirm whether stricter matching (e.g.
        # anchored full-match) was intended.
        regex_for_numeric = r"[-+]?[0-9]*\.?[0-9]*"
        for column_i in columns_to_consider:
            # Object dtype is taken to mean string data.
            if self.dataframe[column_i].dropna().dtype == object:
                string_columns.append(column_i)
                continue
            if (
                self.dataframe[column_i].dropna().astype(str).str.contains(regex_for_numeric, regex=True).all()
            ):
                numeric_columns.append(column_i)
                continue
            other_columns.append(column_i)
        return string_columns, numeric_columns, other_columns
def json_detector(self, columns_with_strings: list):
    """ Valid JSON data finder
    Detect whether the string-typed columns contain valid nested JSON data,
    delegating the per-column check to ``feature_with_json_detector``.
    :param list columns_with_strings: List of the columns that contain string data
    :return:
            - string_columns - A list contains the name of the columns that don't have valid JSON nested data.
            - json_columns - A list contain the name of the columns that have valid nested JSON data.
    """
    json_columns = []
    string_columns = []
    for column_i in columns_with_strings:
        try:
            if feature_with_json_detector(self.dataframe[column_i]):
                json_columns.append(column_i)
            else:
                string_columns.append(column_i)
        # BUG FIX: this was a bare ``except:``, which also swallows
        # SystemExit and KeyboardInterrupt; catch Exception so the
        # fallback only applies to real runtime errors in the detector.
        except Exception:
            print("This column cannot be checked for json data type:", column_i)
            print("it is considered as an object type")
            # Unverifiable columns are conservatively treated as strings.
            string_columns.append(column_i)
    return string_columns, json_columns
def categorical_or_numeric(self, numbers_column_list: list, threshold: float):
    """ Numeric continuous-discrete finder
    Split the purely numeric columns into discrete (categorical) and
    continuous groups. A column whose count of distinct non-null values is
    at most ``threshold`` is considered categorical; otherwise it is
    treated as numeric continuous.
    :param list numbers_column_list: names of the columns that hold number
            values.
    :param int threshold: maximum number of unique values for a column to
            still count as categorical.
    :return:
            - categorical_columns - columns with numeric discrete data.
            - numeric_columns - columns with numeric continuous data.
    """
    categorical_columns = []
    numeric_columns = []
    for feature in numbers_column_list:
        # nunique() ignores NaN, matching len(value_counts()) exactly.
        distinct_values = self.dataframe[feature].nunique()
        bucket = categorical_columns if distinct_values <= threshold else numeric_columns
        bucket.append(feature)
    return categorical_columns, numeric_columns
def detect_column_types(dataframe: pd.DataFrame, threshold: int = 50):
    """ Features' types detector
    Classify every column of ``dataframe`` by the kind of data it holds,
    using the helpers exposed by the ``ColumnDataFormat`` class.
    :param pd.DataFrame dataframe: the dataset to inspect e.g. train_dataframe
    :param int threshold: maximum number of unique values for a numeric
            column to still be treated as categorical. Default is 50. This
            matters most when applying one-hot encoding.
    :return:
            - columns_types - dict mapping each type label
              ("categorical_string", "categorical_integer", "continuous",
              "date", "json", "other") to the matching column names.
            - number_of_columns - total number of classified columns,
              validated against the dataframe width.
    """
    detector = ColumnDataFormat(dataframe)
    # Dates are detected first so the remaining helpers can skip them.
    date_columns = detector.find_date_columns()
    # Partition the non-date columns into string / numeric / unknown.
    text_columns, number_columns, other_columns = detector.number_or_string(
        date_columns
    )
    # Split numeric columns into discrete (categorical) vs continuous.
    categorical_integer, continuous_columns = detector.categorical_or_numeric(
        number_columns, threshold
    )
    # Some "string" columns may actually hold valid nested JSON documents.
    text_columns, json_columns = detector.json_detector(text_columns)
    columns_types = {
        "categorical_string": text_columns,
        "categorical_integer": categorical_integer,
        "continuous": continuous_columns,
        "date": date_columns,
        "json": json_columns,
        "other": other_columns
    }
    number_of_columns = sum(len(group) for group in columns_types.values())
    # Sanity check: every dataframe column must appear in exactly one group.
    if number_of_columns != dataframe.shape[1]:
        raise ValueError("Number of columns must be equal to the dataframe's columns")
    return columns_types, number_of_columns
def detect_columns_types_summary(dataframes_dict: dict, threshold: int = 50) -> dict:
    """Data type summarizer
    This function summarizes the findings after applying the `detect_column_types` function to each given dataset,
    prints a per-dataset table of column-type counts, and runs a basic outlier check on the numeric columns.
    :param dict dataframes_dict: a dictionary of pandas dataframes e.g. {"train": train_dataframe,
            "test": test_dataframe}
    :param int threshold: The maximum number of categories that a categorical feature should have before considering
            it as continuous numeric feature.
    :return:
            columns_types_dict: A dictionary that contains the lists of the columns filtered based on the type of the
            data that they contain.
    """
    # Classify the columns of every dataset independently.
    columns_types_dict = {}
    for key_i, dataframe in dataframes_dict.items():
        columns_types, _ = detect_column_types(
            dataframe, threshold=threshold
        )
        columns_types_dict[key_i] = columns_types
    print(f"A summary of the data sets")
    # Build a {dataset: {column type: count}} table from the lists above.
    data = dict((k, dict((k_, len(v_)) for k_, v_ in v.items())) for k, v in columns_types_dict.items())
    for k, v in data.items():
        v['total amount'] = sum(v.values())
    summary = pd.DataFrame(data)
    summary.index.name = 'column type'
    # NOTE(review): `display` is the IPython rich renderer; assumes a
    # notebook context and that it is imported elsewhere in this module.
    display(summary)
    print(
        f"\033[1mNOTE: numeric categorical columns that contains more than {threshold} "
        "classes are considered numeric continuous features.\033[0;0m"
    )
    print(
        "\033[1mNOTE: You can modify the threshold value if you want to consider more or less numeric categorical "
        "features as numeric continuous features.\033[0;0m"
    )
    print("Applying Robust Random Cut Forest Algorithm for outliers detection")
    print("Only 'continuous' and 'categorical_integer' are considered for outliers detection")
    for key_i, dataframe in dataframes_dict.items():
        # Restrict the outlier check to purely numeric columns; NaNs are
        # imputed with the column mean before converting to a numpy array.
        data_points = dataframe[columns_types_dict[key_i]["continuous"] +
                                columns_types_dict[key_i]["categorical_integer"]]
        data_points = data_points.fillna(data_points.mean()).to_numpy()
        # `outliers_detector` is defined elsewhere in this module; the
        # second return value is a per-row outlier indicator.
        _, is_outlier = outliers_detector(data_points)
        if sum(is_outlier) > 0:
            print(
                f"\033[1mNOTE: The dataset {key_i} may have {sum(is_outlier)} outliers.\033[0;0m"
            )
    return columns_types_dict
| 41.380471
| 235
| 0.668023
|
4a0ed82bb32a77af4ab6cff4ea5959397085a32d
| 6,637
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/halomonassptd01.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/halomonassptd01.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/halomonassptd01.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Halomonas sp. TD01.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:05:44.423135
The undirected graph Halomonas sp. TD01 has 3798 nodes and 517644 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.07179 and has 8 connected components, where the component with most
nodes has 3780 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 245, the mean node degree is 272.59, and
the node degree mode is 2. The top 5 most central nodes are 999141.GME_00050
(degree 1656), 999141.GME_18363 (degree 1593), 999141.GME_05120 (degree
1402), 999141.GME_17522 (degree 1296) and 999141.GME_17657 (degree 1248).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import HalomonasSpTd01
# Then load the graph
graph = HalomonasSpTd01()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def HalomonasSpTd01(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Halomonas sp. TD01 graph.

    The graph is automatically retrieved from the STRING repository and
    cached locally under ``cache_path`` on first use.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs/string",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Halomonas sp. TD01 graph.

    References
    ---------------------
    Please cite the following if you use the data:
    Szklarczyk et al., "STRING v11: protein--protein association networks
    with increased coverage, supporting functional discovery in genome-wide
    experimental datasets", Nucleic Acids Research, 47(D1):D607--D613, 2019.
    """
    # Build the retrieval helper first, then call it to materialise the graph.
    graph_loader = AutomaticallyRetrievedGraph(
        graph_name="HalomonasSpTd01",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return graph_loader()
| 35.116402
| 223
| 0.700015
|
4a0ed8e4fc51373fea48956443be2732cce699b4
| 1,832
|
py
|
Python
|
tests/test_misc.py
|
WeiWeic6222848/PyRep
|
231a1ac6b0a179cff53c1d403d379260b9f05f2f
|
[
"MIT"
] | 505
|
2019-06-26T17:02:44.000Z
|
2022-03-31T04:03:23.000Z
|
tests/test_misc.py
|
WeiWeic6222848/PyRep
|
231a1ac6b0a179cff53c1d403d379260b9f05f2f
|
[
"MIT"
] | 255
|
2019-06-27T07:04:17.000Z
|
2022-03-29T18:25:48.000Z
|
tests/test_misc.py
|
WeiWeic6222848/PyRep
|
231a1ac6b0a179cff53c1d403d379260b9f05f2f
|
[
"MIT"
] | 171
|
2019-06-27T05:33:50.000Z
|
2022-03-30T03:34:24.000Z
|
import unittest
from pyrep.errors import PyRepError
from tests.core import TestCore
from pyrep.misc.distance import Distance
from pyrep.misc.signals import IntegerSignal, FloatSignal
from pyrep.misc.signals import DoubleSignal, StringSignal
# TODO: These tests will be re-enabled once bug has been fixed in CoppeliaSim.
class TestMisc(TestCore):
    # Placeholder test case: the Distance-based checks below are kept
    # (commented out) until the upstream CoppeliaSim bug is resolved.
    pass

    # def test_get_distance(self):
    #     Distance('dist_cubes')

    # def test_read_distance(self):
    #     d = Distance('dist_cubes')
    #     dist = d.read()
    #     self.assertAlmostEqual(dist, 0.1, places=3)

    # def test_read_distance_not_measurable(self):
    #     d = Distance('dist_cubes_fail')
    #     with self.assertRaises(PyRepError):
    #         d.read()
class TestSignal(TestCore):
    """Exercise set/get/clear round-trips for every signal wrapper type."""

    # (signal class, representative value) pairs covered by each test.
    SIGNALS = [
        (IntegerSignal, 99),
        (FloatSignal, 55.3),
        (DoubleSignal, 22.2),
        (StringSignal, 'hello')
    ]

    def test_set_get_clear_signals(self):
        # Round-trip each signal type through set() -> get() -> clear().
        for signal_class, expected in TestSignal.SIGNALS:
            with self.subTest(signal=str(signal_class)):
                signal = signal_class('my_signal')
                signal.set(expected)
                observed = signal.get()
                if isinstance(expected, float):
                    # Float/double transport may lose precision.
                    self.assertAlmostEqual(observed, expected, places=3)
                else:
                    self.assertEqual(observed, expected)
                # Exactly one pending value should have been cleared.
                self.assertEqual(signal.clear(), 1)

    def test_get_signal_fails_when_empty(self):
        # Reading a signal that was never set must raise a PyRepError.
        for signal_class, _ in TestSignal.SIGNALS:
            with self.subTest(signal=str(signal_class)):
                signal = signal_class('my_signal')
                with self.assertRaises(PyRepError):
                    signal.get()
# Allow running this test module directly with `python test_misc.py`.
if __name__ == '__main__':
    unittest.main()
| 31.050847
| 78
| 0.620633
|
4a0ed9c94d287440d2a0d4237c2779a9ee952295
| 16,634
|
py
|
Python
|
POVME/packages/clustering/binding_site_overlap.py
|
guochaoxu2019/POVME3.0
|
717f2764a0a33989f935ef99d48f4afba4bc0fc5
|
[
"MIT"
] | 25
|
2016-07-05T21:54:26.000Z
|
2021-12-09T09:14:36.000Z
|
POVME/packages/clustering/binding_site_overlap.py
|
guochaoxu2019/POVME3.0
|
717f2764a0a33989f935ef99d48f4afba4bc0fc5
|
[
"MIT"
] | 41
|
2016-06-27T17:54:15.000Z
|
2022-03-24T09:09:39.000Z
|
POVME/packages/clustering/binding_site_overlap.py
|
guochaoxu2019/POVME3.0
|
717f2764a0a33989f935ef99d48f4afba4bc0fc5
|
[
"MIT"
] | 10
|
2016-06-27T16:25:06.000Z
|
2022-02-17T02:04:54.000Z
|
#!python
# Calculates the binding site overlap between sets of POVME outputs.
# Started July 9th, 2014
# Celia Wong
# Advised by Jeff Wagner
# Amaro Lab, UCSD
import numpy
import sys
import re
import os
import csv
from optparse import OptionParser
class Trajectory():
    """Container for the POVME dummy-atom point clouds of one trajectory.

    ``read_traj`` loads the per-frame volumetric ``frame_<N>.npy`` grids
    (sorted numerically by frame index) and, when ``color`` is true, the
    matching ``*_aromatic`` / ``*_hbondAcceptor`` / ``*_hbondDonor``
    companion grids, storing each frame as a set of coordinate tuples.
    """

    def __init__(self):
        # One parallel list per point category; list index == sorted frame order.
        self.coordinates = []
        self.aromatic_coordinates = []
        self.hbondAcceptor_coordinates = []
        self.hbondDonor_coordinates = []
        self.frames = 0
        self.frameToFileName = {}
        self.volumetric_filename = []
        self.aromatic_filename = []
        self.hbondAcceptor_filename = []
        self.hbondDonor_filename = []

    def read_traj(self, traj_file, color):
        """Load every volumetric ``frame_<N>.npy`` file listed in ``traj_file``.

        :param traj_file: list of volumetric .npy file names.
        :param color: when true, also load the aromatic / hbondAcceptor /
            hbondDonor companion files next to each volumetric file.
        """
        # Sort numerically by the frame index embedded in the file name so
        # that frame_10 sorts after frame_9 (a plain string sort would not).
        sortedFramesAndNames = [
            (int(re.findall('frame_([0-9]+).npy', name)[0]), name)
            for name in traj_file
        ]
        sortedFramesAndNames.sort(key=lambda x: x[0])
        sortedNames = [i[1] for i in sortedFramesAndNames]
        print(sortedNames)
        self.frames = len(sortedNames)
        for index, fileName in enumerate(sortedNames):
            self.volumetric_filename.append(fileName)
            self.coordinates.append(
                set([tuple(dummy_atom) for dummy_atom in numpy.load(fileName)])
            )
            self.frameToFileName[index] = fileName
        # Companion files are derived from the sorted volumetric names, so
        # every per-category list stays index-aligned with the frames.
        if color:
            for fileName in self.volumetric_filename:
                # BUG FIX: the original used fileName.strip('.npy'), which
                # strips any leading/trailing '.', 'n', 'p', 'y' characters
                # (e.g. 'py_frame_1.npy' -> '_frame_1'); slice off the
                # literal '.npy' suffix instead.
                base = fileName[:-len('.npy')] if fileName.endswith('.npy') else fileName
                aromatic_file = base + '_aromatic.npy'
                hbondAcc_file = base + '_hbondAcceptor.npy'
                hbondDon_file = base + '_hbondDonor.npy'
                # Keep only points whose occupancy value (column 3) exceeds
                # the 0.02 noise threshold; store the xyz triple only.
                self.aromatic_filename.append(aromatic_file)
                self.aromatic_coordinates.append(
                    set([tuple(dummy_atom[:3]) for dummy_atom in numpy.load(aromatic_file) if dummy_atom[3] > 0.02])
                )
                self.hbondAcceptor_filename.append(hbondAcc_file)
                self.hbondAcceptor_coordinates.append(
                    set([tuple(dummy_atom[:3]) for dummy_atom in numpy.load(hbondAcc_file) if dummy_atom[3] > 0.02])
                )
                self.hbondDonor_filename.append(hbondDon_file)
                self.hbondDonor_coordinates.append(
                    set([tuple(dummy_atom[:3]) for dummy_atom in numpy.load(hbondDon_file) if dummy_atom[3] > 0.02])
                )
class Overlap():
    """Pairwise overlap metrics between per-frame coordinate sets.

    Each frame's "volume" is simply the number of dummy-atom points it
    contains; similarity scores are derived from set intersections.
    """

    def __init__(self, coordinates):
        self.coordinates = coordinates
        # Cache per-frame point counts up front.
        self.volumes = [len(frame) for frame in self.coordinates]

    def sum_volumes(self, frame1, frame2):
        """Combined point count of the two frames."""
        return self.volumes[frame1] + self.volumes[frame2]

    def number_overlap(self, frame1, frame2):
        """Number of grid points shared by the two frames."""
        frame_a = self.coordinates[frame1]
        frame_b = self.coordinates[frame2]
        if not (isinstance(frame_a, set) and isinstance(frame_b, set)):
            print("Coordinates must be contained in a set object")
            sys.exit(1)
        shared = len(frame_a & frame_b)
        # Sanity check: an intersection can never exceed either operand.
        if shared > min(len(frame_a), len(frame_b)):
            print('invalid overlap value')
        return shared

    def volume_overlap(self, frame1, frame2):
        """Overlap expressed in frame1's volume units."""
        shared = self.number_overlap(frame1, frame2)
        fraction_shared = shared / float(len(self.coordinates[frame1]))
        return fraction_shared * self.volumes[frame1]

    def tanimoto_overlap(self, vlap, frame1, frame2):
        """Tanimoto index: intersection divided by union of the volumes."""
        union_volume = self.sum_volumes(frame1, frame2) - vlap
        return float(vlap / float(union_volume))

    def tversky_overlap(self, vlap, frame1, frame2):
        """Tversky index: intersection divided by frame1's volume
        (asymmetric by construction)."""
        return float(vlap / float(self.volumes[frame1]))
''' Helper function needed to get all the input files from optparse '''
def vararg_callback(option, opt_str, value, parser):
    """optparse callback that consumes every following command-line token
    up to the next option flag and stores the collected list on the
    option's destination attribute."""
    assert value is None
    collected = []
    for token in parser.rargs:
        # Stop at the next option-like token ('-x' or '--xyz'); a lone '-'
        # is kept as an ordinary argument.
        if token.startswith("-") and len(token) > 1:
            break
        collected.append(token)
    # Remove the consumed tokens from the remaining-arguments list.
    del parser.rargs[:len(collected)]
    setattr(parser.values, option.dest, collected)
class main():
    # Command-line driver for the whole tool: `main(sys.argv)` parses the
    # options, loads the POVME .npy frames, and writes Tanimoto/Tversky
    # similarity matrices (plus colored variants when -c is given).
    # NOTE(review): this file is Python-2 code (print statements, csv in
    # 'wb' mode); keep it on a Python-2 interpreter or port deliberately.

    def __init__(self,argv):
        # Everything runs inside the constructor; there are no other methods.
        if len(argv) == 1:
            print "Cannot run binding_site_overlap.py: Need to specify .npy files to be read"
            sys.exit(1)
        parser = OptionParser()
        parser.add_option("-f", dest = "filename", action = "callback", callback = vararg_callback, help = "All files from POVME that you want to run similarity calculations on.")
        parser.add_option("-c", action = "store_true", dest="color", help = "run similarity calculations on colored output from POVME")
        parser.add_option("--csv", action = "store_true", dest="csv", help = "save human-readable CSV distance matrices.")
        (options, args) = parser.parse_args(argv)
        #print command_input['traj_file']
        file_input = Trajectory()
        #file_input.read_traj(argv)
        file_input.read_traj(options.filename, options.color)
        ''' Saving which index refers to which frame file for use in clustering '''
        frames_dict = file_input.frameToFileName
        with open('indexMapToFrames.csv','wb') as csvfile:
            fieldnames = ['index','frame']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            for i in frames_dict:
                writer.writerow({"index": i, "frame" : frames_dict[i]})
        # One Overlap helper per point category (volumetric + three colors).
        overlap_value = Overlap(file_input.coordinates)
        aromatic_overlap = Overlap(file_input.aromatic_coordinates)
        hbondAcc_overlap = Overlap(file_input.hbondAcceptor_coordinates)
        hbondDon_overlap = Overlap(file_input.hbondDonor_coordinates)
        num_frames = len(file_input.coordinates)
        print "The number of frames found was: {0}".format(num_frames)
        '''Make a matrix of number of overlapping coordinates first'''
        # Always calculate the overlapping points for volumetric
        overlapStyle = 2
        num_overlap = numpy.empty([file_input.frames,file_input.frames], dtype=float)
        if overlapStyle == 1:
            # Direct pairwise set intersections (upper triangle only).
            for f1 in range(num_frames):
                for f2 in range(f1,num_frames):
                    num_overlap[f1,f2] = overlap_value.number_overlap(f1, f2)
        elif overlapStyle == 2:
            # Vectorized variant: encode each frame as a boolean membership
            # row over the union of all points, then count shared bits.
            allPointsSet = set()
            for f1 in range(num_frames):
                for coord in file_input.coordinates[f1]:
                    allPointsSet.add(coord)
            coord2vectPos = dict([(coord, i) for i, coord in enumerate(allPointsSet)])
            nVectPos = len(allPointsSet)
            # NOTE(review): numpy.bool was removed in NumPy >= 1.24; modern
            # NumPy would need dtype=bool here.
            vecPosMatrix = numpy.zeros((num_frames, nVectPos), dtype=numpy.bool)
            for f1 in range(num_frames):
                for point in file_input.coordinates[f1]:
                    vecPosMatrix[f1,coord2vectPos[point]] = 1
            for f1 in range(num_frames):
                for f2 in range(f1,num_frames):
                    #raise Exception('There\'s a problem here. In comparing two frames in chris condon\'s pockets, values >1 were found in the tanimoto matrix.')
                    num_overlap[f1,f2] = numpy.count_nonzero(vecPosMatrix[f1,:] & vecPosMatrix[f2,:])
                    num_overlap[f2,f1] = num_overlap[f1,f2]
        # Only calculate the colored option if the color option was set
        if (options.color):
            aromatic_num_overlap = numpy.empty([file_input.frames,file_input.frames], dtype=float)
            for f1 in range(num_frames):
                for f2 in range(num_frames):
                    aromatic_num_overlap[f1,f2] = aromatic_overlap.number_overlap(f1, f2)
            hbondAcc_num_overlap = numpy.empty([file_input.frames,file_input.frames], dtype=float)
            for f1 in range(num_frames):
                for f2 in range(num_frames):
                    hbondAcc_num_overlap[f1,f2] = hbondAcc_overlap.number_overlap(f1, f2)
            hbondDon_num_overlap = numpy.empty([file_input.frames,file_input.frames], dtype=float)
            for f1 in range(num_frames):
                for f2 in range(num_frames):
                    hbondDon_num_overlap[f1,f2] = hbondDon_overlap.number_overlap(f1, f2)
        '''Record the overlap_matrix in a csv file'''
        print "Starting Tanimoto calculations"
        tanimoto_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
        if (options.color):
            aromatic_tanimoto_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            hbondAcc_tanimoto_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            hbondDon_tanimoto_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            colored_tanimoto_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
        for f1 in range(num_frames):
            for f2 in range(f1,num_frames):
                # Tanimoto is symmetric: compute the upper triangle and mirror.
                tanimoto_result = overlap_value.tanimoto_overlap(num_overlap[f1,f2],f1,f2)
                tanimoto_matrix[f1,f2] = tanimoto_result
                tanimoto_matrix[f2,f1] = tanimoto_result
                if (options.color):
                    aromatic_result = aromatic_overlap.tanimoto_overlap(aromatic_num_overlap[f1,f2],f1,f2)
                    aromatic_tanimoto_matrix[f1,f2] = aromatic_result
                    aromatic_tanimoto_matrix[f2,f1] = aromatic_result
                    hbondAcc_result = hbondAcc_overlap.tanimoto_overlap(hbondAcc_num_overlap[f1,f2],f1,f2)
                    hbondAcc_tanimoto_matrix[f1,f2] = hbondAcc_result
                    hbondAcc_tanimoto_matrix[f2,f1] = hbondAcc_result
                    hbondDon_result = hbondDon_overlap.tanimoto_overlap(hbondDon_num_overlap[f1,f2],f1,f2)
                    hbondDon_tanimoto_matrix[f1,f2] = hbondDon_result
                    hbondDon_tanimoto_matrix[f2,f1] = hbondDon_result
                    # The "colored" score is the plain average of the four metrics.
                    average_similarity = (aromatic_result + hbondAcc_result + hbondDon_result + tanimoto_result)/4
                    colored_tanimoto_matrix[f1,f2] = average_similarity
                    colored_tanimoto_matrix[f2,f1] = average_similarity
        print "Overlap Matrix for Tanimoto calculation"
        print tanimoto_matrix
        if (options.color):
            print "Aromatic"
            print aromatic_tanimoto_matrix
            print "Hydrogen Bond Acceptor"
            print hbondAcc_tanimoto_matrix
            print "Hydrogen Bond Donor"
            print hbondDon_tanimoto_matrix
        print "\n"
        if (options.csv):
            numpy.savetxt(os.getcwd()+'/POVME_Tanimoto_matrix.csv', tanimoto_matrix, delimiter=',')
        if (options.color) and (options.csv):
            numpy.savetxt(os.getcwd()+'/POVME_Tanimoto_matrix_aromatic.csv', aromatic_tanimoto_matrix, delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tanimoto_matrix_hbondAcceptor.csv', hbondAcc_tanimoto_matrix, delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tanimoto_matrix_hbondDonor.csv', hbondDon_tanimoto_matrix, delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tanimoto_matrix_colored.csv', colored_tanimoto_matrix, delimiter=',')
        numpy.save('tanimoto_matrix.npy', tanimoto_matrix)
        if (options.color):
            numpy.save('aromatic_tanimoto_matrix.npy', aromatic_tanimoto_matrix)
            numpy.save('hbondAcc_tanimoto_matrix.npy', hbondAcc_tanimoto_matrix)
            numpy.save('hbondDon_tanimoto_matrix.npy', hbondDon_tanimoto_matrix)
        print "Starting Tversky calculations"
        tversky_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
        if (options.color):
            aromatic_tversky_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            hbondAcc_tversky_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            hbondDon_tversky_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
            colored_tversky_matrix = numpy.empty([file_input.frames,file_input.frames],dtype=float)
        for f1 in range(num_frames):
            for f2 in range(num_frames):
                # Tversky is asymmetric (normalized by frame f1's volume),
                # so the full matrix is computed rather than mirrored.
                #print 'length of frame 1 = {0}'.format(len(overlap_value.coordinates[f1]))
                tversky_matrix[f1,f2] = overlap_value.tversky_overlap(num_overlap[f1,f2],f1, f2)
                if (options.color):
                    aromatic_tversky_matrix[f1,f2] = aromatic_overlap.tversky_overlap(aromatic_num_overlap[f1,f2], f1, f2)
                    hbondAcc_tversky_matrix[f1,f2] = hbondAcc_overlap.tversky_overlap(hbondAcc_num_overlap[f1,f2], f1, f2)
                    hbondDon_tversky_matrix[f1,f2] = hbondDon_overlap.tversky_overlap(hbondDon_num_overlap[f1,f2], f1, f2)
                    colored_tversky_matrix[f1,f2] = (hbondAcc_tversky_matrix[f1,f2] + hbondDon_tversky_matrix[f1,f2] + tversky_matrix[f1,f2] + aromatic_tversky_matrix[f1,f2])/4
        print "Overlap Matrix for Tversky calculation"
        print tversky_matrix
        if (options.color):
            print "Aromatic"
            print aromatic_tversky_matrix
            print "Hydrogen Bond Acceptor"
            print hbondAcc_tversky_matrix
            print "Hydrogen Bond Donor"
            print hbondDon_tversky_matrix
        print "\n"
        if (options.csv):
            numpy.savetxt(os.getcwd()+'/POVME_Tversky_matrix.csv',tversky_matrix,delimiter=',')
        if (options.color) and (options.csv):
            numpy.savetxt(os.getcwd()+'/POVME_Tversky_matrix_aromatic.csv',aromatic_tversky_matrix,delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tversky_matrix_hbondAcceptor.csv',hbondAcc_tversky_matrix,delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tversky_matrix_hbondDonor.csv',hbondDon_tversky_matrix,delimiter=',')
            numpy.savetxt(os.getcwd()+'/POVME_Tversky_matrix_colored.csv',colored_tversky_matrix,delimiter=',')
        numpy.save('tversky_matrix.npy', tversky_matrix)
        if (options.color):
            numpy.save('aromatic_tversky_matrix.npy', aromatic_tversky_matrix)
            numpy.save('hbondAcc_tversky_matrix.npy', hbondAcc_tversky_matrix)
            numpy.save('hbondDon_tversky_matrix.npy', hbondDon_tversky_matrix)
        #print "Map of index numbers to npy files"
        #print frames_dict

# Script entry point.
if __name__ == "__main__": main(sys.argv)
| 47.121813
| 179
| 0.633401
|
4a0eda06cf695b5840c4e0adfbdcc58f31f1c23f
| 2,792
|
py
|
Python
|
tests/scripts/task_build.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | 1
|
2022-03-08T03:03:53.000Z
|
2022-03-08T03:03:53.000Z
|
tests/scripts/task_build.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | null | null | null |
tests/scripts/task_build.py
|
666vulcan/tvm
|
ffd5f70370642c909222f9a4cae8400023dacbdc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import shutil
import os
import logging
import multiprocessing
from pathlib import Path
from cmd_utils import Sh, init_log, REPO_ROOT
if __name__ == "__main__":
    init_log()
    parser = argparse.ArgumentParser(description="List pytest nodeids for a folder")
    parser.add_argument("--sccache-bucket", required=False, help="sccache bucket name")
    parser.add_argument("--num-executors", required=True, help="number of Jenkins executors")
    parser.add_argument("--build-dir", default="build", help="build folder")
    args = parser.parse_args()
    env = {"VTA_HW_PATH": str(Path(os.getcwd()) / "3rdparty" / "vta-hw")}
    sccache_exe = shutil.which("sccache")
    # sccache is only usable when both the executable and a bucket are present.
    use_sccache = sccache_exe is not None and args.sccache_bucket is not None
    build_dir = Path(os.getcwd()) / args.build_dir
    build_dir = build_dir.relative_to(REPO_ROOT)
    if use_sccache:
        env["SCCACHE_BUCKET"] = args.sccache_bucket
        env["CXX"] = "/opt/sccache/c++"
        env["CC"] = "/opt/sccache/cc"
        logging.info(f"Using sccache bucket: {args.sccache_bucket}")
    else:
        if sccache_exe is None:
            reason = "'sccache' executable not found"
        elif args.sccache_bucket is None:
            # Bug fix: this branch previously reported "'sccache' executable
            # not found" even though only the bucket argument was missing.
            reason = "'--sccache-bucket' was not provided"
        else:
            reason = "<unknown>"
        logging.info(f"Not using sccache, reason: {reason}")
    sh = Sh(env)
    if use_sccache:
        # Starting the server may fail if one is already running; that is fine.
        sh.run("sccache --start-server", check=False)
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")
    # Split the machine's cores evenly between Jenkins executors so parallel
    # jobs on the same node do not oversubscribe the CPU.
    executors = int(args.num_executors)
    nproc = multiprocessing.cpu_count()
    available_cpus = nproc // executors
    num_cpus = max(available_cpus, 1)
    sh.run("cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..", cwd=build_dir)
    sh.run(f"cmake --build . -- VERBOSE=1 -j{num_cpus}", cwd=build_dir)
    if use_sccache:
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")
| 35.794872
| 93
| 0.686605
|
4a0eda5698396e2e5952b971b52fdfe747fcd42a
| 5,393
|
py
|
Python
|
src/packages/database/database_manager.py
|
beepbeat/pipsqueak3
|
1663875ab21dbb6bb63a424e480db70f0948d9a4
|
[
"BSD-3-Clause"
] | null | null | null |
src/packages/database/database_manager.py
|
beepbeat/pipsqueak3
|
1663875ab21dbb6bb63a424e480db70f0948d9a4
|
[
"BSD-3-Clause"
] | null | null | null |
src/packages/database/database_manager.py
|
beepbeat/pipsqueak3
|
1663875ab21dbb6bb63a424e480db70f0948d9a4
|
[
"BSD-3-Clause"
] | 1
|
2018-06-24T22:53:03.000Z
|
2018-06-24T22:53:03.000Z
|
"""
database_manager.py - allows connections to SQL databases.
Provides postgreSQL connectivity for mechasqueak3.
Copyright (c) 2018 The Fuel Rat Mischief,
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md
"""
import logging
from typing import Dict, List, Tuple, Union

import psycopg2
# ``psycopg2.pool`` is a submodule that is NOT imported by ``import psycopg2``;
# it must be imported explicitly for psycopg2.pool.SimpleConnectionPool to resolve.
import psycopg2.pool
from psycopg2 import sql

from config import config
LOG = logging.getLogger(f"mecha.{__name__}")
class DatabaseManager:
    """
    Database Manager class intended to be inherited by a parent class that requires database
    connectivity. Currently, only PostgreSQL 9.5+ is supported.
    ODBC drivers are not required on Windows.

    Usage:
        >>> DatabaseManager(dbhost='DatabaseServer.org',
        ...                 dbport=5432,
        ...                 dbname='DatabaseName',
        ...                 dbuser='DatabaseUserName',
        ...                 dbpassword='UserPassword') # doctest: +SKIP

    All arguments are optional. If omitted, config values will be pulled from config file.
    Instantiation of the DBM is not intended to be done per method, but rather once as a
    class property, and the DatabaseManager.query() method used to perform a query.
    Connections are managed by a SimpleConnectionPool, keeping a minimum of 5 and a maximum
    of 10 connections, able to dynamically open/close ports as needed.

    Performing A Query:
    .query() does not accept a direct string. You must use a psycopg2 composed SQL (sql.SQL)
    object, with appropriate substitutions.
    DO NOT USE STRING CONCATENATION OR APPEND VALUES. THIS IS BAD PRACTICE, AND AN INJECTION
    RISK!
        >>> query = sql.SQL(
        ... "SELECT FROM public.table WHERE table.name=%s AND table.lang=%s AND table.something=%s")
        >>> dbm.query(query, ('tuple','of','values'))# doctest: +SKIP
    """

    def __init__(self,
                 dbhost=None,
                 dbport=None,
                 dbname=None,
                 dbuser=None,
                 dbpassword=None
                 ):
        # ``_initialized`` guards against re-running setup if __init__ is
        # invoked more than once on the same instance (e.g. by a subclass).
        if not hasattr(self, "_initialized"):
            self._initialized = True
            # Utilize function arguments if they are provided,
            # otherwise retrieve from config file and use those values.
            self._dbhost = dbhost if dbhost is not None else config['database'].get('host')
            assert self._dbhost
            # Bug fix: this previously tested ``dbhost is not None``, so an
            # explicit ``dbport`` argument was ignored whenever ``dbhost``
            # was omitted (and vice versa).
            self._dbport = dbport if dbport is not None else config['database'].get('port')
            assert self._dbport
            self._dbname = dbname if dbname is not None else config['database'].get('dbname')
            assert self._dbname
            self._dbuser = dbuser if dbuser is not None else config['database'].get('username')
            assert self._dbuser
            self._dbpass = dbpassword if dbpassword is not None else \
                config['database'].get('password')
            # NOTE(review): these asserts are stripped under ``python -O``;
            # consider raising ValueError if validation must always run.
            assert self._dbpass

            # Create Database Connections Pool (min 5 / max 10 connections).
            try:
                self._dbpool = psycopg2.pool.SimpleConnectionPool(5, 10, host=self._dbhost,
                                                                  port=self._dbport,
                                                                  dbname=self._dbname,
                                                                  user=self._dbuser,
                                                                  password=self._dbpass)
            except psycopg2.DatabaseError as error:
                LOG.exception("Unable to connect to database!")
                raise error

    async def query(self, query: sql.SQL, values: Union[Tuple, Dict]) -> List:
        """
        Send a query to the connected database. Pulls a connection from the pool and creates
        a cursor, executing the composed query with the values.
        Requires a composed SQL object (See psycopg2 docs)

        Args:
            query: composed SQL query object
            values: tuple or dict of values for query

        Returns:
            List of rows matching query. May return an empty list if there are no matching rows.

        Raises:
            TypeError: if ``query`` is not a composed ``sql.SQL`` object, or
                ``values`` is not a tuple/dict.
        """
        # Verify composed SQL object. A raw string is rejected outright to
        # make string-concatenated (injection-prone) queries impossible.
        # NOTE(review): ``sql.SQL(...).format(...)`` yields ``sql.Composed``,
        # which this check would reject — confirm intended input type.
        if not isinstance(query, sql.SQL):
            raise TypeError("Expected composed SQL object for query.")
        # Verify value is tuple or dict. Use the builtin types here:
        # isinstance() with typing.Dict/typing.Tuple is deprecated and raises
        # TypeError on modern Python versions.
        if not isinstance(values, (tuple, dict)):
            raise TypeError("Expected tuple or dict for query values.")
        # Pull a connection from the pool, and create a cursor from it.
        with self._dbpool.getconn() as connection:
            # If we could set these at connection time, we would,
            # but they must be set outside the pool.
            connection.autocommit = True
            connection.set_client_encoding('utf-8')
            # Create cursor, and execute the query.
            with connection.cursor() as cursor:
                cursor.execute(query, values)
                # cursor.description is None when the statement returned no
                # result set (e.g. plain INSERT/UPDATE).
                if cursor.description:
                    result = cursor.fetchall()
                else:
                    # Return a blank tuple if there are no results, since we are
                    # forcing this to a list.
                    result = ()
            # Release connection back to the pool.
            self._dbpool.putconn(connection)
        return list(result)
| 39.07971
| 100
| 0.590024
|
4a0edab191f7b25bec98e18a9a2ba8f37342bde7
| 19,277
|
py
|
Python
|
pontoon/administration/views.py
|
Henikilan/pontoon
|
30d8c3d3dd0630d79dce8a75208f9778d6218864
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/administration/views.py
|
Henikilan/pontoon
|
30d8c3d3dd0630d79dce8a75208f9778d6218864
|
[
"BSD-3-Clause"
] | 4
|
2018-05-25T13:38:07.000Z
|
2021-12-13T20:47:16.000Z
|
pontoon/administration/views.py
|
Henikilan/pontoon
|
30d8c3d3dd0630d79dce8a75208f9778d6218864
|
[
"BSD-3-Clause"
] | 1
|
2019-10-07T17:46:36.000Z
|
2019-10-07T17:46:36.000Z
|
from __future__ import absolute_import
import logging
from backports import csv
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db import transaction, IntegrityError
from django.db.models import Max
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.shortcuts import render
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from pontoon.administration.forms import (
EntityFormSet,
ExternalResourceInlineFormSet,
ProjectForm,
RepositoryInlineFormSet,
SubpageInlineFormSet,
TagInlineFormSet,
)
from pontoon.base import utils
from pontoon.base.utils import require_AJAX
from pontoon.base.models import (
Entity,
Locale,
Project,
ProjectLocale,
Resource,
TranslatedResource,
Translation,
)
from pontoon.sync.models import SyncLog
from pontoon.sync.tasks import sync_project
log = logging.getLogger(__name__)
def admin(request):
    """Render the admin landing page listing every project."""
    # Only users with the project-management permission may enter.
    if not request.user.has_perm('base.can_manage_project'):
        raise PermissionDenied

    project_list = (
        Project.objects.all()
        .prefetch_related('latest_translation__user')
        .order_by('name')
    )
    context = {
        'admin': True,
        'projects': project_list,
    }
    return render(request, 'admin.html', context)
def get_slug(request):
    """Return the slugified form of the project name passed via GET."""
    log.debug("Convert project name to slug.")

    # Guard clauses: permission check, AJAX-only, and required parameter.
    if not request.user.has_perm('base.can_manage_project'):
        log.error("Insufficient privileges.")
        return HttpResponse("error")
    if not request.is_ajax():
        log.error("Non-AJAX request")
        return HttpResponse("error")
    try:
        project_name = request.GET['name']
    except MultiValueDictKeyError as exc:
        log.error(str(exc))
        return HttpResponse("error")

    log.debug(f"Name: {project_name}")
    project_slug = slugify(project_name)
    log.debug(f"Slug: {project_slug}")
    return HttpResponse(project_slug)
@transaction.atomic
def manage_project(request, slug=None, template='admin_project.html'):
    """Admin view to create or edit a project and its related objects.

    GET with no slug shows an empty "Add project" form; GET with a slug loads
    the project for editing; POST saves the project together with its inline
    formsets (subpages, repositories, tags, external resources) and its
    locale assignments. The whole request runs in one transaction.
    """
    log.debug("Admin project.")
    if not request.user.has_perm('base.can_manage_project'):
        raise PermissionDenied
    # Defaults for the "Add project" case: empty form/formsets, no locales.
    form = ProjectForm()
    subpage_formset = SubpageInlineFormSet()
    repo_formset = RepositoryInlineFormSet()
    external_resource_formset = ExternalResourceInlineFormSet()
    tag_formset = TagInlineFormSet()
    locales_readonly = Locale.objects.none()
    locales_selected = Locale.objects.none()
    subtitle = 'Add project'
    pk = None
    project = None
    # Save project
    if request.method == 'POST':
        # Locales posted as two lists; read-only locales take precedence,
        # so they are excluded from the editable selection below.
        locales_readonly = Locale.objects.filter(
            pk__in=request.POST.getlist('locales_readonly')
        )
        locales_selected = Locale.objects.filter(
            pk__in=request.POST.getlist('locales')
        ).exclude(
            pk__in=locales_readonly
        )
        # Update existing project
        try:
            # Presence of 'pk' in the POST data distinguishes edit from add;
            # its absence raises MultiValueDictKeyError (handled below).
            pk = request.POST['pk']
            project = Project.objects.get(pk=pk)
            form = ProjectForm(request.POST, instance=project)
            # Needed if form invalid
            subpage_formset = SubpageInlineFormSet(request.POST, instance=project)
            repo_formset = RepositoryInlineFormSet(request.POST, instance=project)
            tag_formset = (
                TagInlineFormSet(request.POST, instance=project)
                if project.tags_enabled
                else None
            )
            external_resource_formset = ExternalResourceInlineFormSet(
                request.POST, instance=project
            )
            subtitle = 'Edit project'
        # Add a new project
        except MultiValueDictKeyError:
            form = ProjectForm(request.POST)
            # Needed if form invalid
            subpage_formset = SubpageInlineFormSet(request.POST)
            repo_formset = RepositoryInlineFormSet(request.POST)
            external_resource_formset = ExternalResourceInlineFormSet(request.POST)
            tag_formset = None
        if form.is_valid():
            # Rebind formsets to the (possibly new, unsaved) project instance
            # so their validation runs against the right parent.
            project = form.save(commit=False)
            subpage_formset = SubpageInlineFormSet(request.POST, instance=project)
            repo_formset = RepositoryInlineFormSet(request.POST, instance=project)
            external_resource_formset = ExternalResourceInlineFormSet(
                request.POST, instance=project
            )
            if tag_formset:
                tag_formset = TagInlineFormSet(request.POST, instance=project)
            formsets_valid = (
                subpage_formset.is_valid()
                and repo_formset.is_valid()
                and external_resource_formset.is_valid()
                and (tag_formset.is_valid() if tag_formset else True)
            )
            if formsets_valid:
                project.save()
                # Manually save ProjectLocales due to intermediary model
                locales_form = form.cleaned_data.get('locales', [])
                locales_readonly_form = form.cleaned_data.get('locales_readonly', [])
                locales = locales_form | locales_readonly_form
                # Drop ProjectLocale rows for locales removed from the form.
                (
                    ProjectLocale.objects
                    .filter(project=project)
                    .exclude(locale__pk__in=[l.pk for l in locales])
                    .delete()
                )
                for locale in locales:
                    ProjectLocale.objects.get_or_create(project=project, locale=locale)
                # Update readonly flags
                locales_readonly_pks = [l.pk for l in locales_readonly_form]
                project_locales = ProjectLocale.objects.filter(project=project)
                project_locales.exclude(
                    locale__pk__in=locales_readonly_pks
                ).update(readonly=False)
                project_locales.filter(
                    locale__pk__in=locales_readonly_pks,
                ).update(readonly=True)
                subpage_formset.save()
                repo_formset.save()
                external_resource_formset.save()
                if tag_formset:
                    tag_formset.save()
                # If the data source is database and there are new strings, save them.
                if project.data_source == 'database':
                    _save_new_strings(project, request.POST.get('new_strings', ''))
                    _create_or_update_translated_resources(project, locales)
                # Properly displays formsets, but removes errors (if valid only)
                subpage_formset = SubpageInlineFormSet(instance=project)
                repo_formset = RepositoryInlineFormSet(instance=project)
                external_resource_formset = ExternalResourceInlineFormSet(instance=project)
                if project.tags_enabled:
                    tag_formset = TagInlineFormSet(instance=project)
                subtitle += '. Saved.'
                pk = project.pk
            else:
                subtitle += '. Error.'
        else:
            subtitle += '. Error.'
    # If URL specified and found, show edit, otherwise show add form
    elif slug is not None:
        try:
            project = Project.objects.get(slug=slug)
            pk = project.pk
            form = ProjectForm(instance=project)
            subpage_formset = SubpageInlineFormSet(instance=project)
            repo_formset = RepositoryInlineFormSet(instance=project)
            tag_formset = (
                TagInlineFormSet(instance=project)
                if project.tags_enabled
                else None
            )
            external_resource_formset = ExternalResourceInlineFormSet(instance=project)
            locales_readonly = Locale.objects.filter(
                project_locale__readonly=True,
                project_locale__project=project,
            )
            locales_selected = project.locales.exclude(pk__in=locales_readonly)
            subtitle = 'Edit project'
        except Project.DoesNotExist:
            # Unknown slug: pre-fill the add form with it instead of 404ing.
            form = ProjectForm(initial={'slug': slug})
    # Override default label suffix
    form.label_suffix = ''
    # Lightweight project summaries for the template's sidebar/locale widget.
    projects = []
    for p in Project.objects.prefetch_related('locales').order_by('name'):
        projects.append({
            'name': p.name,
            # Cannot use values_list() here, because it hits the DB again
            'locales': [l.pk for l in p.locales.all()],
        })
    locales_available = Locale.objects.exclude(pk__in=locales_selected)
    # Admins reason in terms of locale codes (see bug 1394194)
    locales_readonly = locales_readonly.order_by('code')
    locales_selected = locales_selected.order_by('code')
    locales_available = locales_available.order_by('code')
    data = {
        'slug': slug,
        'form': form,
        'subpage_formset': subpage_formset,
        'repo_formset': repo_formset,
        'tag_formset': tag_formset,
        'external_resource_formset': external_resource_formset,
        'locales_readonly': locales_readonly,
        'locales_selected': locales_selected,
        'locales_available': locales_available,
        'subtitle': subtitle,
        'pk': pk,
        'project': project,
        'projects': projects,
    }
    # Set locale in Translate link
    if hasattr(project, 'locales') and locales_selected:
        locale = utils.get_project_locale_from_request(
            request, project.locales) or locales_selected[0].code
        if locale:
            data['translate_locale'] = locale
    # 'ready' flags that the project has at least one synced resource.
    if Resource.objects.filter(project=project).exists():
        data['ready'] = True
    return render(request, template, data)
def _get_project_strings_csv(project, entities, output):
    """Write all source strings and approved translations of *project* as CSV.

    The first column ("source") holds every source string; each following
    column is one enabled locale's approved translation (or an empty cell).
    The header row lists "source" followed by the locale codes.

    :arg Project project: the project from which to take strings
    :arg list entities: the list of all entities of the project
    :arg buffer output: a buffer to which the CSV writer will send its data
    :returns: the same output object with the CSV data
    """
    enabled_locales = Locale.objects.filter(project_locale__project=project)
    approved_translations = (
        Translation.objects
        .filter(
            entity__resource__project=project,
            approved=True,
        )
        .prefetch_related('locale')
        .prefetch_related('entity')
    )

    # One row per entity, keyed by entity id; starts with the source string
    # and is filled in with per-locale translations below.
    rows_by_entity = {entity.id: {'source': entity.string} for entity in entities}
    for translation in approved_translations:
        rows_by_entity[translation.entity.id][translation.locale.code] = translation.string

    writer = csv.writer(output)
    header = ['source'] + [locale.code for locale in enabled_locales]
    writer.writerow(header)
    for row_data in rows_by_entity.values():
        writer.writerow([row_data.get(column, '') for column in header])

    return output
def _get_resource_for_database_project(project):
    """Return the single Resource of an in-database project, creating it if needed.

    A database-backed project must have exactly one Resource; if several
    exist, that is a data-integrity problem and the lookup error propagates.

    :arg Project project: the in-database Project object
    :returns: the unique Resource object associated with the project
    """
    try:
        return Resource.objects.get(project=project)
    except Resource.DoesNotExist:
        # No resource yet for this project: create the placeholder one.
        new_resource = Resource(path='database', project=project)
        new_resource.save()
        return new_resource
    except Resource.MultipleObjectsReturned:
        # More than one resource is invalid for a database project;
        # log loudly and let the caller deal with the exception.
        log.error(
            'There is more than 1 Resource for in_database project %s' %
            project.name
        )
        raise
def _save_new_strings(project, source):
    """Store a newline-separated batch of source strings into *project*.

    Each non-empty line of *source* becomes one Entity attached to the
    project's database Resource, ordered by its position in the batch.

    :arg Project project: the Project object to which new strings will be associated
    :arg string source: a text of new-line-separated source strings
    :returns: True if new strings have been saved, False otherwise
    """
    # Split into lines and drop blank ones.
    candidate_lines = (line.strip() for line in source.strip().split('\n'))
    new_strings = [line for line in candidate_lines if line]
    if not new_strings:
        return False

    # Attach the strings to the project's (possibly freshly created)
    # database resource and record the new string count.
    resource = _get_resource_for_database_project(project)
    resource.total_strings = len(new_strings)
    resource.save()

    Entity.objects.bulk_create([
        Entity(string=text, resource=resource, order=position)
        for position, text in enumerate(new_strings)
    ])
    return True
def _create_or_update_translated_resources(
    project,
    locales=None,
    resource=None,
):
    """Ensure a TranslatedResource exists per locale and refresh its stats.

    Defaults: all locales enabled for *project*, and the project's single
    database resource.
    """
    if locales is None:
        locales = Locale.objects.filter(project_locale__project=project)
    if resource is None:
        resource = _get_resource_for_database_project(project)

    for locale in locales:
        translated_resource, _created = TranslatedResource.objects.get_or_create(
            locale_id=locale.pk,
            resource=resource,
        )
        # Recompute counts whether the row is new or pre-existing.
        translated_resource.calculate_stats()
def manage_project_strings(request, slug=None):
    """View to manage the source strings of a project.
    This view is only accessible for projects that do not have a "Source repo". It allows
    admins to add new strings to a project in a batch, and then to edit, remove or comment on
    any strings.

    GET with ``?format=csv`` downloads all strings/translations as CSV.
    POST either imports a first batch of strings (when the project has none)
    or applies edits made through the entity formset.
    """
    if not request.user.has_perm('base.can_manage_project'):
        raise PermissionDenied
    try:
        project = Project.objects.get(slug=slug)
    except Project.DoesNotExist:
        raise Http404
    # Only database-backed projects may have their strings edited here;
    # repository-backed projects get their strings from sync.
    if project.data_source != 'database':
        return HttpResponseForbidden(
            'Project %s\'s strings come from a repository, managing strings is forbidden.'
            % project.name
        )
    entities = Entity.objects.filter(resource__project=project, obsolete=False)
    project_has_strings = entities.exists()
    formset = EntityFormSet(queryset=entities)
    if request.GET.get('format') == 'csv':
        # Return a CSV document containing all translations for this project.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="%s.csv"' % project.name
        return _get_project_strings_csv(project, entities, response)
    if request.method == 'POST':
        if not project_has_strings:
            # We are receiving new strings in a batch.
            new_strings_source = request.POST.get('new_strings', '')
            if _save_new_strings(project, new_strings_source):
                project_has_strings = True  # we have strings now!
                _create_or_update_translated_resources(project)
        else:
            # Get all strings, find the ones that changed, update them in the database.
            formset = EntityFormSet(request.POST, queryset=entities)
            if formset.is_valid():
                resource = Resource.objects.filter(project=project).first()
                # Highest existing 'order'; new entities are appended after it.
                entity_max_order = entities.aggregate(
                    Max('order')
                )['order__max']
                try:
                    # This line can purposefully cause an exception, and that
                    # causes trouble in tests, because all tests are
                    # encapsulated in a single transation. Django thus refuses
                    # to run any other requests after one has failed, until the
                    # end of the transation.
                    # Using transation.atomic here is the way to tell django
                    # that this is fine.
                    # See https://stackoverflow.com/questions/21458387/
                    with transaction.atomic():
                        formset.save()
                except IntegrityError:
                    # This happens when the user creates a new string. By default,
                    # it has no resource, and that's a violation of the database
                    # constraints. So, we want to make sure all entries have a resource.
                    new_entities = formset.save(commit=False)
                    for entity in new_entities:
                        if not entity.resource_id:
                            entity.resource = resource
                            # We also use this opportunity to give the new entity
                            # an order.
                            entity_max_order += 1
                            entity.order = entity_max_order
                        # Note that we save all entities one by one. That shouldn't be a problem
                        # because we don't expect users to change thousands of strings at once.
                        # Also, django is smart and ``formset.save()`` only returns Entity
                        # objects that have changed.
                        entity.save()
                # Update stats with the new number of strings.
                resource.total_strings = (
                    Entity.objects
                    .filter(resource=resource, obsolete=False)
                    .count()
                )
                resource.save()
                _create_or_update_translated_resources(project, resource=resource)
        # Reinitialize the formset.
        formset = EntityFormSet(queryset=entities)
    data = {
        'project': project,
        'entities': entities,
        'project_has_strings': project_has_strings,
        'entities_form': formset,
    }
    return render(request, 'admin_project_strings.html', data)
@login_required(redirect_field_name='', login_url='/403')
@require_AJAX
def manually_sync_project(request, slug):
    """Kick off an asynchronous sync of one project (admin + MANUAL_SYNC only)."""
    allowed = (
        request.user.has_perm('base.can_manage_project') and settings.MANUAL_SYNC
    )
    if not allowed:
        return HttpResponseForbidden(
            "Forbidden: You don't have permission for syncing projects"
        )

    sync_log = SyncLog.objects.create(start_time=timezone.now())
    project = Project.objects.get(slug=slug)
    # Dispatch the sync task to the worker queue; the response does not wait.
    sync_project.delay(project.pk, sync_log.pk)
    return HttpResponse('ok')
| 36.234962
| 96
| 0.628677
|
4a0edbc22bc61d19f555e6315492e76f85b6a2fa
| 10,395
|
py
|
Python
|
docs/source/conf.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 351
|
2016-01-27T19:15:27.000Z
|
2022-03-09T15:40:56.000Z
|
docs/source/conf.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 37
|
2016-03-16T03:57:59.000Z
|
2021-06-26T14:02:33.000Z
|
docs/source/conf.py
|
edublancas/sklearn-model-evaluation
|
1f35d5bcc689a5f4d54c14fde60abf09af9fc374
|
[
"MIT"
] | 30
|
2016-01-27T19:27:08.000Z
|
2022-03-31T06:09:59.000Z
|
# -*- coding: utf-8 -*-
#
# sklearn-evaluation documentation build configuration file, created by
# sphinx-quickstart on Fri May 27 12:43:49 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
import hooks
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'nbsphinx',
]
# Configuration parameters for plot_directive
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sklearn-evaluation'
copyright = u'2016, Eduardo Blancas Reyes'
author = u'Eduardo Blancas Reyes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import ast
import re

# Parse ``__version__ = ...`` out of the package's __init__.py so the docs
# version always matches the installed source tree.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('../../src/sklearn_evaluation/__init__.py', 'rb') as f:
    _match = _version_re.search(f.read().decode('utf-8'))
VERSION = str(ast.literal_eval(_match.group(1)))
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'sklearn-evaluation v0.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sklearn-evaluationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sklearn-evaluation.tex', u'sklearn-evaluation Documentation',
u'Eduardo Blancas Reyes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'sklearn-evaluation',
u'sklearn-evaluation Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sklearn-evaluation', u'sklearn-evaluation Documentation',
author, 'sklearn-evaluation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'sklearn': ('http://scikit-learn.org/stable', None),
'matplotlib': ('http://matplotlib.org/', None),
}
def setup(app):
    """Sphinx extension entry point: register `hooks.config_init` to run
    when Sphinx fires the 'config-inited' event (after conf.py is loaded)."""
    app.connect('config-inited', hooks.config_init)
| 32.791798
| 80
| 0.717364
|
4a0edbca19b3aedaa3edd590f836257780176efa
| 3,398
|
py
|
Python
|
fibratus/output/amqp.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | null | null | null |
fibratus/output/amqp.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | null | null | null |
fibratus/output/amqp.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | 1
|
2022-03-07T08:05:34.000Z
|
2022-03-07T08:05:34.000Z
|
# Copyright 2016 by Nedim Sabic (RabbitStack)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pika
from fibratus.errors import InvalidPayloadError
from fibratus.output.base import Output
class AmqpOutput(Output):
    """Output adapter which publishes event payloads to an AMQP broker."""

    def __init__(self, **kwargs):
        """Builds a new instance of the AMQP output adapter.

        Parameters
        ----------
        kwargs: dict
            AMQP configuration
        """
        Output.__init__(self)
        # broker settings, defaulting to a local RabbitMQ-style instance
        self._username = kwargs.pop('username', 'guest')
        self._password = kwargs.pop('password', 'guest')
        self._host = kwargs.pop('host', '127.0.0.1')
        self._port = kwargs.pop('port', 5672)
        self._vhost = kwargs.pop('vhost', '/')
        self._delivery_mode = kwargs.pop('delivery_mode', 1)
        self._exchange = kwargs.pop('exchange', None)
        self._routingkey = kwargs.pop('routingkey', None)
        self._parameters = pika.ConnectionParameters(
            self._host,
            self._port,
            self._vhost,
            pika.PlainCredentials(self._username, self._password))
        # the connection is opened lazily, on the first call to `emit`
        self._connection = None
        self._channel = None
        self._basic_props = pika.BasicProperties(content_type='text/json',
                                                 delivery_mode=self._delivery_mode)

    def emit(self, body, **kwargs):
        """Publishes `body` (a dict) as a JSON message to the broker."""
        if not self._connection:
            self._connection = pika.BlockingConnection(self._parameters)
            self._channel = self._connection.channel()
        # the exchange name and the routing key can be
        # overridden on a per-message basis
        self._routingkey = kwargs.pop('routingkey', self._routingkey)
        self._exchange = kwargs.pop('exchange', self._exchange)
        if not isinstance(body, dict):
            # only dictionaries are accepted as message payloads
            raise InvalidPayloadError('invalid payload for AMQP message. '
                                      'dict expected but %s found'
                                      % type(body))
        self._channel.basic_publish(self._exchange,
                                    self._routingkey,
                                    json.dumps(body), self._basic_props)

    @property
    def username(self):
        return self._username

    @property
    def host(self):
        return self._host

    @property
    def port(self):
        return self._port

    @property
    def vhost(self):
        return self._vhost

    @property
    def exchange(self):
        return self._exchange

    @property
    def routingkey(self):
        return self._routingkey

    @property
    def delivery_mode(self):
        return self._delivery_mode
| 32.361905
| 83
| 0.598293
|
4a0edbdc7458512256a1959f00e2bcf01351f6fb
| 18,194
|
py
|
Python
|
doc/source/notebooks/intro_to_gpflow2.pct.py
|
yiwei-prowler/GPflow
|
de08879228e4077f4476279d208fd23634508a8b
|
[
"Apache-2.0"
] | 1
|
2020-05-07T12:25:40.000Z
|
2020-05-07T12:25:40.000Z
|
doc/source/notebooks/intro_to_gpflow2.pct.py
|
yiwei-prowler/GPflow
|
de08879228e4077f4476279d208fd23634508a8b
|
[
"Apache-2.0"
] | null | null | null |
doc/source/notebooks/intro_to_gpflow2.pct.py
|
yiwei-prowler/GPflow
|
de08879228e4077f4476279d208fd23634508a8b
|
[
"Apache-2.0"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# GPflow with TensorFlow 2
# ===
#
# ##### Small steps big changes
#
# <br>
#
#
# %%
from typing import Tuple, Optional
import tempfile
import pathlib
import datetime
import io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.config import default_float
from gpflow.ci_utils import ci_niter
from gpflow.utilities import to_default_float
import warnings
warnings.filterwarnings("ignore")
# %% [markdown]
# Make `tensorboard` work inside notebook:
# %%
output_logdir = "/tmp/tensorboard"
# !rm -rf "{output_logdir}"
# !mkdir "{output_logdir}"
# %load_ext tensorboard
# %matplotlib inline
def enumerated_logdir(_logdir_id: int = [0]):
    """Return a fresh, uniquely numbered logdir path under `output_logdir`.

    The mutable default argument deliberately acts as a counter shared
    across calls, so each invocation yields the next numbered subdirectory.
    """
    current = _logdir_id[0]
    _logdir_id[0] = current + 1
    return str(pathlib.Path(output_logdir, str(current)))
# %% [markdown]
# Set up random seeds and default float for `gpflow` tensors:
# %%
gpflow.config.set_default_float(np.float64)
np.random.seed(0)
tf.random.set_seed(0)
# %% [markdown]
# ## Loading data using TensorFlow Datasets
#
# For this example, we create a synthetic dataset (noisy sine function):
# %%
def noisy_sin(x):
    """Noisy sine function: sin(x) plus Gaussian noise with std dev 0.1."""
    noise = tf.random.normal(x.shape, dtype=default_float())
    return tf.math.sin(x) + 0.1 * noise
num_train_data, num_test_data = 100, 500
X = tf.random.uniform((num_train_data, 1), dtype=default_float()) * 10
Xtest = tf.random.uniform((num_test_data, 1), dtype=default_float()) * 10
Y = noisy_sin(X)
Ytest = noisy_sin(Xtest)
data = (X, Y)
plt.plot(X, Y, "xk")
plt.show()
# %% [markdown]
# Working with TensorFlow Datasets is an efficient way to rapidly shuffle, iterate, and batch from data. For `prefetch` size we use `tf.data.experimental.AUTOTUNE` as recommended by TensorFlow [guidelines](https://www.tensorflow.org/guide/data_performance).
# %%
train_dataset = tf.data.Dataset.from_tensor_slices((X, Y))
test_dataset = tf.data.Dataset.from_tensor_slices((Xtest, Ytest))
batch_size = 32
num_features = 10
prefetch_size = tf.data.experimental.AUTOTUNE
shuffle_buffer_size = num_train_data // 2
num_batches_per_epoch = num_train_data // batch_size
original_train_dataset = train_dataset
train_dataset = (
train_dataset.repeat()
.prefetch(prefetch_size)
.shuffle(buffer_size=shuffle_buffer_size)
.batch(batch_size)
)
print(f"prefetch_size={prefetch_size}")
print(f"shuffle_buffer_size={shuffle_buffer_size}")
print(f"num_batches_per_epoch={num_batches_per_epoch}")
# %% [markdown]
# ## Define a GP model
#
# In GPflow 2.0, we use `tf.Module` (or the very thin `gpflow.base.Module` wrapper) to build all our models, as well as their components (kernels, likelihoods, parameters, and so on).
# %%
kernel = gpflow.kernels.SquaredExponential(variance=2.0)
likelihood = gpflow.likelihoods.Gaussian()
inducing_variable = np.linspace(0, 10, num_features).reshape(-1, 1)
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
# %% [markdown]
# You can set a module (or a particular parameter) to be non-trainable using the auxiliary method ```set_trainable(module, False)```:
# %%
from gpflow import set_trainable
set_trainable(likelihood, False)
set_trainable(kernel.variance, False)
set_trainable(likelihood, True)
set_trainable(kernel.variance, True)
# %% [markdown]
# We can use ```param.assign(value)``` to assign a value to a parameter:
# %%
kernel.lengthscales.assign(0.5)
# %% [markdown]
# All these changes are reflected when we use ```print_summary(model)``` to print a detailed summary of the model. By default the output is displayed in a minimalistic and simple table.
# %%
from gpflow.utilities import print_summary
print_summary(model) # same as print_summary(model, fmt="fancy_table")
# %% [markdown]
# We can change default printing so that it will look nicer in our notebook:
# %%
gpflow.config.set_default_summary_fmt("notebook")
print_summary(model) # same as print_summary(model, fmt="notebook")
# %% [markdown]
# Jupyter notebooks also format GPflow classes (that are subclasses of `gpflow.base.Module`) in the same nice way when at the end of a cell (this is independent of the `default_summary_fmt`):
# %%
model
# %% [markdown]
# ## Training using training_loss and training_loss_closure
#
# GPflow models come with training_loss and training_loss_closure methods to make it easy to train your models.
# There is a slight difference between models that own their own data (most of them, e.g. GPR, VGP, ...) and models that do not own the data (SVGP).
#
# ### Model-internal data
# For models that own their own data (inheriting from InternalDataTrainingLossMixin), data is provided at model construction time.
# In this case, model.training_loss does not take any arguments, and can be directly passed to an optimizer's `minimize()` method:
# %%
vgp_model = gpflow.models.VGP(data, kernel, likelihood)
optimizer = tf.optimizers.Adam()
optimizer.minimize(
vgp_model.training_loss, vgp_model.trainable_variables
) # Note: this does a single step
# In practice, you will need to call minimize() many times, this will be further discussed below.
# %% [markdown]
# This also works for the Scipy optimizer, though it will do the full optimization on a single call to minimize():
# %%
optimizer = gpflow.optimizers.Scipy()
optimizer.minimize(
vgp_model.training_loss, vgp_model.trainable_variables, options=dict(maxiter=ci_niter(1000))
)
# %% [markdown]
# You can obtain a compiled version using training_loss_closure, whose `compile` argument is True by default:
# %%
vgp_model.training_loss_closure() # compiled
vgp_model.training_loss_closure(compile=True) # compiled
vgp_model.training_loss_closure(compile=False) # uncompiled, same as vgp_model.training_loss
# %% [markdown]
# The SVGP model inherits from ExternalDataTrainingLossMixin and expects the data to be passed to training_loss().
# For SVGP as for the other regression models, `data` is a two-tuple of `(X, Y)`, where `X` is an array/tensor with shape `(num_data, input_dim)` and `Y` is an array/tensor with shape `(num_data, output_dim)`:
# %%
assert isinstance(model, gpflow.models.SVGP)
model.training_loss(data)
# %% [markdown]
# To make optimizing it easy, it has a `training_loss_closure()` method, that takes the data and returns a closure that computes the training loss on this data:
# %%
optimizer = tf.optimizers.Adam()
training_loss = model.training_loss_closure(
data
) # We save the compiled closure in a variable so as not to re-compile it each step
optimizer.minimize(training_loss, model.trainable_variables) # Note that this does a single step
# %% [markdown]
# SVGP can handle mini-batching, and an iterator from a batched tf.data.Dataset can be passed to the model's training_loss_closure():
# %%
batch_size = 5
batched_dataset = tf.data.Dataset.from_tensor_slices(data).batch(batch_size)
training_loss = model.training_loss_closure(iter(batched_dataset))
optimizer.minimize(training_loss, model.trainable_variables) # Note that this does a single step
# %% [markdown]
# As previously, training_loss_closure takes an optional `compile` argument for tf.function compilation (True by default).
# %% [markdown]
# ## Training using Gradient Tapes
#
# For a more elaborate example of a gradient update we can define an `optimization_step` that explicitly computes and applies gradients to the model.
# In TensorFlow 2, we can optimize (trainable) model parameters with TensorFlow optimizers using `tf.GradientTape`. In this simple example, we perform one gradient update of the Adam optimizer to minimize the training_loss (in this case the negative ELBO) of our model.
# The `optimization_step` can (and should) be wrapped in `tf.function` to be compiled to a graph if executing it many times.
# %%
def optimization_step(model: gpflow.models.SVGP, batch: Tuple[tf.Tensor, tf.Tensor]):
    """Apply one Adam gradient update of `model` on `batch`; return the loss."""
    trainables = model.trainable_variables
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        # watch only the trainable variables, not every accessed tensor
        tape.watch(trainables)
        loss = model.training_loss(batch)
    gradients = tape.gradient(loss, trainables)
    optimizer.apply_gradients(zip(gradients, trainables))
    return loss
# %% [markdown]
# We can use the functionality of TensorFlow Datasets to define a simple training loop that iterates over batches of the training dataset:
# %%
def simple_training_loop(model: gpflow.models.SVGP, epochs: int = 1, logging_epoch_freq: int = 10):
    """Run `epochs` of mini-batch training, printing the ELBO every
    `logging_epoch_freq` epochs."""
    step_fn = tf.function(optimization_step)  # compile once, reuse each step
    batch_iterator = iter(train_dataset)
    for epoch_id in range(1, epochs + 1):
        for _ in range(ci_niter(num_batches_per_epoch)):
            step_fn(model, next(batch_iterator))
        if epoch_id % logging_epoch_freq == 0:
            tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
# %%
simple_training_loop(model, epochs=10, logging_epoch_freq=2)
# %% [markdown]
# ## Monitoring
#
# `gpflow.monitor` provides a thin wrapper on top of tf.summary that makes it easy to monitor the training procedure.
# For a more detailed tutorial see the [monitoring notebook](./basics/monitoring.pct.py).
# %%
from gpflow.monitor import (
ImageToTensorBoard,
ModelToTensorBoard,
ExecuteCallback,
Monitor,
MonitorTaskGroup,
ScalarToTensorBoard,
)
samples_input = np.linspace(0, 10, 100).reshape(-1, 1)
def plot_model(fig, ax):
    """Draw the posterior mean, 95% band, sampled functions, and data on `ax`."""
    tf.print("Plotting...")
    mean, var = model.predict_f(samples_input)
    samples = model.predict_f_samples(samples_input, 10)
    std = np.sqrt(var[:, 0])
    lower = mean[:, 0] - 1.96 * std
    upper = mean[:, 0] + 1.96 * std
    ax.plot(samples_input, mean, "C0", lw=2)
    ax.fill_between(samples_input[:, 0], lower, upper, color="C0", alpha=0.2)
    ax.plot(X, Y, "kx")
    ax.plot(samples_input, samples[:, :, 0].numpy().T, "C0", linewidth=0.5)
    ax.set_ylim(-2.0, +2.0)
    ax.set_xlim(0, 10)
def print_cb(epoch_id=None, data=None):
    """Monitor callback that prints the model's training ELBO for an epoch."""
    tf.print(f"Epoch {epoch_id}: ELBO (train)", model.elbo(data))
def elbo_cb(data=None, **_):
    """Monitor callback returning the model's ELBO on `data` (extra kwargs ignored)."""
    return model.elbo(data)
output_logdir = enumerated_logdir()
model_task = ModelToTensorBoard(output_logdir, model)
elbo_task = ScalarToTensorBoard(output_logdir, elbo_cb, "elbo")
print_task = ExecuteCallback(callback=print_cb)
# We group these tasks and specify a period of `100` steps for them
fast_tasks = MonitorTaskGroup([model_task, elbo_task, print_task], period=100)
# We also want to see the model's fit during the optimisation
image_task = ImageToTensorBoard(output_logdir, plot_model, "samples_image")
# We typically don't want to plot too frequently during optimisation,
# which is why we specify a larger period for this task.
slow_taks = MonitorTaskGroup(image_task, period=500)
monitor = Monitor(fast_tasks, slow_taks)
def monitored_training_loop(epochs: int):
    """Train for `epochs`, invoking the monitor tasks once per epoch."""
    step_fn = tf.function(optimization_step)  # compile once, reuse each step
    batch_iterator = iter(train_dataset)
    for epoch in range(epochs):
        for _ in range(ci_niter(num_batches_per_epoch)):
            step_fn(model, next(batch_iterator))
        monitor(epoch, epoch_id=epoch + 1, data=data)
# %% [markdown]
# NOTE: for optimal performance it is recommended to wrap the monitoring inside `tf.function`.
# This is detailed in the [monitoring notebook](./basics/monitoring.ipynb).
# %%
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
monitored_training_loop(epochs=1000)
# %% [markdown]
# Then, we can use TensorBoard to examine the training procedure in more detail
# %%
# # %tensorboard --logdir "{output_logdir}"
# %% [markdown]
# ## Saving and loading models
#
# ### Checkpointing
#
# With the help of `tf.train.CheckpointManager` and `tf.train.Checkpoint`, we can checkpoint the model throughout the training procedure. Let's start with a simple example using checkpointing to save and load a `tf.Variable`:
# %%
initial_value = 1.2
a = tf.Variable(initial_value)
# %% [markdown]
# Create `Checkpoint` object:
# %%
ckpt = tf.train.Checkpoint(a=a)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=3)
# %% [markdown]
# Save the variable `a` and change its value right after:
# %%
manager.save()
_ = a.assign(0.33)
# %% [markdown]
# Now we can restore the old variable value:
# %%
print(f"Current value of variable a: {a.numpy():0.3f}")
ckpt.restore(manager.latest_checkpoint)
print(f"Value of variable a after restore: {a.numpy():0.3f}")
# %% [markdown]
# In the example below, we modify a simple training loop to save the model every 100 epochs using the `CheckpointManager`.
# %%
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
def checkpointing_training_loop(
    model: gpflow.models.SVGP,
    batch_size: int,
    epochs: int,
    manager: tf.train.CheckpointManager,
    logging_epoch_freq: int = 100,
    epoch_var: Optional[tf.Variable] = None,
    step_var: Optional[tf.Variable] = None,
):
    """Mini-batch training loop that saves a checkpoint via `manager` every
    `logging_epoch_freq` epochs, optionally recording progress in
    `epoch_var` / `step_var` so it is restored along with the model."""
    step_fn = tf.function(optimization_step)
    batch_iterator = iter(train_dataset)
    for epoch in range(epochs):
        for step in range(ci_niter(num_batches_per_epoch)):
            step_fn(model, next(batch_iterator))
            if step_var is not None:
                # global step = completed batches across all epochs
                step_var.assign(epoch * num_batches_per_epoch + step + 1)
        if epoch_var is not None:
            epoch_var.assign(epoch + 1)
        epoch_id = epoch + 1
        if epoch_id % logging_epoch_freq == 0:
            ckpt_path = manager.save()
            tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}, saved at {ckpt_path}")
# %%
step_var = tf.Variable(1, dtype=tf.int32, trainable=False)
epoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)
ckpt = tf.train.Checkpoint(model=model, step=step_var, epoch=epoch_var)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=5)
print(f"Checkpoint folder path at: {output_logdir}")
checkpointing_training_loop(
model,
batch_size=batch_size,
epochs=1000,
manager=manager,
epoch_var=epoch_var,
step_var=step_var,
)
# %% [markdown]
# After the models have been saved, we can restore them using ```tf.train.Checkpoint.restore``` and assert that their performance corresponds to that logged during training.
# %%
for i, recorded_checkpoint in enumerate(manager.checkpoints):
ckpt.restore(recorded_checkpoint)
print(
f"{i} restored model from epoch {int(epoch_var)} [step:{int(step_var)}] : ELBO training set {model.elbo(data)}"
)
# %% [markdown]
# ## Copying (hyper)parameter values between models
#
# It is easy to interact with the set of all parameters of a model or a subcomponent programmatically.
#
# The following returns a dictionary of all parameters within
# %%
model = gpflow.models.SGPR(data, kernel=kernel, inducing_variable=inducing_variable)
# %%
gpflow.utilities.parameter_dict(model)
# %% [markdown]
# Such a dictionary can be assigned back to this model (or another model with the same tree of parameters) as follows:
# %%
params = gpflow.utilities.parameter_dict(model)
gpflow.utilities.multiple_assign(model, params)
# %% [markdown]
# ### TensorFlow `saved_model`
#
# At present, TensorFlow does not support saving custom variables like instances of the `gpflow.base.Parameter` class, see [this TensorFlow github issue](https://github.com/tensorflow/tensorflow/issues/34908).
#
# However, once training is complete, it is possible to clone the model and replace all `gpflow.base.Parameter`s with `tf.constant`s holding the same value:
# %%
model
# %%
frozen_model = gpflow.utilities.freeze(model)
# %% [markdown]
# In order to save the model we need to define a `tf.Module` holding the `tf.function`'s that we wish to export, as well as a reference to the underlying model:
# %%
module_to_save = tf.Module()
predict_fn = tf.function(
frozen_model.predict_f,
input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)],
autograph=False,
)
module_to_save.predict = predict_fn
# %% [markdown]
# Save original result for further comparison
# %%
original_result = module_to_save.predict(samples_input)
# %% [markdown]
# Let's save the module
# %%
save_dir = str(pathlib.Path(tempfile.gettempdir()))
tf.saved_model.save(module_to_save, save_dir)
# %% [markdown]
# Load module back as new instance and compare predict results
# %%
loaded_model = tf.saved_model.load(save_dir)
loaded_result = loaded_model.predict(samples_input)
np.testing.assert_array_equal(loaded_result, original_result)
# %% [markdown]
# ## User config update
#
# In this notebook, we used a lot `gpflow.config` methods for setting and getting default attributes from global configuration. However, GPflow provides a way for local config modification without updating values in global. As you can see below, using `gpflow.config.as_context` replaces temporarily global config with your instance. At creation time, custom config instance uses standard values from the global config:
# %%
user_config = gpflow.config.Config(float=tf.float32, positive_bijector="exp")
user_str = "User config\t"
global_str = "Global config\t"
with gpflow.config.as_context(user_config):
print(f"{user_str} gpflow.config.default_float = {gpflow.config.default_float()}")
print(
f"{user_str} gpflow.config.positive_bijector = {gpflow.config.default_positive_bijector()}"
)
print(f"{global_str} gpflow.config.default_float = {gpflow.config.default_float()}")
print(f"{global_str} gpflow.config.positive_bijector = {gpflow.config.default_positive_bijector()}")
# %%
with gpflow.config.as_context(user_config):
p = gpflow.Parameter(1.1, transform=gpflow.utilities.positive())
print(f"{user_str}{p}")
p = gpflow.Parameter(1.1, transform=gpflow.utilities.positive())
print(f"{global_str}{p}")
| 32.088183
| 419
| 0.732989
|
4a0edc79aae76e182b6e5b09f855e8b59860c533
| 14,683
|
py
|
Python
|
examples/entity_disambiguation/main.py
|
techthiyanes/luke
|
aa234d934527eda96a9a4c3bd9c7999c05c3fd58
|
[
"Apache-2.0"
] | null | null | null |
examples/entity_disambiguation/main.py
|
techthiyanes/luke
|
aa234d934527eda96a9a4c3bd9c7999c05c3fd58
|
[
"Apache-2.0"
] | null | null | null |
examples/entity_disambiguation/main.py
|
techthiyanes/luke
|
aa234d934527eda96a9a4c3bd9c7999c05c3fd58
|
[
"Apache-2.0"
] | null | null | null |
import functools
import json
import logging
import os
import random
from argparse import Namespace
import click
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import WEIGHTS_NAME
from wikipedia2vec.dump_db import DumpDB
from luke.utils.entity_vocab import MASK_TOKEN, PAD_TOKEN
from ..utils.trainer import Trainer, trainer_args
from ..utils import set_seed
from .model import LukeForEntityDisambiguation
from .utils import EntityDisambiguationDataset, convert_documents_to_features
logger = logging.getLogger(__name__)
@click.group(name="entity-disambiguation")
def cli():
    """Click command group for the entity-disambiguation example commands."""
    pass
@cli.command()
@click.option("--data-dir", type=click.Path(exists=True), default="data/entity_disambiguation")
@click.option(
    "-t",
    "--test-set",
    default=["test_b", "test_b_ppr", "ace2004", "aquaint", "msnbc", "wikipedia", "clueweb"],
    multiple=True,
)
@click.option("--do-train/--no-train", default=False)
@click.option("--do-eval/--no-eval", default=True)
@click.option("--num-train-epochs", default=2)
@click.option("--train-batch-size", default=1)
@click.option("--max-seq-length", default=512)
@click.option("--max-candidate-length", default=30)
@click.option("--masked-entity-prob", default=0.9)
@click.option("--use-context-entities/--no-context-entities", default=True)
@click.option(
    "--context-entity-selection-order", default="highest_prob", type=click.Choice(["natural", "random", "highest_prob"])
)
@click.option("--document-split-mode", default="per_mention", type=click.Choice(["simple", "per_mention"]))
@click.option("--fix-entity-emb/--update-entity-emb", default=True)
@click.option("--fix-entity-bias/--update-entity-bias", default=True)
@click.option("--seed", default=1)
@trainer_args
@click.pass_obj
def run(common_args, **task_args):
    """Train and/or evaluate LUKE on the entity disambiguation datasets.

    Builds a task-specific entity vocabulary from all datasets, remaps the
    pretrained entity embeddings/bias onto it, then optionally fine-tunes
    and evaluates the model. Returns a dict of per-dataset eval results.
    """
    task_args.update(common_args)
    args = Namespace(**task_args)
    set_seed(args.seed)
    dataset = EntityDisambiguationDataset(args.data_dir)
    # Collect every gold and candidate entity title across all datasets;
    # this becomes the task-specific entity vocabulary.
    entity_titles = []
    for data in dataset.get_all_datasets():
        for document in data:
            for mention in document.mentions:
                entity_titles.append(mention.title)
                for candidate in mention.candidates:
                    entity_titles.append(candidate.title)
    entity_titles = frozenset(entity_titles)
    # ids 0 and 1 are reserved for the [PAD] and [MASK] entities
    entity_vocab = {PAD_TOKEN: 0, MASK_TOKEN: 1}
    for n, title in enumerate(sorted(entity_titles), 2):
        entity_vocab[title] = n
    model_config = args.model_config
    model_config.entity_vocab_size = len(entity_vocab)
    model_weights = args.model_weights
    orig_entity_vocab = args.entity_vocab
    orig_entity_emb = model_weights["entity_embeddings.entity_embeddings.weight"]
    if orig_entity_emb.size(0) != len(entity_vocab):  # detect whether the model is fine-tuned
        # Remap pretrained entity embeddings and prediction bias onto the
        # task vocabulary; titles absent from the pretrained vocab stay zero.
        entity_emb = orig_entity_emb.new_zeros((len(entity_titles) + 2, model_config.hidden_size))
        orig_entity_bias = model_weights["entity_predictions.bias"]
        entity_bias = orig_entity_bias.new_zeros(len(entity_titles) + 2)
        for title, index in entity_vocab.items():
            if title in orig_entity_vocab:
                orig_index = orig_entity_vocab[title]
                entity_emb[index] = orig_entity_emb[orig_index]
                entity_bias[index] = orig_entity_bias[orig_index]
        model_weights["entity_embeddings.entity_embeddings.weight"] = entity_emb
        model_weights["entity_embeddings.mask_embedding"] = entity_emb[1].view(1, -1)
        model_weights["entity_predictions.decoder.weight"] = entity_emb
        model_weights["entity_predictions.bias"] = entity_bias
        del orig_entity_bias, entity_emb, entity_bias
    del orig_entity_emb
    model = LukeForEntityDisambiguation(model_config)
    model.load_state_dict(model_weights, strict=False)
    model.to(args.device)
    def collate_fn(batch, is_eval=False):
        # Pads each per-example attribute to the batch maximum length.
        def create_padded_sequence(attr_name, padding_value):
            tensors = [torch.tensor(getattr(o, attr_name), dtype=torch.long) for o in batch]
            return torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True, padding_value=padding_value)
        ret = dict(
            word_ids=create_padded_sequence("word_ids", args.tokenizer.pad_token_id),
            word_segment_ids=create_padded_sequence("word_segment_ids", 0),
            word_attention_mask=create_padded_sequence("word_attention_mask", 0),
            entity_ids=create_padded_sequence("entity_ids", 0),
            entity_position_ids=create_padded_sequence("entity_position_ids", -1),
            entity_segment_ids=create_padded_sequence("entity_segment_ids", 0),
            entity_attention_mask=create_padded_sequence("entity_attention_mask", 0),
        )
        ret["entity_candidate_ids"] = create_padded_sequence("entity_candidate_ids", 0)
        if is_eval:
            # keep references needed to map predictions back to documents
            ret["document"] = [o.document for o in batch]
            ret["mentions"] = [o.mentions for o in batch]
            ret["target_mention_indices"] = [o.target_mention_indices for o in batch]
        return ret
    if args.do_train:
        train_data = convert_documents_to_features(
            dataset.train,
            args.tokenizer,
            entity_vocab,
            "train",
            "simple",
            args.max_seq_length,
            args.max_candidate_length,
            args.max_mention_length,
        )
        train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size, collate_fn=collate_fn, shuffle=True)
        logger.info("Fix entity embeddings during training: %s", args.fix_entity_emb)
        if args.fix_entity_emb:
            model.entity_embeddings.entity_embeddings.weight.requires_grad = False
        logger.info("Fix entity bias during training: %s", args.fix_entity_bias)
        if args.fix_entity_bias:
            model.entity_predictions.bias.requires_grad = False
        num_train_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        trainer = EntityDisambiguationTrainer(args, model, train_dataloader, num_train_steps)
        trainer.train()
        if args.output_dir:
            logger.info("Saving model to %s", args.output_dir)
            torch.save(model.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME))
            torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    results = {}
    if args.do_eval:
        model.eval()
        for dataset_name in args.test_set:
            print("***** Dataset: %s *****" % dataset_name)
            eval_documents = getattr(dataset, dataset_name)
            eval_data = convert_documents_to_features(
                eval_documents,
                args.tokenizer,
                entity_vocab,
                "eval",
                args.document_split_mode,
                args.max_seq_length,
                args.max_candidate_length,
                args.max_mention_length,
            )
            # evaluate() requires batch_size 1 (see its implementation)
            eval_dataloader = DataLoader(
                eval_data, batch_size=1, collate_fn=functools.partial(collate_fn, is_eval=True)
            )
            predictions_file = None
            if args.output_dir:
                predictions_file = os.path.join(args.output_dir, "eval_predictions_%s.jsonl" % dataset_name)
            results[dataset_name] = evaluate(args, eval_dataloader, model, entity_vocab, predictions_file)
    if args.output_dir:
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as f:
            json.dump(results, f, indent=2, sort_keys=True)
    return results
@cli.command()
@click.argument("dump_db_file", type=click.Path(exists=True))
@click.argument("out_file", type=click.File("w"))
@click.option("--data-dir", type=click.Path(exists=True), default="data/entity-disambiguation")
def create_candidate_list(dump_db_file, out_file, data_dir):
    """Write the set of candidate entity titles (redirects resolved, limited
    to titles present in the dump) found across all datasets, one per line."""
    dump_db = DumpDB(dump_db_file)
    valid_titles = frozenset(dump_db.titles())
    dataset = EntityDisambiguationDataset(data_dir)
    titles = set()
    for documents in dataset.get_all_datasets():
        for document in documents:
            for mention in document.mentions:
                for candidate in mention.candidates:
                    resolved = dump_db.resolve_redirect(candidate.title)
                    if resolved in valid_titles:
                        titles.add(resolved)
    for title in titles:
        out_file.write(title + "\n")
@cli.command()
@click.argument("dump_db_file", type=click.Path(exists=True))
@click.argument("out_file", type=click.File(mode="w"))
def create_title_list(dump_db_file, out_file):
    """Dump every page title in the DumpDB to `out_file`, one per line."""
    dump_db = DumpDB(dump_db_file)
    out_file.writelines(f"{title}\n" for title in dump_db.titles())
@cli.command()
@click.argument("dump_db_file", type=click.Path(exists=True))
@click.argument("out_file", type=click.File(mode="w"))
def create_redirect_tsv(dump_db_file, out_file):
    """Dump all redirects as tab-separated `source<TAB>destination` lines."""
    dump_db = DumpDB(dump_db_file)
    for source, destination in dump_db.redirects():
        out_file.write(f"{source}\t{destination}\n")
class EntityDisambiguationTrainer(Trainer):
    """Trainer that randomly masks entities in each batch for prediction.

    For each example, roughly `masked_entity_prob` of its entities are
    replaced with the [MASK] entity id (1) and must be predicted; the
    remaining (unmasked) entities get label -1 so the loss ignores them.
    """

    def _create_model_arguments(self, batch):
        batch["entity_labels"] = batch["entity_ids"].clone()
        for index, entity_length in enumerate(batch["entity_attention_mask"].sum(1).tolist()):
            # always mask at least one entity
            masked_entity_length = max(1, round(entity_length * self.args.masked_entity_prob))
            # BUGFIX: permute over *all* entity positions. The original code
            # pre-sliced the permutation to masked_entity_length, which made
            # permutated_indices[masked_entity_length:] empty, so the -1
            # label masking below never happened and loss was also computed
            # on entities that remained visible in the input.
            permutated_indices = torch.randperm(entity_length)
            batch["entity_ids"][index, permutated_indices[:masked_entity_length]] = 1  # [MASK]
            batch["entity_labels"][index, permutated_indices[masked_entity_length:]] = -1
        return batch
def evaluate(args, eval_dataloader, model, entity_vocab, output_file=None):
    """Evaluate an entity-disambiguation model and return precision/recall/F1.

    Iterates over ``eval_dataloader`` (the batch size must be 1), predicts an
    entity for every target mention, optionally writes one JSON record per
    mention to ``output_file`` (JSONL), prints the metrics, and returns them
    as ``dict(precision=..., recall=..., f1=...)``.
    """
    predictions = []
    context_entities = []
    labels = []
    documents = []
    mentions = []
    # Map entity id -> entity title so predictions can be reported by name.
    reverse_entity_vocab = {v: k for k, v in entity_vocab.items()}
    for item in tqdm(eval_dataloader, leave=False):  # the batch size must be 1
        # Move model inputs to the device; keep bookkeeping fields on CPU.
        inputs = {
            k: v.to(args.device) for k, v in item.items() if k not in ("document", "mentions", "target_mention_indices")
        }
        entity_ids = inputs.pop("entity_ids")
        entity_attention_mask = inputs.pop("entity_attention_mask")
        # Start with every entity position masked (id 1 is [MASK]).
        input_entity_ids = entity_ids.new_full(entity_ids.size(), 1)  # [MASK]
        entity_length = entity_ids.size(1)
        with torch.no_grad():
            if args.use_context_entities:
                # Iteratively resolve one entity per step, feeding previous
                # predictions back in as context for the remaining masks.
                result = torch.zeros(entity_length, dtype=torch.long)
                prediction_order = torch.zeros(entity_length, dtype=torch.long)
                for n in range(entity_length):
                    logits = model(entity_ids=input_entity_ids, entity_attention_mask=entity_attention_mask, **inputs)[
                        0
                    ]
                    # Zero out probabilities at already-resolved positions so
                    # only still-masked positions remain eligible.
                    probs = F.softmax(logits, dim=2) * (input_entity_ids == 1).unsqueeze(-1).type_as(logits)
                    max_probs, max_indices = torch.max(probs.squeeze(0), dim=1)
                    # Pick which still-masked mention to resolve this step.
                    if args.context_entity_selection_order == "highest_prob":
                        target_index = torch.argmax(max_probs, dim=0)
                    elif args.context_entity_selection_order == "random":
                        target_index = random.choice((input_entity_ids == 1).squeeze(0).nonzero().view(-1).tolist())
                    elif args.context_entity_selection_order == "natural":
                        target_index = (input_entity_ids == 1).squeeze(0).nonzero().view(-1)[0]
                    input_entity_ids[0, target_index] = max_indices[target_index]
                    result[target_index] = max_indices[target_index]
                    prediction_order[target_index] = n
            else:
                # Single forward pass: predict all masked entities at once.
                logits = model(entity_ids=input_entity_ids, entity_attention_mask=entity_attention_mask, **inputs)[0]
                result = torch.argmax(logits, dim=2).squeeze(0)
        for index in item["target_mention_indices"][0]:
            predictions.append(result[index].item())
            labels.append(entity_ids[0, index].item())
            documents.append(item["document"][0])
            mentions.append(item["mentions"][0][index])
            if args.use_context_entities:
                # Record the entities resolved *before* this mention, i.e.
                # the context that was available when it was predicted.
                context_entities.append(
                    [
                        dict(
                            order=prediction_order[n].item(),
                            prediction=reverse_entity_vocab[result[n].item()],
                            label=mention.title,
                            text=mention.text,
                        )
                        for n, mention in enumerate(item["mentions"][0])
                        if prediction_order[n] < prediction_order[index]
                    ]
                )
            else:
                context_entities.append([])
    num_correct = 0
    num_mentions = 0
    num_mentions_with_candidates = 0
    eval_predictions = []
    for prediction, label, document, mention, cxt in zip(predictions, labels, documents, mentions, context_entities):
        if prediction == label:
            num_correct += 1
        # NOTE(review): id 0 appears to denote "no entity" — these asserts
        # rely on gold labels never being 0 and on candidate-bearing mentions
        # never predicting 0; confirm against the entity vocabulary.
        assert not (mention.candidates and prediction == 0)
        assert label != 0
        num_mentions += 1
        if mention.candidates:
            num_mentions_with_candidates += 1
        eval_predictions.append(
            dict(
                document_id=document.id,
                document_words=document.words,
                document_length=len(document.words),
                mention_length=len(document.mentions),
                mention=dict(
                    label=mention.title,
                    text=mention.text,
                    span=(mention.start, mention.end),
                    candidate_length=len(mention.candidates),
                    candidates=[dict(prior_prob=c.prior_prob, title=c.title) for c in mention.candidates],
                ),
                prediction=reverse_entity_vocab[prediction],
                context_entities=cxt,
            )
        )
    if output_file:
        # One JSON object per line (JSONL) for easy downstream analysis.
        with open(output_file, "w") as f:
            for obj in eval_predictions:
                f.write(json.dumps(obj) + "\n")
    # Precision counts only mentions that had candidates; recall counts all.
    precision = num_correct / num_mentions_with_candidates
    recall = num_correct / num_mentions
    f1 = 2.0 * precision * recall / (precision + recall)
    print("F1: %.5f" % f1)
    print("Precision: %.5f" % precision)
    print("Recall: %.5f" % recall)
    return dict(precision=precision, recall=recall, f1=f1)
| 41.713068
| 120
| 0.640469
|
4a0edce1db4cf3722a50b30ad570a5f5c148b1fe
| 1,371
|
py
|
Python
|
tatami/client.py
|
ayemos/tatami
|
431c1fb3cd1614731083db1436d9021efe6c0a1b
|
[
"MIT"
] | null | null | null |
tatami/client.py
|
ayemos/tatami
|
431c1fb3cd1614731083db1436d9021efe6c0a1b
|
[
"MIT"
] | null | null | null |
tatami/client.py
|
ayemos/tatami
|
431c1fb3cd1614731083db1436d9021efe6c0a1b
|
[
"MIT"
] | null | null | null |
import six
import os
import json
from six.moves.urllib.request import urlopen
from tatami.downloaders import *
class Client(object):
    """Client for a tatami dataset server.

    Fetches per-dataset metadata from the server and downloads datasets
    into a local data directory via the appropriate downloader.
    """

    def __init__(self, tatami_host,
                 data_directory_path="%s/.tatami/datasets" % os.environ.get("HOME")):
        self.__tatami_host = tatami_host
        self.__data_directory_path = data_directory_path

    def load_dataset(self, dataset_name, force=False):
        """Download the dataset if needed and return the downloader's result."""
        return self._downloader_for_dataset_name(dataset_name).maybe_download(
            dataset_name, self.__data_directory_path, force)

    def get_path_for_dataset(self, dataset_name):
        """Return the local filesystem path where the dataset is stored."""
        return "%s/%s" % (self.__data_directory_path, dataset_name)

    def _downloader_for_dataset_name(self, dataset_name):
        """Build the downloader matching the dataset's metadata type."""
        meta_data = self._retrieve_meta_data_for_dataset_name(dataset_name)
        if meta_data['type'] != 'S3Dataset':
            raise Exception("Unrecognized datasource type %s" % meta_data['type'])
        return S3Downloader(meta_data['bucket_name'], meta_data['prefix'])

    def _retrieve_meta_data_for_dataset_name(self, dataset_name):
        """Fetch and decode the dataset's JSON metadata from the server."""
        # XXX: Not Found Exception
        request_url = "%s/datasets/%s.json" % (self.__tatami_host, dataset_name)
        return json.loads(urlopen(request_url).read().decode('utf-8'))
| 35.153846
| 89
| 0.690737
|
4a0edd2dca30db164395440319ae9e2370a2d7b9
| 541
|
py
|
Python
|
zfs3backup/get.py
|
Anglelengyug/mmontagnan
|
e5d17749d0d7f23d3746637f86e17465d88bdbd7
|
[
"Apache-2.0"
] | 7
|
2019-09-21T23:07:46.000Z
|
2021-05-08T07:04:24.000Z
|
zfs3backup/get.py
|
Anglelengyug/mmontagnan
|
e5d17749d0d7f23d3746637f86e17465d88bdbd7
|
[
"Apache-2.0"
] | 2
|
2020-10-06T20:03:46.000Z
|
2020-10-07T16:14:50.000Z
|
zfs3backup/get.py
|
Anglelengyug/mmontagnan
|
e5d17749d0d7f23d3746637f86e17465d88bdbd7
|
[
"Apache-2.0"
] | 2
|
2019-12-18T03:16:04.000Z
|
2021-05-08T07:04:28.000Z
|
import argparse
import sys
import boto3
from zfs3backup.config import get_config
def download(bucket, name):
    """Stream the S3 object *name* from *bucket* to standard output.

    ``Bucket.download_fileobj`` writes raw bytes, so write to stdout's
    underlying binary buffer when it exists (Python 3 text-mode stdout
    rejects bytes); fall back to the stream itself otherwise.
    """
    bucket.download_fileobj(name, getattr(sys.stdout, "buffer", sys.stdout))
def main():
    """CLI entry point: stream the named S3 key to stdout.

    Reads the bucket name from the tool configuration and the key name
    from the command line.
    """
    cfg = get_config()
    arg_parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    arg_parser.add_argument('name', help='name of S3 key')
    parsed = arg_parser.parse_args()
    bucket = boto3.resource('s3').Bucket(cfg['BUCKET'])
    download(bucket, parsed.name)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 19.321429
| 73
| 0.676525
|
4a0edde5f7f4a5695f9f3845d024a988fcaf3081
| 470
|
py
|
Python
|
bibclean/updating/general.py
|
Svdvoort/BibClean
|
0d891d0dc0d0b335afdf3a09f4df6103d1e96215
|
[
"MIT"
] | null | null | null |
bibclean/updating/general.py
|
Svdvoort/BibClean
|
0d891d0dc0d0b335afdf3a09f4df6103d1e96215
|
[
"MIT"
] | 218
|
2020-11-20T08:20:01.000Z
|
2022-03-28T19:21:18.000Z
|
bibclean/updating/general.py
|
Svdvoort/BibClean
|
0d891d0dc0d0b335afdf3a09f4df6103d1e96215
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
def update_field(bib_entry, field_name, value):
    """Set *field_name* on *bib_entry* to *value*, logging any change.

    Logs an info message when the field is added or its value changes, and
    a warning when an update shortens the author list. Returns the (mutated)
    ``bib_entry``.
    """
    if field_name not in bib_entry or bib_entry[field_name] != value:
        logger.info("Changed field {} for {}".format(field_name, bib_entry["ID"]))
        # Guard on field presence: the original unconditionally indexed
        # bib_entry["author"], which raised KeyError when adding an author
        # field to an entry that did not have one yet.
        if field_name == "author" and field_name in bib_entry and len(value) < len(bib_entry[field_name]):
            logger.warning("Removed authors from {}".format(bib_entry["ID"]))
        bib_entry[field_name] = value
    return bib_entry
| 36.153846
| 82
| 0.680851
|
4a0edf458280743cb7ab99a91914158128a62a4d
| 3,793
|
py
|
Python
|
src/Products/ZCatalog/CatalogBrains.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 4
|
2018-09-13T22:10:22.000Z
|
2019-06-15T08:26:52.000Z
|
src/Products/ZCatalog/CatalogBrains.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 97
|
2015-02-05T11:58:41.000Z
|
2022-02-08T21:34:11.000Z
|
src/Products/ZCatalog/CatalogBrains.py
|
zopefoundation/Products.ZCatalog
|
d4fd9fe28d27e8ff43e911025c1258f1e8d50ad5
|
[
"ZPL-2.1"
] | 12
|
2015-04-03T05:30:24.000Z
|
2019-08-12T21:50:21.000Z
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from Acquisition import Implicit
from Acquisition import aq_base
from Acquisition import aq_get
from Acquisition import aq_inner
from Acquisition import aq_parent
from Record import Record
from zope.globalrequest import getRequest
from zope.interface import implementer
from ZPublisher.BaseRequest import RequestContainer
from .interfaces import ICatalogBrain
@implementer(ICatalogBrain)
class AbstractCatalogBrain(Record, Implicit):
    """Abstract base brain that handles looking up attributes as
    required, and provides just enough smarts to let us get the URL, path,
    and cataloged object without having to ask the catalog directly.
    """

    def has_key(self, key):
        # Legacy mapping-style membership test over the record schema.
        return key in self.__record_schema__

    def __contains__(self, name):
        # ``name in brain`` is true when the schema defines that column.
        return name in self.__record_schema__

    def getPath(self):
        """Get the physical path for this record"""
        # The acquisition parent (the catalog) maps record ids to paths.
        return aq_parent(aq_inner(self)).getpath(self.data_record_id_)

    def getURL(self, relative=0):
        """Generate a URL for this record"""
        request = aq_get(self, 'REQUEST', None)
        if request is None:
            # Fall back to the thread-global request when none is acquired.
            request = getRequest()
        return request.physicalPathToURL(self.getPath(), relative)

    def _unrestrictedGetObject(self):
        """Return the object for this record

        Same as getObject, but does not do security checks.
        """
        parent = aq_parent(self)
        if (aq_get(parent, 'REQUEST', None) is None):
            request = getRequest()
            if request is not None:
                # path should be absolute, starting at the physical root
                parent = self.getPhysicalRoot()
                request_container = RequestContainer(REQUEST=request)
                # Re-wrap the root so the global request is acquirable.
                parent = aq_base(parent).__of__(request_container)
        return parent.unrestrictedTraverse(self.getPath())

    def getObject(self, REQUEST=None):
        """Return the object for this record

        Will return None if the object cannot be found via its cataloged path
        (i.e., it was deleted or moved without recataloging), or if the user is
        not authorized to access the object.

        This method mimicks a subset of what publisher's traversal does,
        so it allows access if the final object can be accessed even
        if intermediate objects cannot.
        """
        path = self.getPath().split('/')
        if not path:
            return None
        parent = aq_parent(self)
        if (aq_get(parent, 'REQUEST', None) is None):
            request = getRequest()
            if request is not None:
                # path should be absolute, starting at the physical root
                parent = self.getPhysicalRoot()
                request_container = RequestContainer(REQUEST=request)
                parent = aq_base(parent).__of__(request_container)
        if len(path) > 1:
            # Traverse to the parent without security checks, then check
            # security only on the final segment (see docstring).
            parent = parent.unrestrictedTraverse(path[:-1])
        return parent.restrictedTraverse(path[-1])

    def getRID(self):
        """Return the record ID for this object."""
        return self.data_record_id_
class NoBrainer:
    """Placeholder brain class used when no brain class is specified."""
| 37.554455
| 79
| 0.64619
|
4a0ee27e5582993b808f664973edfefb02150dfd
| 753
|
py
|
Python
|
learning_logs/migrations/0003_entry.py
|
davidbezerra405/learning_log
|
e540fcc843d387f3484fab1db0a3b2e198ebf167
|
[
"MIT"
] | null | null | null |
learning_logs/migrations/0003_entry.py
|
davidbezerra405/learning_log
|
e540fcc843d387f3484fab1db0a3b2e198ebf167
|
[
"MIT"
] | null | null | null |
learning_logs/migrations/0003_entry.py
|
davidbezerra405/learning_log
|
e540fcc843d387f3484fab1db0a3b2e198ebf167
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ``Entry`` model with a
    # foreign key to ``learning_logs.Topic``.

    dependencies = [
        ('learning_logs', '0002_auto_20210726_1831'),
    ]

    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('text', models.TextField()),
                # Timestamp set automatically when the row is first created.
                ('date_added', models.DateTimeField(auto_now_add=True)),
                ('topic', models.ForeignKey(to='learning_logs.Topic')),
            ],
            options={
                # Avoid the default "entrys" plural in the admin UI.
                'verbose_name_plural': 'entries',
            },
        ),
    ]
| 27.888889
| 114
| 0.559097
|
4a0ee2b8e2012806add4f3b5abd195c75792dab7
| 77,646
|
py
|
Python
|
torch/overrides.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | 1
|
2021-05-11T11:53:47.000Z
|
2021-05-11T11:53:47.000Z
|
torch/overrides.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | 1
|
2021-05-10T01:18:33.000Z
|
2021-05-10T01:18:33.000Z
|
torch/overrides.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | null | null | null |
"""
Python implementation of ``__torch_function__``
While most of the torch API and handling for ``__torch_function__`` happens
at the C++ level, some of the torch API is written in Python so we need
python-level handling for ``__torch_function__`` overrides as well. The main
developer-facing functionality in this file are handle_torch_function and
has_torch_function. See torch/functional.py and test/test_overrides.py
for usage examples.
Note
----
heavily inspired by NumPy's ``__array_function__`` (see:
https://github.com/pytorch/pytorch/issues/24015 and
https://www.numpy.org/neps/nep-0018-array-function-protocol.html
)
If changing this file in a way that can affect ``__torch_function__`` overhead,
please report the benchmarks in ``benchmarks/overrides_benchmark``. See the
instructions in the ``README.md`` in that directory.
"""
import __future__
import collections
import functools
import types
from typing import Dict, Set, List, Any, Callable, Iterable, Type
import torch
from torch._C import (
_has_torch_function, _has_torch_function_unary,
_has_torch_function_variadic, _add_docstr)
__all__ = [
"get_ignored_functions",
"get_overridable_functions",
"get_testing_overrides",
"handle_torch_function",
"has_torch_function",
"is_tensor_like",
"is_tensor_method_or_property",
"wrap_torch_function",
]
@functools.lru_cache(None)
def get_ignored_functions() -> Set[Callable]:
    """
    Return public functions that cannot be overridden by ``__torch_function__``.

    Returns
    -------
    Set[Callable]
        A tuple of functions that are publicly available in the torch API but cannot
        be overridden with ``__torch_function__``. Mostly this is because none of the
        arguments of these functions are tensors or tensor-likes.

    Examples
    --------
    >>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
    True
    >>> torch.add in torch.overrides.get_ignored_functions()
    False
    """
    Tensor = torch.Tensor
    # NOTE: the result is computed once and cached by lru_cache(None) above.
    return {
        torch.typename,
        torch.is_tensor,
        torch.is_storage,
        torch.set_default_tensor_type,
        torch.set_rng_state,
        torch.get_rng_state,
        torch.manual_seed,
        torch.initial_seed,
        torch.seed,
        torch.save,
        torch.load,
        torch.set_printoptions,
        torch.fork,
        torch.get_default_dtype,
        torch.get_num_interop_threads,
        torch.get_num_threads,
        torch.init_num_threads,
        torch.import_ir_module,
        torch.import_ir_module_from_buffer,
        torch.is_anomaly_enabled,
        torch.is_grad_enabled,
        torch.merge_type_from_type_comment,
        torch.parse_ir,
        torch.parse_schema,
        torch.parse_type_comment,
        torch.set_anomaly_enabled,
        torch.set_flush_denormal,
        torch.set_num_interop_threads,
        torch.set_num_threads,
        torch.wait,
        torch.as_tensor,
        torch.from_numpy,
        torch.get_device,
        torch.tensor,
        torch.default_generator,
        torch.has_cuda,
        torch.has_cudnn,
        torch.has_lapack,
        torch.device,
        torch.dtype,
        torch.finfo,
        torch.has_mkl,
        torch.has_mkldnn,
        torch.has_openmp,
        torch.iinfo,
        torch.memory_format,
        torch.qscheme,
        torch.set_grad_enabled,
        torch.no_grad,
        torch.enable_grad,
        torch.layout,
        torch.align_tensors,
        torch.arange,
        torch.as_strided,
        torch.bartlett_window,
        torch.blackman_window,
        torch.broadcast_shapes,
        torch.can_cast,
        torch.cudnn_affine_grid_generator,
        torch.cudnn_batch_norm,
        torch.cudnn_convolution,
        torch.cudnn_convolution_transpose,
        torch.cudnn_convolution_relu,
        torch.cudnn_convolution_add_relu,
        torch.cudnn_grid_sampler,
        torch.cudnn_is_acceptable,
        torch.empty,
        torch.empty_strided,
        torch.empty_quantized,
        torch.eye,
        torch.fft.fftfreq,
        torch.fft.rfftfreq,
        torch.from_file,
        torch.full,
        torch.hamming_window,
        torch.hann_window,
        torch.kaiser_window,
        torch.linspace,
        torch.logspace,
        torch.mkldnn_adaptive_avg_pool2d,
        torch.mkldnn_convolution,
        torch.mkldnn_convolution_backward_weights,
        torch.mkldnn_max_pool2d,
        torch.mkldnn_max_pool3d,
        torch.mkldnn_linear_backward_weights,
        torch.normal,
        torch.ones,
        torch.promote_types,
        torch.rand,
        torch.randn,
        torch.randint,
        torch.randperm,
        torch.range,
        torch.result_type,
        torch.scalar_tensor,
        torch.sparse_coo_tensor,
        torch.sparse_csr_tensor,
        torch.tril_indices,
        torch.triu_indices,
        torch.vander,
        torch.zeros,
        torch._jit_internal.boolean_dispatch,
        torch.nn.functional.assert_int_or_pair,
        torch.nn.functional.upsample,
        torch.nn.functional.upsample_bilinear,
        torch.nn.functional.upsample_nearest,
        torch.nn.functional.has_torch_function,
        torch.nn.functional.has_torch_function_unary,
        torch.nn.functional.has_torch_function_variadic,
        torch.nn.functional.handle_torch_function,
        torch.nn.functional.sigmoid,
        torch.nn.functional.hardsigmoid,
        torch.nn.functional.tanh,
        has_torch_function,
        handle_torch_function,
        torch.set_autocast_enabled,
        torch.is_autocast_enabled,
        torch.clear_autocast_cache,
        torch.autocast_increment_nesting,
        torch.autocast_decrement_nesting,
        torch.nn.functional.hardswish,
        torch.is_vulkan_available,
        torch.is_deterministic,
        torch.are_deterministic_algorithms_enabled,
        torch.use_deterministic_algorithms,
        torch.set_deterministic,
        torch.unify_type_list,
        torch.is_warn_always_enabled,
        torch.set_warn_always,
        torch.vitals_enabled,
        torch.set_vital,
        # Tensor methods/attributes that bypass the override machinery.
        Tensor.__delitem__,
        Tensor.__dir__,
        Tensor.__getattribute__,
        Tensor.__init__,
        Tensor.__iter__,
        Tensor.__init_subclass__,
        Tensor.__delattr__,
        Tensor.__setattr__,
        Tensor.__torch_function__,
        Tensor.__new__,
        Tensor.__class__,
        Tensor.__subclasshook__,
        Tensor.as_subclass,
        Tensor.reinforce,
        Tensor.new,
        Tensor.new_tensor,
        Tensor.new_empty,
        Tensor.new_empty_strided,
        Tensor.new_zeros,
        Tensor.new_ones,
        Tensor.new_full,
        Tensor._make_subclass,
        Tensor.stride,
        Tensor.unflatten,
        Tensor.to_sparse_csr,
        Tensor._reduce_ex_internal,
    }
@functools.lru_cache(None)
def get_testing_overrides() -> Dict[Callable, Callable]:
"""Return a dict containing dummy overrides for all overridable functions
Returns
-------
Dict[Callable, Callable]
A dictionary that maps overridable functions in the PyTorch API to
lambda functions that have the same signature as the real function
and unconditionally return -1. These lambda functions are useful
for testing API coverage for a type that defines ``__torch_function__``.
Examples
--------
>>> import inspect
>>> my_add = torch.overrides.get_testing_overrides()[torch.add]
>>> inspect.signature(my_add)
<Signature (input, other, out=None)>
"""
# Every function in the PyTorchAPI that can be overriden needs an entry
# in this dict.
#
# Optimally we would use inspect to get the function signature and define
# the lambda function procedurally but that is blocked by generating
# function signatures for native kernels that can be consumed by inspect.
# See Issue #28233.
Tensor = torch.Tensor
ret: Dict[Callable, Callable] = {
torch.abs: lambda input, out=None: -1,
torch.absolute: lambda input, out=None: -1,
torch.adaptive_avg_pool1d: lambda input, output_size: -1,
torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
torch.acos: lambda input, out=None: -1,
torch.arccos: lambda input, out=None: -1,
torch.acosh: lambda input, out=None: -1,
torch.arccosh: lambda input, out=None: -1,
torch.add: lambda input, other, out=None: -1,
torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
torch.affine_grid_generator: lambda theta, size, align_corners: -1,
torch.all: lambda input, dim=None: -1,
torch.allclose: lambda input, other, trol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
torch.amax: lambda input, dim=None: -1,
torch.amin: lambda input, dim=None: -1,
torch.angle: lambda input, out=None: -1,
torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
torch.argmax: lambda input: -1,
torch.argmin: lambda input: -1,
torch.argsort: lambda input, dim=None: -1,
torch.asin: lambda input, out=None: -1,
torch._assert_async: lambda input: -1,
torch.arcsin: lambda input, out=None: -1,
torch.asinh: lambda input, out=None: -1,
torch.arcsinh: lambda input, out=None: -1,
torch.atan: lambda input, out=None: -1,
torch.arctan: lambda input, out=None: -1,
torch.atan2: lambda input, other, out=None: -1,
torch.atanh: lambda input, out=None: -1,
torch.arctanh: lambda input, out=None: -1,
torch.atleast_1d: lambda *tensors: -1,
torch.atleast_2d: lambda *tensors: -1,
torch.atleast_3d: lambda *tensors: -1,
torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_stats: lambda input, eps: -1,
torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
torch.bernoulli: lambda input, generator=None, out=None: -1,
torch.bilinear: lambda input1, input2, weight, bias: -1,
torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction='mean', pos_weight=None: -1),
torch.bincount: lambda input, weights=None, minlength=0: -1,
torch.binomial: lambda count, prob, generator=None: -1,
torch.bitwise_and: lambda input, other, out=None: -1,
torch.bitwise_not: lambda input, out=None: -1,
torch.bitwise_or: lambda input, other, out=None: -1,
torch.bitwise_xor: lambda input, other, out=None: -1,
torch.block_diag: lambda *tensors: -1,
torch.bmm: lambda input, mat2, out=None: -1,
torch.broadcast_tensors: lambda *tensors: -1,
torch.broadcast_to: lambda self, size: -1,
torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
torch.cartesian_prod: lambda *tensors: -1,
torch.cat: lambda tensors, dim=0, out=None: -1,
torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
torch.ceil: lambda input, out=None: -1,
torch.celu: lambda input, alhpa=1., inplace=False: -1,
torch.chain_matmul: lambda *matrices, out=None: -1,
torch.channel_shuffle: lambda input, groups : -1,
torch.cholesky: lambda input, upper=False, out=None: -1,
torch.linalg.cholesky: lambda input, out=None: -1,
torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
torch.chunk: lambda input, chunks, dim=0: -1,
torch.clamp: lambda input, min=None, max=None, out=None: -1,
torch.clip: lambda input, min=None, max=None, out=None: -1,
torch.clamp_min: lambda input, min, out=None: -1,
torch.clamp_max: lambda input, max, out=None: -1,
torch.column_stack: lambda tensors, out=None: -1,
torch.clone: lambda input: -1,
torch.combinations: lambda input, r=2, with_replacement=False: -1,
torch.complex: lambda real, imag: -1,
torch.copysign: lambda input, other, out=None: -1,
torch.polar: lambda abs, ang: -1,
torch.linalg.cond: lambda input, ord=None: -1,
torch.conj: lambda input, out=None: -1,
torch.constant_pad_nd: lambda input, pad, value=0: -1,
torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_adding, groups: -1,
torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.cos: lambda input, out=None: -1,
torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
torch.cosh: lambda input, out=None: -1,
torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
torch.count_nonzero: lambda input: -1,
torch.cross: lambda input, other, dim=-1, out=None: -1,
torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
zero_infinity=False: -1),
torch.cummax: lambda input, dim, out=None: -1,
torch.cummin: lambda input, dim, out=None: -1,
torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
torch.logcumsumexp: lambda input, dim, out=None: -1,
torch.deg2rad: lambda input, out=None: -1,
torch.dequantize: lambda input: -1,
torch.det: lambda input: -1,
torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined]
torch.detach: lambda input: -1,
torch.diag: lambda input, diagonal=0, out=None: -1,
torch.diag_embed: lambda input, diagonal=0, out=None: -1,
torch.diagflat: lambda input, offset=0: -1,
torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
torch.digamma: lambda input, out=None: -1,
torch.dist: lambda input, other, p=2: -1,
torch.div: lambda input, other, rounding_mode=None, out=None: -1,
torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
torch.dot: lambda input, other, out=None: -1,
torch.dropout: lambda input, p, train, inplace=False: -1,
torch.dsmm: lambda input, mat2: -1,
torch.hsmm: lambda mat1, mat2: -1,
torch.dsplit: lambda input, indices_or_sections: -1,
torch.dstack: lambda tensors, out=None: -1,
torch.eig: lambda input, eigenvectors=False, out=None: -1,
torch.linalg.eig: lambda input, out=None: -1,
torch.linalg.eigvals: lambda input, out=None: -1,
torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
torch.einsum: lambda equation, *operands: -1,
torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
sparse=False: -1),
torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.eq: lambda input, other, out=None: -1,
torch.equal: lambda input, other: -1,
torch.erf: lambda input, out=None: -1,
torch.erfc: lambda input, out=None: -1,
torch.erfinv: lambda input, out=None: -1,
torch.exp: lambda input, out=None: -1,
torch.exp2: lambda input, out=None: -1,
torch.expm1: lambda input, out=None: -1,
torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
weight_zero_point, bias: -1),
torch.fbgemm_linear_quantize_weight: lambda input: -1,
torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
torch.feature_alpha_dropout: lambda input, p, train: -1,
torch.feature_dropout: lambda input, p, train: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.fftshift: lambda input, dim=None: -1,
torch.fft.ifftshift: lambda input, dim=None: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fix: lambda input, out=None: -1,
torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
torch.flip: lambda input, dims: -1,
torch.fliplr: lambda input: -1,
torch.flipud: lambda input: -1,
torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
torch.floor: lambda input, out=None: -1,
torch.floor_divide: lambda input, other: -1,
torch.float_power: lambda input, exponent, out=None: -1,
torch.fmod: lambda input, other, out=None: -1,
torch.frac: lambda input, out=None: -1,
torch.frexp: lambda input, out=None: -1,
torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.functional.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
torch.gcd: lambda input, other, out=None: -1,
torch.ge: lambda input, other, out=None: -1,
torch.greater_equal: lambda input, other, out=None: -1,
torch.geqrf: lambda input, out=None: -1,
torch.i0: lambda input, out=None: -1,
torch.inner: lambda input, other, out=None: -1,
torch.outer: lambda input, vec2, out=None: -1, # alias for torch.ger
torch.ger: lambda input, vec2, out=None: -1,
torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
torch.gru: lambda input, hx, params, has_biases, num_layers, gropout, train, bidirectional, batch_first: -1,
torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.gt: lambda input, other, out=None: -1,
torch.greater: lambda input, other, out=None: -1,
torch.hardshrink: lambda input, lambd=0.5: -1,
torch.heaviside: lambda input, values, out=None: -1,
torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
torch.linalg.householder_product: lambda input, tau: -1,
torch.hspmm: lambda mat1, mat2, out=None: -1,
torch.hsplit: lambda input, indices_or_sections: -1,
torch.hstack: lambda tensors, out=None: -1,
torch.hypot: lambda input, other, out=None: -1,
torch.igamma: lambda input, other, out=None: -1,
torch.igammac: lambda input, other, out=None: -1,
torch.imag: lambda input, out=None: -1,
torch.index_add: lambda input, dim, index, source: -1,
torch.index_copy: lambda input, dim, index, source: -1,
torch.index_put: lambda input, indices, values, accumulate=False: -1,
torch.index_select: lambda input, dim, index, out=None: -1,
torch.index_fill: lambda input, dim, index, value: -1,
torch.isfinite: lambda tensor: -1,
torch.isinf: lambda tensor: -1,
torch.isreal: lambda tensor: -1,
torch.isposinf: lambda input, out=None: -1,
torch.isneginf: lambda input, out=None: -1,
torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
cudnn_enabled: -1),
torch.int_repr: lambda input: -1,
torch.inverse: lambda input, out=None: -1,
torch.linalg.inv: lambda input, out=None: -1,
torch.is_complex: lambda input: -1,
torch.is_distributed: lambda input: -1,
torch.is_floating_point: lambda input: -1,
torch.is_nonzero: lambda input: -1,
torch.is_same_size: lambda input, other: -1,
torch.is_signed: lambda input: -1,
torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.isnan: lambda input: -1,
torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
normalized=False, onesided=None, length=None, return_complex=False: -1),
torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.kron: lambda input, other: -1,
torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, esp=1e-05, cudnn_enabled=True: -1,
torch.lcm: lambda input, other, out=None: -1,
torch.ldexp: lambda input, other, out=None: -1,
torch.le: lambda input, other, out=None: -1,
torch.less_equal: lambda input, other, out=None: -1,
torch.lerp: lambda input, end, weight, out=None: -1,
torch.lgamma: lambda input, out=None: -1,
torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
torch.log: lambda input, out=None: -1,
torch.log_softmax: lambda input, dim, dtype=None: -1,
torch.log10: lambda input, out=None: -1,
torch.log1p: lambda input, out=None: -1,
torch.log2: lambda input, out=None: -1,
torch.logaddexp: lambda input, other, out=None: -1,
torch.logaddexp2: lambda input, other, out=None: -1,
torch.logdet: lambda input: -1,
torch.xlogy: lambda x, y: -1,
torch.logical_and: lambda input, other, out=None: -1,
torch.logical_not: lambda input, out=None: -1,
torch.logical_or: lambda input, other, out=None: -1,
torch.logical_xor: lambda input, other, out=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.logit: lambda input, eps=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.lstsq: lambda input, A, out=None: -1,
torch.lt: lambda input, other, out=None: -1,
torch.less: lambda input, other, out=None: -1,
torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, # type: ignore[attr-defined] # noqa: B950
torch.masked_fill: lambda input, mask, value: -1,
torch.masked_scatter: lambda input, mask, source: -1,
torch.masked_select: lambda input, mask, out=None: -1,
torch.matmul: lambda input, other, out=None: -1,
torch.matrix_power: lambda input, n: -1,
torch.linalg.matrix_power: lambda input, n, out=None: -1,
torch.matrix_rank: lambda input, tol=None, symmetric=False: -1,
torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
torch.linalg.multi_dot: lambda tensors, out=None: -1,
torch.matrix_exp: lambda input: -1,
torch.max: lambda input, out=None: -1,
torch.maximum: lambda input, other, out=None: -1,
torch.fmax: lambda input, other, out=None: -1,
torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.mean: lambda input, dim=None: -1,
torch.median: lambda input, dim=None: -1,
torch.nanmedian: lambda input, dim=None: -1,
torch.meshgrid: lambda *tensors, **kwargs: -1,
torch.min: lambda input, out=None: -1,
torch.minimum: lambda input, other, out=None: -1,
torch.fmin: lambda input, other, out=None: -1,
torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
exponential_average_factor, epsilon: -1),
torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
groups, benchmark, deterministic: -1),
torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark,
deterministic: -1),
torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
dropout, train, bidirectional, batch_sizes, dropout_state: -1),
torch.mm: lambda input, mat2, out=None: -1,
torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
torch.movedim: lambda input, source, destination: -1,
torch.moveaxis: lambda input, source, destination: -1,
torch.msort: lambda input, descending=False, out=None: -1,
torch.mul: lambda input, other, out=None: -1,
torch.multiply: lambda input, other, out=None: -1,
torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
torch.mv: lambda input, vec, out=None: -1,
torch.mvlgamma: lambda input, p: -1,
torch.narrow: lambda input, dim, start, length: -1,
torch.narrow_copy: lambda input, dim, start, length: -1,
torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.ne: lambda input, other, out=None: -1,
torch.not_equal: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,
torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False,
momentum=0.1, eps=1e-05: -1),
torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction="mean": -1),
torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction="mean", pos_weight=None: -1),
torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction="mean": -1),
torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
reduction='mean', zero_infinity=False: -1),
torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
scale_grad_by_freq=False, sparse=False: -1),
torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None,
include_last_offset=False, padding_idx=None: -1),
torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool2d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
torch.nn.functional.gelu: lambda input: -1,
torch.nn.functional.glu: lambda input, dim=-1: -1,
torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
use_input_stats=True, momentum=0.1, eps=1e-05: -1),
torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None,
recompute_scale_factor=None: -1),
torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
torch.nn.functional.linear: lambda input, weight, bias=None: -1,
torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.logsigmoid: lambda input: -1,
torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.multi_head_attention_forward: (
lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v,
add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None,
need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None,
v_proj_weight=None, static_k=None, static_v=None: -1),
torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean': -1),
torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
eps=1e-08, reduce=None, reduction='mean': -1),
torch.nn.functional.prelu: lambda input, weight: -1,
torch.nn.functional.relu: lambda input, inplace=False: -1,
torch.nn.functional.relu6: lambda input, inplace=False: -1,
torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
torch.nn.functional.selu: lambda input, inplace=False: -1,
torch.nn.functional.silu: lambda input, inplace=False: -1,
torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.softsign: lambda input: -1,
torch.nn.functional.tanhshrink: lambda input: -1,
torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
swap=False, size_average=None, reduce=None, reduction='mean': -1),
torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
distance_function=None, margin=1.0,
swap=False, reduction='mean': -1),
torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nonzero: lambda input, as_tuple=False: -1,
torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.numel: lambda input: -1,
torch.orgqr: lambda input, tau: -1,
torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.permute: lambda self, dim: -1,
torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
torch.pdist: lambda input, p=2: -1,
torch.pinverse: lambda input, rcond=1e-15: -1,
torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
torch.pixel_shuffle: lambda input, upscale_factor: -1,
torch.pixel_unshuffle: lambda input, downscale_factor: -1,
torch.poisson: lambda input, generator=None: -1,
torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
torch.polygamma: lambda input, n, out=None: -1,
torch.positive: lambda input, out=None: -1,
torch.prelu: lambda input, weight: -1,
torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.pow: lambda input, exponent, out=None: -1,
torch.prod: lambda input, dtype=None: -1,
torch.put: lambda input, index, source, accumulate=False: -1,
torch.q_per_channel_axis: lambda input: -1,
torch.q_per_channel_scales: lambda input: -1,
torch.q_per_channel_zero_points: lambda input: -1,
torch.q_scale: lambda input: -1,
torch.q_zero_point: lambda input: -1,
torch.qr: lambda input, some=True, out=None: -1,
torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,),
dilation=(1,), ceil_mode=False: -1),
torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0),
dilation=(1, 1), ceil_mode=False: -1),
torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.rad2deg: lambda input, out=None: -1,
torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.ravel: lambda input: -1,
torch.real: lambda input, out=None: -1,
torch.vdot: lambda input, other, out=None: -1,
torch.view_as_real: lambda input: -1,
torch.view_as_complex: lambda input: -1,
torch.reciprocal: lambda input, out=None: -1,
torch.relu: lambda input, inplace=False: -1,
torch.remainder: lambda input, other, out=None: -1,
torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
torch.repeat_interleave: lambda input, dim=None: -1,
torch.reshape: lambda input, shape: -1,
torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.roll: lambda input, shifts, dims=None: -1,
torch.rot90: lambda input, k=1, dims=(0, 1): -1,
torch.round: lambda input, out=None: -1,
torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
torch.rsqrt: lambda input, out=None: -1,
torch.rsub: lambda input, other, alpha=1: -1,
torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.scatter: lambda input, dim, index, src: -1,
torch.scatter_add: lambda input, dim, index, src: -1,
torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
torch.segment_reduce: lambda data, reduce="max", lengths=None, indices=None, axis=0, unsafe=False: -1,
torch.select: lambda input, dim, index: -1,
torch.selu: lambda input, inplace=False: -1,
torch.sigmoid: lambda input, out=None: -1,
torch.sign: lambda input, out=None: -1,
torch.signbit: lambda input, out=None: -1,
torch.sgn: lambda input, out=None: -1,
torch.sin: lambda input, out=None: -1,
torch.sinc: lambda input, out=None: -1,
torch.sinh: lambda input, out=None: -1,
torch.slogdet: lambda input: -1,
torch.linalg.slogdet: lambda input: -1,
torch.smm: lambda input, mat2: -1,
torch.spmm: lambda input, mat2: -1,
torch.softmax: lambda input, dim, dtype=None: -1,
torch.solve: lambda input, A, out=None: -1,
torch.linalg.solve: lambda input, other, out=None: -1,
torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.sqrt: lambda input, out=None: -1,
torch.square: lambda input, out=None: -1,
torch.squeeze: lambda input, dim=None, out=None: -1,
torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.stack: lambda tensors, dim=0, out=None: -1,
torch.std: lambda input, dim=None: -1,
torch.std_mean: lambda input, dim=None: -1,
torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
torch.sub: lambda input, other, out=None: -1,
torch.subtract: lambda input, other, out=None: -1,
torch.sum: lambda input, dim=None: -1,
torch.nansum: lambda input, dim=None: -1,
torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
torch.linalg.svdvals: lambda input, out=None: -1,
torch.symeig: lambda input, eigenvectors=False, upper=True, out=None: -1,
torch.swapaxes: lambda input, dim0, dim1: -1,
torch.swapdims: lambda input, axis0, axis1: -1,
torch.special.entr: lambda input: -1,
torch.special.erf: lambda input: -1,
torch.special.erfc: lambda input: -1,
torch.special.erfinv: lambda input: -1,
torch.special.exp2: lambda input: -1,
torch.special.expm1: lambda input: -1,
torch.special.expit: lambda input: -1,
torch.special.gammaln: lambda input: -1,
torch.special.i0e: lambda input: -1,
torch.special.logit: lambda input: -1,
torch.special.xlog1py: lambda input, other, out=None: -1,
torch.t: lambda input: -1,
torch.take: lambda input, index: -1,
torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
torch.tan: lambda input, out=None: -1,
torch.tanh: lambda input, out=None: -1,
torch.linalg.tensorinv: lambda a, ind=2: -1,
torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
torch.tensordot: lambda a, b, dims=2, out=None: -1,
torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
torch.threshold: lambda input, threshold, value, inplace=False: -1,
torch.tile: lambda input, dims: -1,
torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
torch.trace: lambda input: -1,
torch.transpose: lambda input, dim0, dim1: -1,
torch.trapz: lambda y, x=None, dim=-1: -1,
torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
torch.tril: lambda input, diagonal=0, out=None: -1,
torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
size_average=None, reduce=None, reduction='mean': -1),
torch.triu: lambda input, diagonal=0, out=None: -1,
torch.true_divide: lambda input, other: -1,
torch.trunc: lambda input, out=None: -1,
torch.unbind: lambda input, dim=0: -1,
torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsqueeze: lambda input, dim, out=None: -1,
torch.var: lambda input, dim=None: -1,
torch.var_mean: lambda input, dim=None: -1,
torch.vsplit: lambda input, indices_or_sections: -1,
torch.vstack: lambda tensors, out=None: -1,
torch.where: lambda condition, x=None, y=None: -1,
torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
Tensor.__floordiv__: lambda self, other: -1,
Tensor.__rfloordiv__: lambda self, other: -1,
Tensor.__ifloordiv__: lambda self, other: -1,
Tensor.__truediv__: lambda self, other: -1,
Tensor.__rtruediv__: lambda self, other: -1,
Tensor.__itruediv__: lambda self, other: -1,
Tensor.__lshift__: lambda self, other: -1,
Tensor.__ilshift__: lambda self, other: -1,
Tensor.__rshift__: lambda self, other: -1,
Tensor.__irshift__: lambda self, other: -1,
Tensor.__float__: lambda self: -1,
Tensor.__complex__: lambda self: -1,
Tensor.__array__: lambda self, dtype: -1,
Tensor.__bool__: lambda self: -1,
Tensor.__contains__: lambda self, other: -1,
Tensor.__neg__: lambda self: -1,
Tensor.__invert__: lambda self: -1,
Tensor.__mod__: lambda self, other: -1,
Tensor.__imod__: lambda self, other: -1,
Tensor.__array_wrap__: lambda self, array: -1,
Tensor.__getitem__: lambda self, idx: -1,
Tensor.__deepcopy__: lambda self, memo: -1,
Tensor.__int__: lambda self: -1,
Tensor.__long__: lambda self: -1,
Tensor.__hash__: lambda self: -1,
Tensor.__index__: lambda self: -1,
Tensor.__len__: lambda self: -1,
Tensor.__format__: lambda self, format_spec: -1,
Tensor.__reduce_ex__: lambda self, proto: -1,
Tensor.__reversed__: lambda self: -1,
Tensor.__repr__: lambda self: -1,
Tensor.__setitem__: lambda self, k, v: -1,
Tensor.__setstate__: lambda self, d: -1,
Tensor.T.__get__: lambda self: -1,
Tensor._backward_hooks.__get__: lambda self: -1,
Tensor._base.__get__: lambda self: -1,
Tensor._cdata.__get__: lambda self: -1,
Tensor.grad.__get__: lambda self: -1,
Tensor._grad.__get__: lambda self: -1,
Tensor._grad_fn.__get__: lambda self: -1,
Tensor.grad_fn.__get__: lambda self: -1,
Tensor._version.__get__: lambda self: -1,
Tensor.data.__get__: lambda self: -1,
Tensor.device.__get__: lambda self: -1,
Tensor.dtype.__get__: lambda self: -1,
Tensor.is_cuda.__get__: lambda self: -1,
Tensor.is_xpu.__get__: lambda self: -1,
Tensor.is_leaf.__get__: lambda self: -1,
Tensor.is_meta.__get__: lambda self: -1,
Tensor.is_mlc.__get__: lambda self: -1,
Tensor.is_mkldnn.__get__: lambda self: -1,
Tensor.is_quantized.__get__: lambda self: -1,
Tensor.is_sparse.__get__: lambda self: -1,
Tensor.is_sparse_csr.__get__: lambda self: -1,
Tensor.is_vulkan.__get__: lambda self: -1,
Tensor.layout.__get__: lambda self: -1,
Tensor.name.__get__: lambda self: -1,
Tensor.names.__get__: lambda self: -1,
Tensor.ndim.__get__: lambda self: -1,
Tensor.output_nr.__get__: lambda self: -1,
Tensor.requires_grad.__get__: lambda self: -1,
Tensor.shape.__get__: lambda self: -1,
Tensor.volatile.__get__: lambda self: -1,
Tensor.real.__get__: lambda self: -1,
Tensor.imag.__get__: lambda self: -1,
Tensor.__cuda_array_interface__.__get__: lambda self: -1,
Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
Tensor._coalesced_: lambda self: -1,
Tensor._dimI: lambda self: -1,
Tensor._dimV: lambda self: -1,
Tensor._indices: lambda self: -1,
Tensor._is_view: lambda self: -1,
Tensor._nnz: lambda self: -1,
Tensor.crow_indices: lambda self: -1,
Tensor.col_indices: lambda self: -1,
Tensor._update_names: lambda self, names, inplace: -1,
Tensor._values: lambda self: -1,
Tensor.align_as: lambda self, other: -1,
Tensor.align_to: lambda self, order, ellipsis_idx: -1,
Tensor.apply_: lambda self, callable: -1,
Tensor.as_strided: lambda self, size, stride: -1,
Tensor.as_strided_: lambda self, size, stride: -1,
Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
Tensor.coalesce: lambda self: -1,
Tensor._coalesced_: lambda self, coalesced: -1,
Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
Tensor.copy_: lambda self, src, non_blocking=False: -1,
Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.data_ptr: lambda self: -1,
Tensor.dense_dim: lambda self: -1,
Tensor.dim: lambda self: -1,
Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
Tensor.element_size: lambda self: -1,
Tensor.expand: lambda self, size: -1,
Tensor.expand_as: lambda self, other: -1,
Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
Tensor.fill_: lambda self, value: -1,
Tensor.fill_diagonal_: lambda self, value: -1,
Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
Tensor.geometric_: lambda self, p, *, generator=None: -1,
Tensor.get_device: lambda self: -1,
Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
Tensor.has_names: lambda self: -1,
Tensor.indices: lambda self: -1,
Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
Tensor.is_coalesced: lambda self: -1,
Tensor.is_contiguous: lambda self: -1,
Tensor.is_pinned: lambda self: -1,
Tensor.is_set_to: lambda self, tensor: -1,
Tensor.is_shared: lambda self: -1,
Tensor.item: lambda self: -1,
Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
Tensor.log_softmax: lambda self, dim: -1,
Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
Tensor.map_: lambda self, tensor, callable: -1,
Tensor.map2_: lambda self, x, y, callable: -1,
Tensor.mm: lambda self, mat2: -1,
Tensor.narrow_copy: lambda self, dimension, start, length: -1,
Tensor.ndimension: lambda self: -1,
Tensor.nelement: lambda self: -1,
Tensor.normal_: lambda self: -1,
Tensor.numpy: lambda self: -1,
Tensor.permute: lambda self, dim: -1,
Tensor.pin_memory: lambda self: -1,
Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
Tensor.qscheme: lambda self: -1,
Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
Tensor.record_stream: lambda self, stream: -1,
Tensor.refine_names: lambda self, names: -1,
Tensor.register_hook: lambda self, hook: -1,
Tensor.rename: lambda self, name: -1,
Tensor.repeat: lambda self, *size: -1,
Tensor.requires_grad_: lambda self, requires_grad=True: -1,
Tensor.reshape_as: lambda self, other: -1,
Tensor.resize: lambda self, *size: -1,
Tensor.resize_: lambda self, size: -1,
Tensor.resize_as: lambda self, other: -1,
Tensor.retain_grad: lambda self: -1,
Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
Tensor.share_memory_: lambda self: -1,
Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
Tensor.size: lambda self: -1,
Tensor.sparse_dim: lambda self: -1,
Tensor.sparse_mask: lambda self, mask: -1,
Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1,
Tensor.storage: lambda self: -1,
Tensor.storage_offset: lambda self: -1,
Tensor.storage_type: lambda self: -1,
Tensor.sum_to_size: lambda self, size: -1,
Tensor.tile: lambda self, *reps: -1,
Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1,
Tensor.to_dense: lambda self: -1,
Tensor.to_sparse: lambda self: -1,
Tensor.tolist: lambda self: -1,
Tensor.to_mkldnn: lambda self: -1,
Tensor.type_as: lambda self, other: -1,
Tensor.unfold: lambda self, dimension, size, step: -1,
Tensor.uniform_: lambda self, from_=0, to=1: -1,
Tensor.values: lambda self: -1,
Tensor.view: lambda self, shape: -1,
Tensor.view_as: lambda self, other: -1,
Tensor.zero_: lambda self: -1,
torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
}
ret2 = {}
ignored = get_ignored_functions()
for k, v in ret.items():
# Generate methods like __add__ and add_ by default from add
names = [
k.__name__, # Default method
k.__name__ + "_", # Inplace variant
"__" + k.__name__ + "__", # Dunder method
"__i" + k.__name__ + "__", # Inplace dunder method
"__r" + k.__name__ + "__", # Reverse dunder method
]
if k.__name__.startswith("bitwise_"):
# bitwise_<op> have dunder methods of the form __<op>__
# And so on.
subname = k.__name__[len("bitwise_"):]
names.extend([
"__" + subname + "__",
"__i" + subname + "__",
"__r" + subname + "__"
])
for name in names:
func = getattr(Tensor, name, None)
if callable(func) and func not in ret and func not in ignored:
ret2[func] = v
ret.update(ret2)
return ret
def wrap_torch_function(dispatcher: Callable):
    """Decorate a function so it participates in ``__torch_function__`` dispatch.

    Parameters
    ----------
    dispatcher: Callable
        A callable that returns an iterable of Tensor-likes passed into the function.

    Note
    ----
    This decorator may reduce the performance of your code. Usually it is enough to
    express your code as a series of functions that themselves support
    __torch_function__. Reach for this only in the rare case where that is not
    possible, e.g. when wrapping a low-level library that must also accept
    Tensor-likes.

    Examples
    --------
    >>> def dispatcher(a):  # Must have the same signature as func
    ...     return (a,)
    >>> @torch.overrides.wrap_torch_function(dispatcher)
    >>> def func(a):  # This will make func dispatchable by __torch_function__
    ...     return a + 0
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # ask the dispatcher which arguments may carry __torch_function__
            candidates = dispatcher(*args, **kwargs)
            if not has_torch_function(candidates):
                return func(*args, **kwargs)
            return handle_torch_function(func, candidates, *args, **kwargs)
        return wrapper
    return decorator
def _get_overloaded_args(relevant_args: Iterable[Any]) -> List[Any]:
"""Returns a list of arguments on which to call __torch_function__.
Checks arguments in relevant_args for __torch_function__ implementations,
storing references to the arguments and their types in overloaded_args and
overloaded_types in order of calling precedence. Only distinct types are
considered. If a type is a subclass of another type it will have higher
precedence, otherwise the precedence order is the same as the order of
arguments in relevant_args, that is, from left-to-right in the argument list.
The precedence-determining algorithm implemented in this function is
described in `NEP-0018`_.
See torch::append_overloaded_arg for the equivalent function in the C++
implementation.
Parameters
----------
relevant_args : iterable of array-like
Iterable of array-like arguments to check for __torch_function__
methods.
Returns
-------
overloaded_args : list
Arguments from relevant_args on which to call __torch_function__
methods, in the order in which they should be called.
.. _NEP-0018:
https://numpy.org/neps/nep-0018-array-function-protocol.html
"""
# Runtime is O(num_arguments * num_unique_types)
overloaded_types: Set[Type] = set()
overloaded_args: List[Any] = []
for arg in relevant_args:
arg_type = type(arg)
# We only collect arguments if they have a unique type, which ensures
# reasonable performance even with a long list of possibly overloaded
# arguments.
if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__')):
# Create lists explicitly for the first type (usually the only one
# done) to avoid setting up the iterator for overloaded_args.
if overloaded_types:
overloaded_types.add(arg_type)
# By default, insert argument at the end, but if it is
# subclass of another argument, insert it before that argument.
# This ensures "subclasses before superclasses".
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(arg_type, type(old_arg)):
index = i
break
overloaded_args.insert(index, arg)
else:
overloaded_types = {arg_type}
overloaded_args = [arg]
return overloaded_args
def handle_torch_function(
        public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:
    """Implement a function with checks for ``__torch_function__`` overrides.

    See torch::autograd::handle_torch_function for the equivalent of this
    function in the C++ implementation.

    Arguments
    ---------
    public_api : function
        Function exposed by the public torch API originally called like
        ``public_api(*args, **kwargs)`` on which arguments are now being
        checked.
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.
    args : tuple
        Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : tuple
        Arbitrary keyword arguments originally passed into ``public_api``.

    Returns
    -------
    object
        Result from calling ``implementation`` or an ``__torch_function__``
        method, as appropriate.

    Raises
    ------
    TypeError : if no implementation is found.

    Example
    -------
    >>> def func(a):
    ...     if type(a) is not torch.Tensor:  # This will make func dispatchable by __torch_function__
    ...         return handle_torch_function(func, (a,), a)
    ...     return a + 0
    """
    # Determine which arguments get a say, in precedence order; the list
    # already contains at most one representative per type.
    overloaded_args = _get_overloaded_args(relevant_args)
    types = tuple(type(arg) for arg in overloaded_args)
    # Ask each override in turn until one claims the call.  `public_api`
    # (not some internal implementation) is passed so overrides can do
    # equality/identity comparisons against the documented entry point.
    for candidate in overloaded_args:
        outcome = candidate.__torch_function__(public_api, types, args, kwargs)
        if outcome is not NotImplemented:
            return outcome
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    raise TypeError("no implementation found for '{}' on types that implement "
                    '__torch_function__: {}'
                    .format(func_name, [type(arg) for arg in overloaded_args]))
# Attach a docstring to the C-implemented `_has_torch_function` and re-export
# it under its public name.  Fixes the docstring typo "Iterable or aguments"
# and the underscore section underline (numpydoc uses dashes).
has_torch_function = _add_docstr(
    _has_torch_function,
    r"""Check for __torch_function__ implementations in the elements of an iterable.
    Considers exact ``Tensor`` s and ``Parameter`` s non-dispatchable.

    Arguments
    ---------
    relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.

    Returns
    -------
    bool
        True if any of the elements of relevant_args have __torch_function__
        implementations, False otherwise.

    See Also
    --------
    torch.is_tensor_like
        Checks if something is a Tensor-like, including an exact ``Tensor``.
    """
)
has_torch_function_unary = _add_docstr(
_has_torch_function_unary,
r"""Special case of `has_torch_function` for single inputs.
Instead of:
`has_torch_function((t,))`
call:
`has_torch_function_unary(t)`
which skips unnecessary packing and unpacking work.
"""
)
# C-implemented variant of `has_torch_function` taking its arguments
# positionally, so no tuple needs to be allocated by the caller.
# Fixes the docstring typo "equivilent" -> "equivalent".
has_torch_function_variadic = _add_docstr(
    _has_torch_function_variadic,
    r"""Special case of `has_torch_function` that skips tuple creation.

    This uses the METH_FASTCALL protocol introduced in Python 3.7; for 3.6
    and before it has roughly equivalent performance compared to
    `has_torch_function`.

    Instead of:
    `has_torch_function((a, b))`
    call:
    `has_torch_function_variadic(a, b)`
    which skips unnecessary packing and unpacking work.
    """
)
@functools.lru_cache(None)
def get_overridable_functions() -> Dict[Any, List[Callable]]:
    """List functions that are overridable via __torch_function__
    Returns
    -------
    Dict[Any, List[Callable]]
        A dictionary that maps namespaces that contain overridable functions
        to functions in that namespace that can be overridden.
    """
    overridable_funcs = collections.defaultdict(list)
    # (namespace, candidate attribute names) pairs scanned for overridable
    # callables.  `torch` itself combines __all__ with the native
    # _VariableFunctions bindings.
    tested_namespaces = [
        (torch, torch.__all__ + dir(torch._C._VariableFunctions)),
        (torch.functional, torch.functional.__all__),
        (torch.nn.functional, dir(torch.nn.functional)),
        (torch.Tensor, dir(torch.Tensor)),
        (torch.linalg, dir(torch.linalg)),
        (torch.fft, dir(torch.fft)),
        (torch.special, dir(torch.special)),
    ]
    for namespace, ns_funcs in tested_namespaces:
        for func_name in ns_funcs:
            # ignore private functions or functions that are deleted in torch.__init__
            if namespace is not torch.Tensor:
                if func_name.startswith('_'):
                    continue
                elif func_name.endswith('_'):
                    # trailing underscore: in-place variant, not overridable here
                    continue
                elif not func_name[0].islower():
                    # non-lowercase first letter: class/constant, not a function
                    continue
                elif func_name == 'unique_dim':
                    continue
            else:
                # Tensor namespace: skip attributes inherited unchanged from
                # `object`, plus `__weakref__`.
                func = getattr(namespace, func_name)
                if getattr(object, func_name, None) == func:
                    continue
                if func_name == '__weakref__':
                    continue
            # NOTE(review): re-fetches `func` and repeats the Tensor/object
            # check above -- redundant for the Tensor branch but harmless;
            # it is the first fetch for the non-Tensor branch.
            func = getattr(namespace, func_name)
            if namespace is torch.Tensor and getattr(object, func_name, None) == func:
                continue
            # ignore re-exported modules
            if isinstance(func, types.ModuleType):
                continue
            # ignore __future__ imports
            if isinstance(func, __future__._Feature):
                continue
            if not callable(func) and hasattr(func, "__get__"):
                # NOTE(review): properties/descriptors are keyed by the
                # descriptor object itself, recording their `__get__` --
                # presumably intentional (dispatch passes `__get__`); confirm
                # against callers before changing.
                overridable_funcs[func].append(func.__get__)
                continue
            if not callable(func):
                continue
            # cannot be overriden by __torch_function__
            if func in get_ignored_functions():
                msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
                       "but still has an explicit override")
                assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
                continue
            overridable_funcs[namespace].append(func)
    return overridable_funcs
@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
""" Returns a set of the overridable methods on ``torch.Tensor`` """
overridable_funcs = get_overridable_functions()
methods = set(overridable_funcs[torch.Tensor])
return methods
def is_tensor_method_or_property(func: Callable) -> bool:
    """
    Returns True if the function passed in is a handler for a
    method or property belonging to ``torch.Tensor``, as passed
    into ``__torch_function__``.

    .. note::
       For properties, their ``__get__`` method must be passed in.

    This may be needed, in particular, for the following reasons:
    1. Methods/properties sometimes don't contain a `__module__` slot.
    2. They require that the first passed-in argument is an instance
       of ``torch.Tensor``.

    Examples
    --------
    >>> is_tensor_method_or_property(torch.Tensor.add)
    True
    >>> is_tensor_method_or_property(torch.add)
    False
    """
    # Membership is checked first; `__name__` is only consulted for
    # property getters, which all surface as "__get__".
    is_method = func in _get_tensor_methods()
    return is_method or func.__name__ == "__get__"
def is_tensor_like(inp):
    """
    Returns ``True`` if the passed-in input is a Tensor-like.

    An input is Tensor-like when it is an exact ``torch.Tensor`` or when its
    type carries a ``__torch_function__`` attribute (which subclasses inherit).

    Examples
    --------
    A subclass of tensor is generally a Tensor-like.

    >>> class SubTensor(torch.Tensor): ...
    >>> is_tensor_like(SubTensor([0]))
    True

    Built-in or user types aren't usually Tensor-like.

    >>> is_tensor_like(6)
    False
    >>> is_tensor_like(None)
    False
    >>> class NotATensor: ...
    >>> is_tensor_like(NotATensor())
    False

    But, they can be made Tensor-like by implementing __torch_function__.

    >>> class TensorLike:
    ...     def __torch_function__(self, func, types, args, kwargs):
    ...         return -1
    >>> is_tensor_like(TensorLike())
    True
    """
    if type(inp) is torch.Tensor:
        return True
    return hasattr(type(inp), "__torch_function__")
| 54.259958
| 173
| 0.64409
|
4a0ee39b1774a3a42e8da462a7dc3684265b73a6
| 6,367
|
py
|
Python
|
IMS/product/migrations/0001_initial.py
|
AyushPaudel/Inventory-Management-System
|
04e57b0d02b1b7cade992b959569e750ca339c8e
|
[
"MIT"
] | 2
|
2021-09-01T13:00:24.000Z
|
2021-11-19T12:16:52.000Z
|
IMS/product/migrations/0001_initial.py
|
aadarshadhakalg/Inventory-Management-System-1
|
075ec49b9d4abebb7d9a0b150a6cb70f6cbf5144
|
[
"MIT"
] | null | null | null |
IMS/product/migrations/0001_initial.py
|
aadarshadhakalg/Inventory-Management-System-1
|
075ec49b9d4abebb7d9a0b150a6cb70f6cbf5144
|
[
"MIT"
] | 1
|
2021-12-23T23:41:20.000Z
|
2021-12-23T23:41:20.000Z
|
# Generated by Django 3.2 on 2021-08-01 06:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django 3.2 from the `product` app's models.  Once this
# migration has been applied anywhere, schema changes belong in a *new*
# migration rather than edits here.
class Migration(migrations.Migration):
    # first migration of this app
    initial = True
    dependencies = [
        # the auth user model is swappable, so depend on whichever is configured
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # product category taxonomy (top level)
        migrations.CreateModel(
            name='categories',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('url_slug', models.SlugField(unique=True)),
                ('thumbnail', models.ImageField(blank=True, null=True, upload_to='photos/categories/%Y/%m/%d/')),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.IntegerField(default=1)),
            ],
        ),
        # sellable product; linked to a sub-category further below
        migrations.CreateModel(
            name='products',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('url_slug', models.SlugField(unique=True)),
                ('product_name', models.CharField(max_length=255)),
                ('brand', models.CharField(max_length=255)),
                ('product_max_price', models.PositiveIntegerField(default=0)),
                ('product_discount_price', models.PositiveIntegerField(default=0)),
                ('product_description', models.TextField()),
                ('product_long_description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('total_stock', models.PositiveIntegerField(default=1)),
                ('media_content', models.ImageField(blank=True, null=True, upload_to='photos/products/%Y/%m/%d/')),
                ('is_active', models.IntegerField(default=1)),
            ],
        ),
        # second taxonomy level, each belonging to one category
        migrations.CreateModel(
            name='subCategories',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('url_slug', models.SlugField(unique=True)),
                ('thumbnail', models.ImageField(blank=True, null=True, upload_to='photos/subcategories/%Y/%m/%d/')),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.IntegerField(default=1)),
                ('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.categories')),
            ],
        ),
        # purchase receipt; many-to-many with the products it covers
        migrations.CreateModel(
            name='Recipt',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('purchase_price', models.PositiveIntegerField(default=0)),
                ('discount_amount', models.PositiveIntegerField(default=0)),
                ('total_items', models.PositiveIntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('unique_token', models.CharField(blank=True, max_length=10)),
                ('redeemed', models.BooleanField(default=False)),
                ('product', models.ManyToManyField(to='product.products')),
            ],
        ),
        # stock movement / sale record for one product
        migrations.CreateModel(
            name='productTransaction',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('transaction_product_count', models.IntegerField(default=1)),
                ('transaction_type', models.CharField(choices=[(1, 'Cash'), (2, 'Card'), (3, 'Crypto')], max_length=255)),
                ('transcation_description', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.products')),
            ],
        ),
        migrations.CreateModel(
            name='productTags',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.IntegerField(default=1)),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.products')),
            ],
        ),
        # added after subCategories exists to avoid a forward reference
        migrations.AddField(
            model_name='products',
            name='sub_categories_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.subcategories'),
        ),
        migrations.CreateModel(
            name='productDetails',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('title_details', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.IntegerField(default=1)),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.products')),
            ],
        ),
        migrations.CreateModel(
            name='productAbout',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.IntegerField(default=1)),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.products')),
            ],
        ),
        # per-user purchase history, one-to-one with the auth user
        migrations.CreateModel(
            name='customerRecords',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_expenditure', models.PositiveIntegerField(default=0)),
                ('imsuser', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('recipt', models.ManyToManyField(to='product.Recipt')),
            ],
        ),
    ]
| 49.356589
| 124
| 0.578923
|
4a0ee3a2335531106153b88527f017ad4bad7dff
| 3,804
|
py
|
Python
|
utils_crunchy.py
|
enricoros/vc-analysis-tools
|
5137b5202c570d07f20aa25a71a538edff26a5d0
|
[
"BSD-3-Clause"
] | null | null | null |
utils_crunchy.py
|
enricoros/vc-analysis-tools
|
5137b5202c570d07f20aa25a71a538edff26a5d0
|
[
"BSD-3-Clause"
] | null | null | null |
utils_crunchy.py
|
enricoros/vc-analysis-tools
|
5137b5202c570d07f20aa25a71a538edff26a5d0
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
# uniform arrangement of the data frame:
# canonical column names every loader below normalizes to
COL_NAME = 'Name'
COL_TITLE = 'Title'
COL_SERIES = 'Series'
COL_MONEY = 'Money'  # funding amount, in millions of USD after normalization
COL_FUND_DATE = 'Funds Date'
COL_FUND_YEAR = 'Funds Year'
COL_LABEL = 'Label'
COL_DESCRIPTION = 'Description'
COL_INDUSTRIES = 'Industries'
# columns every normalized frame must expose, in output order
_TSV_HEADERS = [COL_NAME, COL_TITLE, COL_SERIES, COL_MONEY, COL_FUND_DATE, COL_FUND_YEAR, COL_LABEL, COL_DESCRIPTION, COL_INDUSTRIES]
# columns appended to the output only when present in the input
_TSV_OPTIONALS = ['Lead', 'Website']
# data loader: df[ Title, Name, Series, Money, Industries, Description ]
def normalize_crunchbase_df(df):
    """Normalize one of several Crunchbase CSV export flavors to a uniform layout.

    Detects the export type from its column names (pass-through, Funding
    Rounds, Company List, Company Search, or a bare-minimum list), renames
    columns to the canonical COL_* names, fills in missing columns with
    defaults, and converts USD amounts to millions.

    NOTE: renames use ``inplace=True``, so the caller's DataFrame is mutated
    as a side effect.

    :param df: a pandas DataFrame freshly read from a Crunchbase CSV export
    :return: (normalized_df, headers) where normalized_df has exactly the
             `headers` columns and headers is _TSV_HEADERS plus any optional
             columns found in the input
    """
    # type heuristics: pass-through
    if "Label" in df:
        print(' * detected a pass-through CSV')
    # type heuristics: Funding Rounds
    elif "Money Raised Currency (in USD)" in df and "Organization Industries" in df:
        print(' * detected a Funding Rounds CSV')
        df.rename(columns={
            "Organization Name": COL_NAME,
            "Funding Type": COL_SERIES,
            "Announced Date": COL_FUND_DATE,
            "Money Raised Currency (in USD)": COL_MONEY,
            "Organization Industries": COL_INDUSTRIES,
            "Organization Description": COL_DESCRIPTION,
        }, inplace=True)
        if COL_MONEY in df:
            # USD -> millions of USD
            df[COL_MONEY] = df[COL_MONEY] / 1E+06
    # type heuristics: Company List
    elif "Total Funding Amount Currency (in USD)" in df:
        print(' * detected a Company List CSV')
        df.rename(columns={
            "Organization Name": COL_NAME,
            # Series
            # Funding Date
            "Total Funding Amount Currency (in USD)": COL_MONEY,
            "Industries": COL_INDUSTRIES,
            "Description": COL_DESCRIPTION,
        }, inplace=True)
        # Series/date columns are optional in this flavor; default to 'Unknown'
        if 'Last Funding Type' in df:
            df.rename(columns={"Last Funding Type": COL_SERIES}, inplace=True)
        else:
            df[COL_SERIES] = 'Unknown'
        if 'Last Funding Date' in df:
            df.rename(columns={"Last Funding Date": COL_FUND_DATE}, inplace=True)
        else:
            df[COL_FUND_DATE] = 'Unknown'
        if COL_MONEY in df:
            # USD -> millions of USD
            df[COL_MONEY] = df[COL_MONEY] / 1E+06
    # type heuristics: Company Search
    elif "Last Funding Type" in df:
        print(' * detected a Company Search CSV')
        df.rename(columns={
            "Organization Name": COL_NAME,
            "Last Funding Type": COL_SERIES,
            "Last Funding Date": COL_FUND_DATE,
            # Money
            "Industries": COL_INDUSTRIES,
            "Description": COL_DESCRIPTION,
        }, inplace=True)
        # this export carries no funding amounts
        df[COL_MONEY] = 0
    # type heuristics: Company List (BARE MINIMUM)
    elif "Organization Name" in df and "Industries" in df and "Description" in df:
        print(' * detected a BARE MINIMUM Company List CSV')
        df.rename(columns={
            "Organization Name": COL_NAME,
            "Industries": COL_INDUSTRIES,
            "Description": COL_DESCRIPTION,
        }, inplace=True)
        df[COL_SERIES] = 'Unknown'
        df[COL_FUND_DATE] = 'Unknown'
        df[COL_MONEY] = 0
    # type heuristics: ?
    else:
        raise Exception('Wrong CSV file type')
    # derived columns: "Name (xx M)" title, and the funding year
    if COL_TITLE not in df:
        df[COL_TITLE] = df.apply(lambda row: row[COL_NAME] + ((' (' + str(round(row[COL_MONEY])) + ' M)') if np.isfinite(row[COL_MONEY]) else ''), axis=1)
    if COL_FUND_YEAR not in df:
        # row != row is the NaN check; dates are assumed to start with YYYY
        df[COL_FUND_YEAR] = df.apply(lambda row: row[COL_FUND_DATE][:4] if row[COL_FUND_DATE] != 'Unknown' and row[COL_FUND_DATE] == row[COL_FUND_DATE] else '', axis=1)
    if COL_LABEL not in df:
        df[COL_LABEL] = '_'
    # add optional columns, if present in the dataset
    headers = _TSV_HEADERS.copy()
    for col in _TSV_OPTIONALS:
        if col in df and col not in headers:
            headers.append(col)
    return df[headers], headers
| 37.663366
| 168
| 0.619085
|
4a0ee5a0910b2fbfb50695d9d3279887b7a1cf20
| 8,205
|
py
|
Python
|
advertools/urlytics.py
|
maximrus23/advertools
|
f6a392cea9075f49d50751f1b51cb0bc15b48647
|
[
"MIT"
] | 1
|
2021-03-18T15:19:16.000Z
|
2021-03-18T15:19:16.000Z
|
advertools/urlytics.py
|
imne-hh/advertools
|
3f71bc5d96b0f8b91bcf24a0699267b8af75472c
|
[
"MIT"
] | null | null | null |
advertools/urlytics.py
|
imne-hh/advertools
|
3f71bc5d96b0f8b91bcf24a0699267b8af75472c
|
[
"MIT"
] | null | null | null |
"""
.. _urlytics:
Split, Parse, and Analyze URLs
==============================
Extracting information from URLs can be a little tedious, yet very important.
Using the standard for URLs we can extract a lot of information in a fairly
structured manner.
There are many situations in which you have many URLs that you want to better
understand:
* **Analytics reports**: Whichever analytics sytem you use, whether Google
Analytics, search console, or any other reporting tool that reports on URLs,
your reports can be enhanced by splitting URLs, and in effect becoming four
or five data points as opposed to one.
* :ref:`Crawl datasets <crawl>`: The result of any crawl you run typically
contains the URLs, which can benefit from the same enhancement.
* :ref:`SERP datasets <serp>`: Which are basically about URLs.
* :ref:`Extracted URLs <extract>`: Extracting URLs from social media posts is
one thing you might want to do to better understand those posts, and further
splitting URLs can also help.
* :ref:`XML sitemaps <sitemaps>`: Right after downloading a sitemap(s)
splitting it further can help in giving a better perspective on the dataset.
The main function here is :func:`url_to_df`, which as the name suggests,
converts URLs to DataFrames.
>>> urls = ['https://net.location.com/path_1/path_2?price=10&color=blue#frag_1',
... 'https://net.location.com/path_1/path_2?price=15&color=red#frag_2']
>>> url_to_df(urls)
url scheme netloc path query fragment dir_1 dir_2 query_price query_color
0 https://net.location.com/path_1/path_2?price=10&color=blue#frag_1 https net.location.com /path_1/path_2 price=10&color=blue frag_1 path_1 path_2 10 blue
1 https://net.location.com/path_1/path_2?price=15&color=red#frag_2 https net.location.com /path_1/path_2 price=15&color=red frag_2 path_1 path_2 15 red
* **url**: The original URLs are listed as a reference. They are decoded for
easier reading, and you can set ``decode=False`` if you want to retain the
original encoding.
* **scheme**: Self-explanatory. Note that you can also provide relative URLs
`/category/sub-category?one=1&two=2` in which case the `url`, `scheme` and
`netloc` columns would be empty. You can mix relative and absolute URLs as
well.
* **netloc**: The network location is the sub-domain (optional) together with
the domain and top-level domain and/or the country domain.
* **path**: The slug of the URL, excluding the query parameters and fragments
if any. The path is also split in to directories `dir_1, dir_2, dir_3...` to
make it easier to categorize and analyze the URLs.
* **query**: If query parameters are available they are given in this column,
but more importantly they are parsed and included in separate columns, where
each parameter has its own column (with the keys being the names). As in the
example above, the query `price=10&color=blue` becomes two columns, one for
price and the other for color. If any other URLs in the dataset contain the
same parameters, their values will be populated in the same column, and `NA`
otherwise.
* **fragment**: The final part of the URL after the hash mark `#`, linking to a
part in the page.
* **query_***: The query parameter names are prepended with `query_` to make
  it easy to filter them out, and to avoid any name collisions with other
columns, if some URL contains a query parameter called "url" for example.
In the unlikely event of having a repeated parameter in the same URL, then
their values would be delimited by two "@" signs `one@@two@@three`. It's
unusual, but it happens.
* **hostname and port**: If available a column for ports will be shown, and if
the hostname is different from `netloc` it would also have its own column.
Query Parameters
----------------
The great thing about parameters is that the names are descriptive (mostly!)
and once given a certain column you can easily understand what data they
contain. Once this is done, you can sort the products by price, filter by
destination, get the red and blue items, and so on.
The URL Path (Directories):
---------------------------
Here things are not as straightforward, and there is no way to know what the
first or second directory is supposed to indicate. In general, I can think of
three main situations that you can encounter while analyzing directories.
* **Consistent URLs**: This is the simplest case, where all URLs follow the
same structure. `/en/product1` clearly shows that the first directory
indicates the language of the page. So it can also make sense to rename those
columns once you have discovered their meaning.
* **Inconsistent URLs**: This is similar to the previous situation. All URLs
follow the same pattern with a few exceptions. Take the following URLs for
example:
* /topic1/title-of-article-1
* /es/topic1/title-of-article-2
* /es/topic2/title-of-article-3
  * /topic2/title-of-article-4
You can see that they follow the pattern `/language/topic/article-title`,
except for English, which is not explicitly mentioned, but its articles can
be identified by having two instead of three directories, as we have for
"/es/". If URLs are split in this case, yout will end up with `dir_1` having
"topic1", "es", "es", and "topic2", which distorts the data. Actually you
want to have "en", "es", "es", "en". In such cases, after making sure you
have the right rules and patterns, you might create special columns or
replace/insert values to make them consistent, and get them to a state
similar to the first example.
* **URLs of different types**: In many cases you will find that sites having
different types of pages with completely different roles on the site.
* /blog/post-1-title.html
* /community/help/topic_1
* /community/help/topic_2
Here, once you split the directories, you will see that they don't align
properly (because of different lengths), and they can't be compared easily. A
good approach is to split your dataset into one for blog posts and another
for community content for example.
The ideal case for the `path` part of the URL is to be split into directories
of equal length across the dataset, having the right data in the right columns
and `NA` otherwise. Or, splitting the dataset and analyzing separately.
"""
from urllib.parse import urlsplit, parse_qs, unquote
import pandas as pd
def url_to_df(urls, decode=True):
    """Split the given URLs into their components to a DataFrame.

    Each component gets its own column; query parameters and path directories
    are additionally parsed into per-key ``query_*`` and per-level ``dir_*``
    columns.

    :param url urls: A list of URLs to split into components
    :param bool decode: Whether or not to decode the given URLs
    :return DataFrame split: A DataFrame with a column for each component
    """
    if isinstance(urls, str):
        urls = [urls]
    # identity function when decoding is disabled
    _decode = unquote if decode else (lambda u: u)
    records = []
    for url in urls:
        parts = urlsplit(_decode(url))
        record = parts._asdict()
        # only add hostname/port columns when they carry extra information
        if parts.hostname and parts.hostname != parts.netloc:
            record['hostname'] = parts.hostname
        if parts.port:
            record['port'] = parts.port
        # one column per query parameter; repeated keys are joined with '@@'
        record.update({'query_' + key: '@@'.join(values)
                       for key, values in parse_qs(record['query']).items()})
        segments = record['path'].strip('/').split('/')
        if segments[0]:
            record.update({'dir_{}'.format(level): segment
                           for level, segment in enumerate(segments, 1)})
        records.append(record)
    df = pd.DataFrame(records)
    # move dir_* then query_* columns to the end, after the base components
    query_cols = df.filter(regex='query_')
    if not query_cols.empty:
        df = df.drop(query_cols.columns, axis=1)
    dir_cols = df.filter(regex='dir_')
    if not dir_cols.empty:
        df = df.drop(dir_cols.columns, axis=1)
    df = pd.concat([df, dir_cols, query_cols], axis=1)
    df.insert(0, 'url', [_decode(url) for url in urls])
    return df
| 48.550296
| 179
| 0.704814
|
4a0ee7cdc8cdff5e01d424a1e1ddc1e87c5d081d
| 4,327
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
dalijolijo/Bitcloud
|
54fc01a6b882234f4e7cae15fa453e1e670282a1
|
[
"MIT"
] | 49
|
2017-08-31T04:28:33.000Z
|
2022-03-06T15:19:20.000Z
|
contrib/seeds/generate-seeds.py
|
dalijolijo/Bitcloud
|
54fc01a6b882234f4e7cae15fa453e1e670282a1
|
[
"MIT"
] | 15
|
2017-08-28T03:39:37.000Z
|
2021-06-05T07:48:06.000Z
|
contrib/seeds/generate-seeds.py
|
dalijolijo/Bitcloud
|
54fc01a6b882234f4e7cae15fa453e1e670282a1
|
[
"MIT"
] | 35
|
2017-08-16T12:52:36.000Z
|
2022-03-17T09:30:09.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6(-mapped) representation.

    Accepted formats: '<name>.onion' (base32 Tor address, mapped into the
    OnionCat prefix), IPv4 dotted-quad, IPv6, and '0xDDBBCCAA' (IPv4 in
    little-endian hex, the old pnSeeds format).

    Raises ValueError for onion payloads of the wrong length and for
    unrecognized address formats.
    """
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            # BUG FIX: previously `% s` referenced an undefined name and
            # raised NameError instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4 dotted-quad -> IPv4-mapped IPv6
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # groups before and after '::'
        x = 0
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr) - 1):  # skip empty component at beginning or end
                    continue
                x += 1  # '::' switches to the suffix group
                assert(x < 2)
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        # zero-fill whatever '::' elided
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Parse a peer spec into a (host, port) tuple.

    Accepts '<host>', '<host>:<port>', '[<ipv6>]' and '[<ipv6>]:<port>'.
    `defaultport` is used when the spec carries no explicit port; the host is
    returned as a 16-byte bytearray via name_to_ipv6().
    """
    # FIX: raw string -- '\[' in a normal string literal is an invalid escape
    # sequence and emits DeprecationWarning on Python 3.6+.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:  # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:  # ipv6, no port
        host = s
        port = ''
    else:
        (host, _, port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host, port)
def process_nodes(g, f, structname, defaultport):
    """Read peer specs line-by-line from ``f`` and write them to ``g`` as a
    C array of SeedSpec6 named ``structname``."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    need_separator = False
    for line in f:
        # strip a trailing '#' comment, then surrounding whitespace
        comment_pos = line.find('#')
        if comment_pos != -1:
            line = line[0:comment_pos]
        line = line.strip()
        if not line:
            continue
        if need_separator:
            g.write(',\n')
        need_separator = True
        (host, port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    # Usage: generate-seeds.py <dir containing nodes_main.txt and nodes_test.txt>
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    # the generated header is written to stdout
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # mainnet seeds (default port 8329), then testnet seeds (default 51474)
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 8329)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 51474)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
| 31.355072
| 98
| 0.581465
|
4a0ee7da015995b60c12b003f590b428fc4d6f5c
| 6,867
|
py
|
Python
|
backend/glamourhaven/models.py
|
njeveh/GlamourHaven
|
5cc72f4ad430df96667f1ca7141b1e2481ad3c97
|
[
"MIT"
] | 3
|
2022-02-05T22:52:02.000Z
|
2022-02-08T08:40:12.000Z
|
backend/glamourhaven/models.py
|
njeveh/GlamourHaven
|
5cc72f4ad430df96667f1ca7141b1e2481ad3c97
|
[
"MIT"
] | 23
|
2022-02-19T00:03:45.000Z
|
2022-03-04T10:11:32.000Z
|
backend/glamourhaven/models.py
|
njeveh/GlamourHaven
|
5cc72f4ad430df96667f1ca7141b1e2481ad3c97
|
[
"MIT"
] | 6
|
2022-01-27T10:08:46.000Z
|
2022-03-02T17:52:08.000Z
|
from django.db import models
from django.db.models.deletion import CASCADE, SET_NULL, PROTECT
from datetime import datetime, timezone
from django.contrib.auth.models import User
from django.db.models.fields.related import ForeignKey
# Make the built-in User.email field mandatory and unique.
# NOTE(review): this monkeypatches django.contrib.auth's User class at
# import time, affecting every app in the project; it only takes effect
# for code paths that import this module first — confirm this is intended
# rather than a custom user model (AUTH_USER_MODEL).
User._meta.get_field('email')._unique = True
User._meta.get_field('email').blank = False
User._meta.get_field('email').null = False
class Employee(models.Model):
    """A GlamourHaven staff member, linked 1:1 to a Django auth User."""
    # Deleting the User also deletes the Employee profile.
    user = models.OneToOneField(User, on_delete=CASCADE)
    first_name = models.CharField(max_length=50, null=False)
    last_name = models.CharField(max_length=50, null=False)
    phone_number = models.CharField(max_length=15, null=False)
    national_id = models.CharField(max_length=25, null=False)
    # Optional avatar; stored under MEDIA_ROOT/images/profile.
    profile_picture = models.ImageField(
        upload_to='images/profile', null=True)
    # Free-text description of the services this employee performs.
    specialization = models.TextField(null=False)
    def __str__(self):
        """Return the employee's full name."""
        return self.first_name + ' ' + self.last_name
class Client(models.Model):
    """A GlamourHaven customer, linked 1:1 to a Django auth User."""
    user = models.OneToOneField(User, on_delete=CASCADE)
    phone_number = models.CharField(max_length=15, null=False)
    # Whether the client has opted into the newsletter/subscription.
    is_subscribed = models.BooleanField(default=False)
    profile_picture = models.ImageField(
        upload_to='images/profile', null=True)
    def __str__(self):
        """Return the linked user's username."""
        return self.user.username
class Commodity(models.Model):
    """A sale item (product) offered by GlamourHaven."""
    # Commodity category options: (stored value, human-readable label).
    options = (
        ('skin care', 'Skin Care'),
        ('body care', 'Body Care'),
        ('makeup', 'Makeup'),
        ('hair care', 'Hair Care'),
        ('nail care', 'Nail Care'),
        ('fragrance', 'Fragrance'),
        ('tools, devices and accessories', 'Tools, Devices and Accessories'),
    )
    commodity_name = models.CharField(max_length=150, null=False)
    # NOTE(review): the default 'others' is not one of the declared
    # choices, so ModelForm validation will reject an unmodified default —
    # confirm whether 'others' should be added to `options`.
    category = models.CharField(
        max_length=50, choices=options, default='others')
    description = models.TextField(null=False)
    price = models.FloatField(null=False)
    pricing_unit = models.CharField(max_length=60, null=False)
    # Nullable: None can mean "stock untracked" as opposed to 0.
    number_in_stock = models.IntegerField(null=True)
    # One required main image plus two optional extras.
    commodity_main_image = models.ImageField(
        upload_to='images/commodities', null=False)
    commodity_extra_image1 = models.ImageField(
        upload_to='images/commodities', null=True)
    commodity_extra_image2 = models.ImageField(
        upload_to='images/commodities', null=True)
    def __str__(self):
        """Return the commodity's name."""
        return self.commodity_name
class Service(models.Model):
    """A bookable salon service (e.g. a treatment) with cost and duration."""
    service_title = models.CharField(max_length=200, null=False)
    service_description = models.TextField(null=False)
    service_cost = models.FloatField(null=False)
    # Estimated duration; unit is not encoded in the schema (presumably
    # hours — confirm against the booking UI).
    service_estimate_time = models.FloatField(null=False)
    # NOTE(review): service images upload to 'images/commodities' while
    # their defaults live under 'images/services' — looks like a
    # copy-paste of the Commodity fields; confirm the intended directory.
    service_main_image = models.ImageField(
        upload_to='images/commodities', default='images/services/main.jpg', null=False)
    service_extra_image1 = models.ImageField(
        upload_to='images/commodities', default='images/services/extra1.jpg', null=True)
    service_extra_image2 = models.ImageField(
        upload_to='images/commodities', default='images/services/extra2.jpg', null=True)
    def __str__(self):
        """Return the service's title."""
        return self.service_title
class Appointment(models.Model):
    """An appointment booked by a client for one or more services."""
    # Workflow states: (stored value, human-readable label).
    status_choices = (
        ("pending", "Pending"),
        ("confirmed", "Confirmed"),
        ("cancelled", "Cancelled"),
        ("postponed", "Postponed"),
    )
    services = models.ManyToManyField(Service)
    starting_time = models.DateTimeField(null=False)
    end_time = models.DateTimeField(null=False)
    # Deleting the client removes their appointments; deleting the staff
    # member keeps the appointment but nulls the assignment.
    client = models.ForeignKey(Client, on_delete=CASCADE)
    staff = models.ForeignKey(Employee, null=True, on_delete=SET_NULL)
    status = models.CharField(
        max_length=20, choices=status_choices, default='pending')
    def __str__(self):
        """Return a human-readable summary of the appointment."""
        return f"Appointment booked by {self.client} with {self.staff}, as from {self.starting_time} to {self.end_time}."
class LNMOnline(models.Model):
    """Record of an M-Pesa Lipa-Na-M-Pesa (STK push) payment callback.

    Field names mirror the payload keys of the Daraja API callback, hence
    the non-PEP8 CamelCase; all fields are optional because callbacks for
    failed transactions omit most of them.
    """
    CheckoutRequestID = models.CharField(max_length=50, blank=True, null=True)
    MerchantRequestID = models.CharField(max_length=20, blank=True, null=True)
    # 0 indicates success; non-zero codes carry the failure reason.
    ResultCode = models.IntegerField(blank=True, null=True)
    ResultDesc = models.CharField(max_length=120, blank=True, null=True)
    Amount = models.FloatField(blank=True, null=True)
    MpesaReceiptNumber = models.CharField(max_length=15, blank=True, null=True)
    Balance = models.CharField(max_length=12, blank=True, null=True)
    TransactionDate = models.DateTimeField(blank=True, null=True)
    PhoneNumber = models.CharField(max_length=13, blank=True, null=True)
    def __str__(self):
        """Return phone, amount and receipt for quick admin inspection."""
        return f"{self.PhoneNumber} >> {self.Amount} >> {self.MpesaReceiptNumber}"
class LNMOrder(models.Model):
    """An order of commodities paid for via an LNMOnline transaction."""
    order_items = models.ManyToManyField(Commodity)
    # NOTE(review): default=datetime.now yields a naive local datetime;
    # with USE_TZ=True Django expects aware datetimes
    # (django.utils.timezone.now) — confirm the project's timezone setup.
    date_placed = models.DateTimeField(default=datetime.now, null=True)
    order_value = models.FloatField(null=False)
    is_delivered = models.BooleanField(default=False, null=False)
    date_delivered = models.DateTimeField(null=True)
    # PROTECT: a payment record cannot be deleted while an order references it.
    payment_transaction = models.ForeignKey(
        LNMOnline, on_delete=PROTECT, null=False)
    placer = models.ForeignKey(Client, on_delete=SET_NULL, null=True)
    transaction_id = models.CharField(max_length=15, blank=True, null=True)
class C2BPayment(models.Model):
    """Record of an M-Pesa C2B (customer-to-business) payment notification.

    Field names mirror the Daraja C2B confirmation payload keys, hence the
    CamelCase; everything is optional so partial payloads still persist.
    """
    TransactionType = models.CharField(max_length=12, blank=True, null=True)
    TransID = models.CharField(max_length=12, blank=True, null=True)
    # Timestamp as the raw yyyymmddHHMMSS string from the API, not parsed.
    TransTime = models.CharField(max_length=14, blank=True, null=True)
    TransAmount = models.CharField(max_length=12, blank=True, null=True)
    BusinessShortCode = models.CharField(max_length=6, blank=True, null=True)
    BillRefNumber = models.CharField(max_length=20, blank=True, null=True)
    InvoiceNumber = models.CharField(max_length=20, blank=True, null=True)
    OrgAccountBalance = models.CharField(max_length=12, blank=True, null=True)
    ThirdPartyTransID = models.CharField(max_length=20, blank=True, null=True)
    MSISDN = models.CharField(max_length=12, blank=True, null=True)
    FirstName = models.CharField(max_length=20, blank=True, null=True)
    MiddleName = models.CharField(max_length=20, blank=True, null=True)
    LastName = models.CharField(max_length=20, blank=True, null=True)
| 43.738854
| 121
| 0.72113
|
4a0ee992cb0d4d084ddfc102d74533c2bd4a7071
| 2,254
|
py
|
Python
|
gen/generate.py
|
jasonliang-dev/elm-heroicons
|
2ce1815bd7e136d845f38965f5293e5dac9e4e2a
|
[
"MIT"
] | 1
|
2021-06-14T06:39:48.000Z
|
2021-06-14T06:39:48.000Z
|
gen/generate.py
|
jasonliang-dev/elm-heroicons
|
2ce1815bd7e136d845f38965f5293e5dac9e4e2a
|
[
"MIT"
] | 1
|
2020-09-18T14:34:15.000Z
|
2020-09-19T15:37:44.000Z
|
gen/generate.py
|
jasonliang-dev/elm-heroicons
|
2ce1815bd7e136d845f38965f5293e5dac9e4e2a
|
[
"MIT"
] | null | null | null |
import base64
import os.path
import sys
import copy
import xml.etree.ElementTree as ET
from attr_lookup import svg_attrs
def strip_namespace(xml_tag):
    """Return the local part of an ElementTree tag.

    ElementTree renders namespaced tags as ``'{uri}local'``.  The original
    unconditionally split on ``'}'`` and raised ValueError for tags without
    a namespace; those are now returned unchanged.

    Args:
        xml_tag: tag string, e.g. ``'{http://www.w3.org/2000/svg}path'``.

    Returns:
        The tag name without its namespace URI, e.g. ``'path'``.
    """
    if xml_tag.startswith("{"):
        # '{uri}tag' -> take everything after the first closing brace.
        return xml_tag[1:].split("}", 1)[1]
    return xml_tag
def to_attr(xml_attr):
    """Render one (name, value) XML attribute pair as Elm source text."""
    name, value = xml_attr
    # Translate the SVG attribute name to its Elm counterpart; unknown
    # names pass through unchanged.
    elm_name = svg_attrs.get(name, name)
    # `x` collides with the icon function of the same name, so qualify it.
    elm_name = "Svg.Attributes." + elm_name if elm_name == "x" else elm_name
    return f'{elm_name} "{value}"'
def to_elm(node):
    """Recursively render an SVG element (and its children) as Elm source.

    The root <svg> element prepends the caller-supplied `attrs` list with
    `::`; every other element gets a plain attribute list literal.
    """
    tag = strip_namespace(node.tag)
    if tag == "svg":
        attr_sep = " :: "
        attr_format = "({} :: attrs)"
    else:
        attr_sep = ", "
        attr_format = "[{}]"
    # These tag names collide with Elm identifiers, so qualify them.
    tag_collisions = ["path", "clipPath"]
    if tag in tag_collisions:
        tag = "Svg." + tag
    attrs = attr_sep.join(map(to_attr, node.attrib.items()))
    children = ", ".join(map(to_elm, node))
    return f"{tag} " + attr_format.format(attrs) + f" [{children}]"
def node_to_b64(tree):
    """Serialize a copy of *tree*, forced to 32x32, as base64-encoded XML."""
    # Work on a deep copy so the caller's tree is left untouched.
    root = copy.deepcopy(tree).getroot()
    for dimension in ("width", "height"):
        root.attrib[dimension] = "32"
    xml_bytes = ET.tostring(root)
    return base64.b64encode(xml_bytes).decode("utf-8")
# Elm template for one icon function; {b64_icon} embeds a 32x32 preview
# of the icon in the doc comment.
func_template = """
{{-| {icon_name}

-}}
{func} : List (Attribute msg) -> Html msg
{func} attrs =
    {func_body}
"""
source_code = ""
funcs_list = []
# Register the SVG namespace as the default so ET.tostring emits
# un-prefixed tags in the preview markup.
ET.register_namespace("", "http://www.w3.org/2000/svg")
# argv[1] is the Elm module name; argv[2:] are the input .svg files.
for svg_file in sys.argv[2:]:
    tree = ET.parse(svg_file)
    icon_name = os.path.basename(svg_file).replace(".svg", "")
    # kebab-case file name -> camelCase Elm function name.
    firstWord, *rest = icon_name.split("-")
    func_name = firstWord + "".join(word.capitalize() for word in rest)
    source_code += func_template.format(
        func=func_name,
        func_body=to_elm(tree.getroot()),
        icon_name=icon_name,
        b64_icon=node_to_b64(tree),
    )
    funcs_list.append(func_name)
module_name = sys.argv[1]
exposing_funcs = ", ".join(funcs_list)
top_of_file = f"""module Heroicons.{module_name} exposing ({exposing_funcs})
{{-|
# Heroicons
@docs {exposing_funcs}
-}}
import Html exposing (Html)
import Svg exposing (Attribute, svg, defs, g, rect)
import Svg.Attributes exposing (..)
"""
# Write the complete module next to the working directory.
with open("{}.elm".format(module_name), "w") as file_out:
    file_out.write(top_of_file + source_code)
| 21.673077
| 76
| 0.636202
|
4a0eea685e016dba2c2cf35f7ae4f267c093f5d1
| 305
|
py
|
Python
|
app2_geomap/read_info.py
|
lasupernova/Random-Apps
|
488b187ae6a4f32d9f6c2bbfdfd4718150863f36
|
[
"MIT"
] | null | null | null |
app2_geomap/read_info.py
|
lasupernova/Random-Apps
|
488b187ae6a4f32d9f6c2bbfdfd4718150863f36
|
[
"MIT"
] | null | null | null |
app2_geomap/read_info.py
|
lasupernova/Random-Apps
|
488b187ae6a4f32d9f6c2bbfdfd4718150863f36
|
[
"MIT"
] | null | null | null |
import pandas as pd
class InfoFile():
    """Spreadsheet-backed lookup of text snippets keyed by a 'Name' column."""

    def __init__(self, path, engine):
        """Load the spreadsheet at *path* (e.g. an .ods file via engine='odf')
        and index it by its 'Name' column."""
        self.file = pd.read_excel(path, engine=engine).set_index('Name')

    def get_info(self, name='Kate Sheppard', column='Text'):
        """Return the cell at row *name* / column *column*.

        Backward compatible generalization: calling with no arguments
        returns the 'Kate Sheppard' / 'Text' entry, exactly as the
        original hard-coded lookup did.

        Raises:
            KeyError: if *name* or *column* is not present in the sheet.
        """
        return self.file.loc[name, column]
# Smoke test: load the bundled spreadsheet and fetch the default entry.
if __name__ == "__main__":
    InfoFile('media/texts.ods', 'odf').get_info()
| 27.727273
| 72
| 0.659016
|
4a0eeb5f95023dd2bdfb50263b1f7d0a19550f22
| 74
|
py
|
Python
|
tests/models/__init__.py
|
osanwe/Open-Vocabulary-Learning-on-Source-Code-with-a-Graph-Structured-Cache
|
d0d6e2b2414e6774dd6c78b0c48c2a9db6c3e181
|
[
"MIT"
] | 26
|
2018-10-11T22:06:31.000Z
|
2021-07-22T09:02:45.000Z
|
tests/models/__init__.py
|
osanwe/Open-Vocabulary-Learning-on-Source-Code-with-a-Graph-Structured-Cache
|
d0d6e2b2414e6774dd6c78b0c48c2a9db6c3e181
|
[
"MIT"
] | 3
|
2018-10-22T15:54:54.000Z
|
2021-06-22T11:06:54.000Z
|
tests/models/__init__.py
|
osanwe/Open-Vocabulary-Learning-on-Source-Code-with-a-Graph-Structured-Cache
|
d0d6e2b2414e6774dd6c78b0c48c2a9db6c3e181
|
[
"MIT"
] | 9
|
2018-12-06T09:43:15.000Z
|
2021-06-15T18:43:37.000Z
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
| 37
| 73
| 0.77027
|
4a0eeb71b15ae97aee399553d2cc607af865220f
| 7,599
|
py
|
Python
|
pennylane/templates/embeddings/amplitude.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | null | null | null |
pennylane/templates/embeddings/amplitude.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | 1
|
2021-05-27T05:36:41.000Z
|
2021-05-27T05:36:41.000Z
|
pennylane/templates/embeddings/amplitude.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | 1
|
2021-05-15T03:25:40.000Z
|
2021-05-15T03:25:40.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the AmplitudeEmbedding template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
import warnings
import numpy as np
import pennylane as qml
from pennylane.operation import Operation, AnyWires
from pennylane.ops import QubitStateVector
from pennylane.wires import Wires
# tolerance for normalization
TOLERANCE = 1e-10
class AmplitudeEmbedding(Operation):
    r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
    By setting ``pad_with`` to a real or complex number, ``features`` is automatically padded to dimension
    :math:`2^n` where :math:`n` is the number of qubits used in the embedding.
    To represent a valid quantum state vector, the L2-norm of ``features`` must be one.
    The argument ``normalize`` can be set to ``True`` to automatically normalize the features.
    If both automatic padding and normalization are used, padding is executed *before* normalizing.
    .. note::
        On some devices, ``AmplitudeEmbedding`` must be the first operation of a quantum circuit.
    .. warning::
        At the moment, the ``features`` argument is **not differentiable** when using the template, and
        gradients with respect to the features cannot be computed by PennyLane.
    Args:
        features (tensor_like): input tensor of dimension ``(2^n,)``, or less if `pad_with` is specified
        wires (Iterable): wires that the template acts on
        pad_with (float or complex): if not None, the input is padded with this constant to size :math:`2^n`
        normalize (bool): whether to automatically normalize the features
        pad (float or complex): same as `pad_with`, to be deprecated
    Example:
        Amplitude embedding encodes a normalized :math:`2^n`-dimensional feature vector into the state
        of :math:`n` qubits:
        .. code-block:: python
            import pennylane as qml
            from pennylane.templates import AmplitudeEmbedding
            dev = qml.device('default.qubit', wires=2)
            @qml.qnode(dev)
            def circuit(f=None):
                AmplitudeEmbedding(features=f, wires=range(2))
                return qml.expval(qml.PauliZ(0))
            circuit(f=[1/2, 1/2, 1/2, 1/2])
        The final state of the device is - up to a global phase - equivalent to the input passed to the circuit:
        >>> dev.state
        [0.5+0.j 0.5+0.j 0.5+0.j 0.5+0.j]
        **Differentiating with respect to the features**
        Due to non-trivial classical processing to construct the state preparation circuit,
        the features argument is in general **not differentiable**.
        **Normalization**
        The template will raise an error if the feature input is not normalized.
        One can set ``normalize=True`` to automatically normalize it:
        .. code-block:: python
            @qml.qnode(dev)
            def circuit(f=None):
                AmplitudeEmbedding(features=f, wires=range(2), normalize=True)
                return qml.expval(qml.PauliZ(0))
            circuit(f=[15, 15, 15, 15])
        >>> dev.state
        [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j]
        **Padding**
        If the dimension of the feature vector is smaller than the number of amplitudes,
        one can automatically pad it with a constant for the missing dimensions using the ``pad_with`` option:
        .. code-block:: python
            from math import sqrt
            @qml.qnode(dev)
            def circuit(f=None):
                AmplitudeEmbedding(features=f, wires=range(2), pad_with=0.)
                return qml.expval(qml.PauliZ(0))
            circuit(f=[1/sqrt(2), 1/sqrt(2)])
        >>> dev.state
        [0.70710678 + 0.j, 0.70710678 + 0.j, 0.0 + 0.j, 0.0 + 0.j]
    """
    # Template metadata consumed by the Operation base class: one trainable
    # parameter (the feature vector), acting on any number of wires, with
    # an array-valued parameter domain.
    num_params = 1
    num_wires = AnyWires
    par_domain = "A"
    def __init__(self, features, wires, pad_with=None, normalize=False, pad=None, do_queue=True):
        # pad is replaced with the more verbose pad_with; keep accepting it
        # for backward compatibility but warn about the deprecation.
        if pad is not None:
            warnings.warn(
                "The pad argument will be replaced by the pad_with option in future versions of PennyLane.",
                UserWarning,
            )
            if pad_with is None:
                pad_with = pad
        wires = Wires(wires)
        self.pad_with = pad_with
        self.normalize = normalize
        # Validation, padding and normalization all happen eagerly here,
        # before the base Operation sees the parameter.
        features = self._preprocess(features, wires, pad_with, normalize)
        super().__init__(features, wires=wires, do_queue=do_queue)
    def adjoint(self):  # pylint: disable=arguments-differ
        # The inverse of state preparation is the adjoint of the Mottonen
        # decomposition applied to the same target state.
        return qml.adjoint(qml.templates.MottonenStatePreparation)(
            self.parameters[0], wires=self.wires
        )
    def expand(self):
        # Decompose into a single QubitStateVector on the template's wires.
        with qml.tape.QuantumTape() as tape:
            QubitStateVector(self.parameters[0], wires=self.wires)
        return tape
    @staticmethod
    def _preprocess(features, wires, pad_with, normalize):
        """Validate and pre-process inputs as follows:
        * Check that the features tensor is one-dimensional.
        * If pad_with is None, check that the first dimension of the features tensor
          has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the
          first dimension of the features tensor is not larger than :math:`2^n` and pad features with value if necessary.
        * If normalize is false, check that first dimension of features is normalised to one. Else, normalise the
          features tensor.
        """
        shape = qml.math.shape(features)
        # check shape
        if len(shape) != 1:
            raise ValueError(f"Features must be a one-dimensional tensor; got shape {shape}.")
        n_features = shape[0]
        if pad_with is None and n_features != 2 ** len(wires):
            raise ValueError(
                f"Features must be of length {2 ** len(wires)}; got length {n_features}. "
                f"Use the 'pad' argument for automated padding."
            )
        if pad_with is not None and n_features > 2 ** len(wires):
            raise ValueError(
                f"Features must be of length {2 ** len(wires)} or "
                f"smaller to be padded; got length {n_features}."
            )
        # pad
        if pad_with is not None and n_features < 2 ** len(wires):
            padding = [pad_with] * (2 ** len(wires) - n_features)
            features = qml.math.concatenate([features, padding], axis=0)
        # normalize
        norm = qml.math.sum(qml.math.abs(features) ** 2)
        if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):
            # NOTE(review): `pad_with` is truthiness-tested here, so
            # pad_with=0.0 does NOT trigger auto-normalization even though
            # padding was requested — confirm whether
            # `pad_with is not None` was intended.
            if normalize or pad_with:
                features = features / np.sqrt(norm)
            else:
                raise ValueError(
                    f"Features must be a vector of length 1.0; got length {norm}."
                    "Use 'normalize=True' to automatically normalize."
                )
        features = qml.math.cast(features, np.complex128)
        return features
| 36.533654
| 121
| 0.630872
|
4a0eec11839523f9defa006d41446a19018bf9ad
| 3,244
|
py
|
Python
|
example/layers/attention.py
|
Achazwl/BMTrain
|
776c10b21886f12137641c56b12ebf8d601aa9e0
|
[
"Apache-2.0"
] | 19
|
2022-03-14T12:30:23.000Z
|
2022-03-31T11:52:29.000Z
|
example/layers/attention.py
|
Achazwl/BMTrain
|
776c10b21886f12137641c56b12ebf8d601aa9e0
|
[
"Apache-2.0"
] | 1
|
2022-03-24T02:11:32.000Z
|
2022-03-24T02:14:17.000Z
|
example/layers/attention.py
|
Achazwl/BMTrain
|
776c10b21886f12137641c56b12ebf8d601aa9e0
|
[
"Apache-2.0"
] | 5
|
2022-03-18T02:03:02.000Z
|
2022-03-29T13:19:09.000Z
|
from typing import Optional
import torch
import bmtrain as bmt
from layers import Linear
import math
class Attention(bmt.DistributedModule):
    """Multi-head scaled dot-product attention with optional position bias.

    Projects queries from `hidden_q` and keys/values from `hidden_kv`,
    applies a boolean mask and an optional additive position bias, and
    projects the attended values back to `dim_model`.
    """
    def __init__(self,
            dim_model : int, dim_head : int,
            num_heads : int, bias : bool = True,
            dtype = None
        ) -> None:
        super().__init__()
        # Q/K/V projections map dim_model -> num_heads * dim_head.
        self.project_q = Linear(dim_model, dim_head * num_heads, bias=bias, dtype=dtype)
        self.project_k = Linear(dim_model, dim_head * num_heads, bias=bias, dtype=dtype)
        self.project_v = Linear(dim_model, dim_head * num_heads, bias=bias, dtype=dtype)
        # Output projection maps the concatenated heads back to dim_model.
        self.project_out = Linear(dim_head * num_heads, dim_model, bias=bias, dtype=dtype)
        self.softmax = torch.nn.Softmax(dim=-1)
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.dim_model = dim_model
    def forward(self,
            hidden_q : torch.Tensor,        # (batch_size, seq_q, dim_model)
            hidden_kv : torch.Tensor,       # (batch_size, seq_kv, dim_model)
            mask : torch.BoolTensor,        # (batch_size, seq_q, seq_kv)
            position_bias : Optional[torch.Tensor] = None,   # (batch, num_heads, seq_q, seq_kv)
        ) -> torch.Tensor:
        """Return attended output of shape (batch_size, seq_q, dim_model).

        `mask` is True where attention is allowed; masked positions get
        score -inf before softmax and probability 0 after it.
        """
        batch_size, seq_q, dim_model = hidden_q.size()
        seq_kv = hidden_kv.size(1)
        h_q : torch.Tensor = self.project_q(hidden_q)
        h_k : torch.Tensor = self.project_k(hidden_kv)
        h_v : torch.Tensor = self.project_v(hidden_kv)
        # Split the projection into heads: (batch, seq, heads, dim_head).
        h_q = h_q.view(batch_size, seq_q, self.num_heads, self.dim_head)
        h_k = h_k.view(batch_size, seq_kv, self.num_heads, self.dim_head)
        h_v = h_v.view(batch_size, seq_kv, self.num_heads, self.dim_head)
        # Move heads next to batch, then fold them together for bmm:
        # (batch * heads, seq, dim_head).
        h_q = h_q.permute(0, 2, 1, 3).contiguous()
        h_k = h_k.permute(0, 2, 1, 3).contiguous()
        h_v = h_v.permute(0, 2, 1, 3).contiguous()
        h_q = h_q.view(batch_size * self.num_heads, seq_q, self.dim_head)
        h_k = h_k.view(batch_size * self.num_heads, seq_kv, self.dim_head)
        h_v = h_v.view(batch_size * self.num_heads, seq_kv, self.dim_head)
        # Scaled dot-product scores: (batch * heads, seq_q, seq_kv).
        score = torch.bmm(
            h_q, h_k.transpose(1, 2)
        )
        score = score / math.sqrt(self.dim_head)
        score = score.view(batch_size, self.num_heads, seq_q, seq_kv)
        if position_bias is not None:
            score = score + position_bias.view(batch_size, self.num_heads, seq_q, seq_kv)
        # Disallowed positions -> -inf so softmax assigns them ~0 weight.
        score = torch.where(
            mask.view(batch_size, 1, seq_q, seq_kv),
            score,
            torch.scalar_tensor(float('-inf'), device=score.device, dtype=score.dtype)
        )
        # Re-apply the mask after softmax so fully-masked rows yield 0
        # instead of NaN.
        score = torch.where(
            mask.view(batch_size, 1, seq_q, seq_kv),
            self.softmax(score),
            torch.scalar_tensor(0, device=score.device, dtype=score.dtype)
        )
        score = score.view(batch_size * self.num_heads, seq_q, seq_kv)
        # Weighted sum of values: (batch * heads, seq_q, dim_head).
        h_out = torch.bmm(
            score, h_v
        )
        # Undo the head split: back to (batch, seq_q, heads * dim_head).
        h_out = h_out.view(batch_size, self.num_heads, seq_q, self.dim_head)
        h_out = h_out.permute(0, 2, 1, 3).contiguous()
        h_out = h_out.view(batch_size, seq_q, self.num_heads * self.dim_head)
        attn_out = self.project_out(h_out)
        return attn_out
| 36.449438
| 96
| 0.610049
|
4a0eecb9ae857dd5a0d40f879741b87fe2afb18c
| 1,978
|
py
|
Python
|
google-cloud-sdk/lib/surface/compute/target_ssl_proxies/describe.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/compute/target_ssl_proxies/describe.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/compute/target_ssl_proxies/describe.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing target SSL proxies."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.target_ssl_proxies import flags
class Describe(base.DescribeCommand):
  """Display detailed information about a target SSL proxy."""
  # Populated once in Args(); shared by all instances of this command.
  TARGET_SSL_PROXY_ARG = None
  @staticmethod
  def Args(parser):
    """Register the target-SSL-proxy positional argument on the parser."""
    Describe.TARGET_SSL_PROXY_ARG = flags.TargetSslProxyArgument()
    Describe.TARGET_SSL_PROXY_ARG.AddArgument(parser, operation_type='describe')
  def Run(self, args):
    """Resolve the proxy reference and fetch it via the Compute API.

    Returns the single matching resource; raises a ToolException if the
    batched request reported any errors.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    ref = self.TARGET_SSL_PROXY_ARG.ResolveAsResource(args, holder.resources)
    client = holder.client.apitools_client
    messages = holder.client.messages
    request = messages.ComputeTargetSslProxiesGetRequest(
        project=ref.project, targetSslProxy=ref.Name())
    errors = []
    resources = holder.client.MakeRequests(
        [(client.targetSslProxies, 'Get', request)], errors)
    if errors:
      utils.RaiseToolException(errors)
    return resources[0]
# Help text surfaced by `gcloud ... describe --help`.
Describe.detailed_help = {
    'brief': 'Display detailed information about a target SSL proxy',
    'DESCRIPTION': """\
        *{command}* displays all data associated with a target SSL proxy
        in a project.
        """,
}
| 34.701754
| 80
| 0.749242
|
4a0eed132a649b0797f936007f5dfc38c8f7bdff
| 3,808
|
py
|
Python
|
tests/logsim.py
|
valodzka/tailon
|
7b690cf47c2d2ff26d1eead86aebf7e00ea02e82
|
[
"Apache-2.0"
] | 1
|
2018-09-25T15:58:37.000Z
|
2018-09-25T15:58:37.000Z
|
tests/logsim.py
|
ahmed-musallam/tailon
|
2aa5313e1447ccb8250a5e7989f05b4927134981
|
[
"Apache-2.0"
] | null | null | null |
tests/logsim.py
|
ahmed-musallam/tailon
|
2aa5313e1447ccb8250a5e7989f05b4927134981
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''Writes log-file-looking lines to files.'''
import os
import time
import argparse
import asyncio
from random import choice, randint, seed
from datetime import datetime as dt
agents = (
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.29.13 (KHTML, like Gecko) Version/6.0.4 Safari/536.29.13',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31',
'Opera/9.80 (Windows NT 6.1; WOW64) Presto/2.12.388 Version/12.15',
)
paths = (
'/js/app/app.js',
'/js/app/router.js',
'/js/app/model/User.js',
'/js/app/model/Chat.js',
'/js/app/model/ChatMessage.js',
'/js/app/view/NavView.js',
'/js/app/view/AppView.js',
'/js/app/view/TrackView.js',
'/js/app/view/ChatView.js',
'/js/app/view/HomeView.js',
'/js/app/view/DiscoverView.js',
'/js/app/view/SignupView.js',
'/js/app/view/CreateRoomView.js',
'/js/app/view/ListenersView.j',
'/js/app/view/LoginView.js',
'/index.html',
)
methods = 'POST', 'GET', 'HEAD'
codes = 304, 404, 300, 400, 200
logfmt = '[{now:%d/%b/%Y:%H:%M:%S %z}] "{method} {path} HTTP/1.1" {status} 0 "{agent}"\n'
def generate_lines():
    """Infinite generator of randomized access-log-style lines."""
    while True:
        fields = {
            "now": dt.now(),
            "method": choice(methods),
            "path": choice(paths),
            "status": choice(codes),
            "agent": choice(agents),
        }
        yield logfmt.format(**fields)
async def writer(fn, gen, lock, rate=(1, 4), update_msec=(500, 1000)):
    """Append batches of log lines from *gen* to file *fn* forever.

    `rate` and `update_msec` may be ints (fixed) or (lo, hi) ranges from
    which a value is drawn each iteration.  The shared *lock* serializes
    file access with the truncater coroutine.
    """
    while True:
        n = randint(*rate) if isinstance(rate, (tuple, list)) else rate
        s = randint(*update_msec) if isinstance(update_msec, (tuple, list)) else update_msec
        async with lock:
            # Re-open in append mode each round so truncation by the
            # truncater coroutine is picked up.
            with open(fn, 'a') as fh:
                for i in range(n):
                    fh.write(next(gen))
                fh.flush()
        await asyncio.sleep(s/1000)
async def truncater(fn, lock, truncate_msec=10000):
    """Truncate file *fn* to zero length every *truncate_msec* milliseconds."""
    while True:
        await asyncio.sleep(truncate_msec/1000)
        async with lock:
            # Opening in 'w' mode truncates; nothing is written.
            fh = open(fn, 'w')
            fh.close()
async def logwriter(args):
    """Run one writer coroutine per target file, plus an optional truncater.

    Bug fix: the original appended ``t`` unconditionally even though it was
    only bound inside the ``if`` branch, raising NameError whenever
    truncation was disabled.  The truncater is now only scheduled when it
    is actually created.
    """
    gen = generate_lines()
    lock = asyncio.Lock()
    coros = []
    for fn in args.files:
        coros.append(writer(fn, gen, lock=lock, rate=args.rate, update_msec=args.update_msec))
        if not args.no_truncate:
            coros.append(truncater(fn, lock=lock, truncate_msec=args.truncate_msec))
    await asyncio.gather(*coros)
def main():
    """Parse CLI options and run the log-writing event loop forever.

    Bug fix: ``--no-truncate`` previously used ``action='store_false'``,
    so ``args.no_truncate`` defaulted to True (truncation off by default)
    and passing the flag *enabled* truncation — the inverse of its name.
    With ``store_true`` the flag now defaults to False (truncate by
    default) and ``--no-truncate`` disables truncation, matching
    ``logwriter``'s ``if not args.no_truncate`` check.
    """
    def tuple_or_int(value):
        # "a,b" -> [a, b] range; plain "n" -> fixed int.
        if ',' in value:
            return [int(i) for i in value.split(',')]
        else:
            return int(value)
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--update-msec', default=1000, metavar='msec', type=tuple_or_int)
    arg('--truncate-msec', default=10000, metavar='msec', type=tuple_or_int)
    arg('--no-truncate', action='store_true')
    arg('--rate', default=1, metavar='msec', type=tuple_or_int)
    arg('--seed', default=str(time.time()))
    arg('files', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    args.files = [os.path.abspath(fn) for fn in args.files]
    print('using random seed: %s' % args.seed)
    seed(args.seed)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(logwriter(args))
    loop.close()
if __name__ == '__main__':
    main()
| 30.709677
| 126
| 0.616071
|
4a0eeda4c8bf79d27ad4ae11e98470b9e4ed3a76
| 251
|
py
|
Python
|
genocide/slg.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
genocide/slg.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
genocide/slg.py
|
bthate/genocide
|
8de7a2cccee7315ae6cf5661738ba1335e30a5ba
|
[
"DOC"
] | null | null | null |
# This file is placed in the Public Domain.
#
# EM_T04_OTP-CR-117_19 otp.informationdesk@icc-cpi.int https://genocide.rtfd.io
# Canonical reference string replied by the slg command.
txt = "EM_T04_OTP-CR-117_19 otp.informationdesk@icc-cpi.int https://genocide.rtfd.io"
def slg(event):
    """Reply to *event* with the fixed reference text."""
    event.reply(txt)
| 27.888889
| 85
| 0.749004
|
4a0eee88b9e944ef32ccc85474172409aed5bed1
| 16,531
|
py
|
Python
|
main_analysis/50.construct_figure3.py
|
thodk/proteostasis_imprinting_across_evolution
|
e6e414b36bbfaf2f4c4ac6fc80a31c6c13c3902e
|
[
"MIT"
] | null | null | null |
main_analysis/50.construct_figure3.py
|
thodk/proteostasis_imprinting_across_evolution
|
e6e414b36bbfaf2f4c4ac6fc80a31c6c13c3902e
|
[
"MIT"
] | null | null | null |
main_analysis/50.construct_figure3.py
|
thodk/proteostasis_imprinting_across_evolution
|
e6e414b36bbfaf2f4c4ac6fc80a31c6c13c3902e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pandas
import os
import sys
import numpy
import json
import operator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.lines import Line2D
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from sklearn.manifold import MDS
sys.path.append('../')
from core_functions import remove_unannotated
from core_functions import construct_graph_from_mongo
import core_classes
import heatmap
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['FreeSans', ]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['axes.titlepad'] = 4
matplotlib.rcParams['xtick.major.pad']='0'
matplotlib.rcParams['ytick.major.pad']='0'
def worker_for_components(clusters_to_keep):
    """Subset the term similarity matrix to terms backed by the kept clusters.

    Relies on script-level globals defined in __main__: GCP (clustering),
    outputs_mapping (species -> enriched terms), substitutions_dict and
    terms_sem_sim_matrix.  NOTE(review): Python 2 style module; verify
    globals are initialized before this is called.
    """
    to_keep_terms = []
    for cluster in clusters_to_keep:
        cluster_obj = GCP.entries[cluster]
        members = cluster_obj.get_all_members()
        to_keep_terms.extend(members)
    # Union of enriched terms over all species, deduplicated and sorted.
    enriched_terms = [i for j in outputs_mapping.values() for i in j]
    enriched_terms = sorted(list(set(enriched_terms)))
    to_remove_terms = []
    for term in enriched_terms:
        # Keep a term only if at least one of its substitutions belongs to
        # a kept cluster.
        new_terms = substitutions_dict[term]
        if len(set(new_terms).intersection(to_keep_terms)) == 0:
            to_remove_terms.append(term)
        else:
            pass
    final_terms = list(set(enriched_terms).difference(to_remove_terms))
    output_df = terms_sem_sim_matrix.loc[final_terms,final_terms]
    return output_df
def worker_for_species(tmp_terms_sim_matrix):
    """Build a species-by-species dissimilarity matrix from term similarities.

    Python 2 code (bare `print i, j`).  Uses script-level globals:
    species, species_names, outputs_mapping, semantics.  Species whose
    similarity row is all-zero are dropped before converting similarity
    to dissimilarity (1 - sim).
    """
    final_terms = tmp_terms_sim_matrix.index.tolist()
    output_matrix = numpy.ones((len(species), len(species)))
    for i in range(len(species)):
        terms1 = list(set(outputs_mapping[species[i]]).intersection(final_terms))
        for j in range(i+1,len(species)):
            terms2 = list(set(outputs_mapping[species[j]]).intersection(final_terms))
            print i, j
            if len(terms1) == 0 and len(terms2) == 0:
                pair_sim = 0
            else:
                # Average best-match similarity in both directions.
                pair_matrix_1 = tmp_terms_sim_matrix.loc[terms1, terms2]
                pair_matrix_2 = tmp_terms_sim_matrix.loc[terms2, terms1]
                pair_sim = semantics.get_average_best_matches([pair_matrix_1, pair_matrix_2])
            output_matrix[i,j] = output_matrix[j,i] = round(pair_sim, 3)
    tmp = [species_names[o] for o in species]
    # Remove species with no similarity signal at all (all-zero rows).
    zero_line_indices = list(numpy.where(numpy.all(output_matrix == 0, axis=1))[0])
    output_matrix = numpy.delete(arr=output_matrix, axis=0, obj=zero_line_indices)
    output_matrix = numpy.delete(arr=output_matrix, axis=1, obj=zero_line_indices)
    tmp = numpy.delete(numpy.array(tmp), obj=zero_line_indices)
    # Convert similarity to dissimilarity; the diagonal is forced to 0.
    output_matrix = 1 - output_matrix
    numpy.fill_diagonal(output_matrix, 0)
    output_matrix = pandas.DataFrame(output_matrix, index=tmp, columns=tmp)
    return output_matrix
def worker_for_single_scatter_plot(matrix_df, output):
    """Embed a precomputed dissimilarity matrix in 2D via metric MDS.

    Returns (coords, colors) where colors are looked up per species via
    the script-level colors_taxonomy_mapping / species_names_tax_mapping
    globals.  NOTE(review): the `output` parameter is unused here.
    """
    mds_obj = MDS(dissimilarity='precomputed', n_components=2, n_jobs=4,
                  random_state=100, eps=1e-6, n_init=10)
    colors = [colors_taxonomy_mapping[species_names_tax_mapping[i]] for i in matrix_df.index]
    coords = mds_obj.fit_transform(matrix_df.values)
    return coords, colors
def worker_for_all_scatter_plot(ax, coords, colors, title):
    """Draw one MDS scatter panel on *ax* with shared axis limits/ticks.

    Uses the script-level globals `lim` and `ticks` so every panel in the
    figure shares the same scale.
    """
    ax.scatter(coords[:,0], coords[:,1], c=colors, s=5, linewidths=.0, edgecolor='black', alpha=0.85)
    ax.set_xlim([-lim,lim])
    ax.set_ylim([-lim,lim])
    # Ticks are set twice: first hidden (size=0), then re-set at size 5 —
    # only the second call takes effect.
    plt.xticks(ticks, size=0)
    plt.yticks(ticks, size=0)
    plt.title(title, size=8)
    plt.xticks(ticks, size=5)
    plt.xlabel('Eigen-component 1', size=8, labelpad=1)
    plt.yticks(ticks, size=5)
    plt.ylabel('Eigen-component 2', size=8, labelpad=1)
    plt.tick_params(labelsize=6, pad=0.75, width=0.75, length=2, direction='out')
    #plt.setp(ax.get_xticklabels(), visible=False)
    #plt.setp(ax.get_yticklabels(), visible=False)
    ax.grid(True, linestyle='--', alpha=0.5, linewidth=0.3)
if __name__ == '__main__':
    # Pipeline (Python 2 syntax - note the print statements): build the
    # proteostasis-network (PN) component/species association matrix and its
    # clustered heatmap (figure3_A), then MDS scatter plots of species
    # similarities for three partitions of the components (figure3_B).
    main_dir = './PN_components/'
    clustering_dir = main_dir+'terms_clustering/'
    if not os.path.exists(clustering_dir):
        os.makedirs(clustering_dir)
    # Load the GO-BP graph and restrict it to terms present in the
    # standardized-graph substitution mapping.
    G = construct_graph_from_mongo('GO_P', mongo_database='background')
    with open('./PN_analysis/standardized_graph/GO_P_terms_substitutions.json', 'r') as f:
        substitutions_dict = json.load(f)
    all_terms = list(set([i for i in substitutions_dict.keys()]))
    to_remove_terms = list(set(G.entries.keys()).difference(all_terms))
    G = remove_unannotated(G, to_remove_terms)
    semantics = core_classes.Semantics(G)
    print 'LOADED GRAPH', len(G.entries)
    '''
    STEP 1: Construction of association matrix and heatmap between the main
    PN components and species
    '''
    # load pathway analysis results
    pathway_analysis_outputs = os.listdir('./PN_analysis/pathway_analysis_outputs/')
    outputs_mapping = {}
    for f in pathway_analysis_outputs:
        df = pandas.read_csv('./PN_analysis/pathway_analysis_outputs/'+f, sep=',')
        tmp_species = f.replace('_GO_P.csv', '')
        terms = df.term_id.tolist()
        # Keep only sufficiently informative terms (IC >= 0.25), then cap the
        # per-species list at 100 terms unless >= 100 are significant.
        ics = [ (term, semantics.get_information_content(term, criterion='graph_corpus')) for term in terms]
        ics = sorted(ics, key=operator.itemgetter(1), reverse=True)
        filter_terms = [k[0] for k in filter(lambda x: x[1] >= 0.25, ics)]
        df = df.loc[df.term_id.isin(filter_terms), :]
        length = df.shape[0]
        if length < 100:
            pass
        elif df.loc[df.corrected_pvalue <= 0.05].shape[0] >= 100:
            df = df.loc[df.corrected_pvalue <= 0.05]
        else:
            df = df.iloc[0:100,:]
        all_terms = df.term_id
        outputs_mapping.update({tmp_species: all_terms})
    print 'LOADED PA RESULTS'
    # substitute terms with their ancestors from the standardized GO-BP
    new_outputs_mapping = {}
    for tmp_species, terms in outputs_mapping.items():
        new_terms = []
        for term in terms:
            new_terms.extend(substitutions_dict[term])
        new_terms = list(set(new_terms))
        new_outputs_mapping.update({tmp_species:new_terms})
    final_terms = list(set([i for l in new_outputs_mapping.values() for i in l]))
    print 'CLUSTERING INIT', len(final_terms)
    # Resnik clustering
    GCP = core_classes.GraphClusteringProcessMICA(entries=final_terms, graph_instance=G,
                                                  metric='resnik', criterion='graph_corpus',
                                                  replace_method='mica')
    GCP.clusteringT(threshold=0.175)
    clusters = sorted(GCP.entries.keys())
    species = sorted(new_outputs_mapping.keys())
    print 'CLUSTERING FINISH', len(clusters)
    # Persist the cluster membership (cluster_id -> term_id rows).
    clustering_df = pandas.DataFrame({'cluster_id':[], 'term_id':[] })
    for cluster, obj in GCP.entries.items():
        members = obj.get_all_members()
        for m in members:
            clustering_df = clustering_df.append({'cluster_id':cluster,
                                                  'term_id':m},
                                                 ignore_index=True)
    clustering_df.to_csv(clustering_dir+'clustering.csv', sep=',')
    # Construct the association matrix
    # Binary matrix: M[i, j] == 1 iff cluster i contains at least one term
    # enriched in species j.
    M = numpy.zeros((len(clusters),len(species)))
    print 'association matrix', M.shape
    for i, cluster in enumerate(clusters):
        obj = GCP.entries[cluster]
        members = obj.get_all_members()
        for j, tmp_species in enumerate(species):
            if len(set(new_outputs_mapping[tmp_species]).intersection(members)) > 0:
                M[i,j] = 1
            else:
                M[i,j] = 0
    association_df = pandas.DataFrame(M, columns=species, index=clusters)
    # "Common" components are associated with >= 90% of species.
    common_components = list(association_df.index[association_df.apply(lambda x: sum(x)/float(len(x)) >= 0.9, axis=1)])
    # NOTE(review): 'componenets' misspellings below are kept as-is; they are
    # consistent variable/file names throughout this script.
    different_componenets = list(set(clusters).difference(common_components))
    defs = [G.get_entry_obj(i).definition for i in clusters]
    association_df = pandas.DataFrame(M, columns=species, index=defs)
    association_df.to_csv(main_dir+'PN_components_matrix.tsv', sep='\t')
    association_matrix = association_df.values
    # Construct the heatmap of the association matrix
    colors_df = pandas.read_csv('./files/colors.csv', sep=',')
    colors_taxonomy_mapping = dict(zip(colors_df.taxonomy, colors_df.color))
    species_df = pandas.read_csv('./files/species.tsv', sep='\t')
    species_names = dict(zip(species_df.abbreviation, species_df.species_name))
    species_abbr_tax_mapping = dict(zip(species_df.abbreviation, species_df.taxonomy))
    species_names_tax_mapping = dict(zip(species_df.species_name, species_df.taxonomy))
    row_clustering_data={'data':association_matrix, 'method':'ward', 'metric':'hamming', 'ratio':0.0}
    col_clustering_data={'data':association_matrix.transpose(), 'method':'ward', 'metric':'hamming',
                         'ratio':0.1}
    y_labels = [i for i in list(association_df.index)]
    y_labels_for_plot = [i.capitalize() for i in y_labels]
    y_labels_dict = dict(zip(y_labels, y_labels_for_plot))
    x_labels = [i for i in list(association_df.columns.values)]
    x_labels_for_plot = [species_names[i] for i in x_labels]
    x_labels_dict = dict(zip(x_labels, x_labels_for_plot))
    # Color species columns by taxonomy; build the taxonomy legend handles.
    colors = [colors_taxonomy_mapping[species_abbr_tax_mapping[i]] for i in x_labels]
    colors_data = {'colors':colors, 'ratio':0.04}
    colors_legend_data = {}
    colors_legend_data.update({'patches': [[], []]})
    for key, value in colors_taxonomy_mapping.items():
        colors_legend_data['patches'][0].append(key.capitalize())
        p = Line2D([0], [0], marker='o', color=value, markerfacecolor=value,
                   markersize=12, label=key.capitalize())
        colors_legend_data['patches'][1].append(p)
    colors_legend_data.update({'title':'Taxonomy'})
    colors_legend_data.update({'bbox_anchor':(0.165,0.5), 'fontsize':8,
                               'handlelength':0.7, 'handletextpad':1,
                               'handleheight':2, 'title_size':10, 'y_align':0.4})
    legend_labels = ['NO', 'YES']
    legend_title = 'Association'
    legend_data = {'x':0.1, 'y':0.5, 'w':0.02, 'h':0.15, 'labels':legend_labels,
                   'labels_size':8, 'cbar_kws':{'ticks':[0.25, 0.75]},
                   'title':legend_title, 'title_size':10}
    x_specific_labels_format = {}
    for label in x_labels:
        tmp_dict = {'color': colors_taxonomy_mapping[species_abbr_tax_mapping[label]],
                    'weight':600}
        x_specific_labels_format.update({x_labels_dict[label]:tmp_dict})
    x_axis_data = {'labels': x_labels_for_plot,
                   'specific_labels_format':x_specific_labels_format,
                   'fontdict':{'size':1, 'rotation':90}}
    # Highlight the rows of the "common" components in dark red.
    y_specific_labels_format = {}
    for i, label in enumerate(y_labels):
        if clusters[i] in common_components:
            tmp_dict = {'color': '#B30000', 'weight':600}
            y_specific_labels_format.update({y_labels_dict[label]:tmp_dict})
    y_axis_data = {'labels': y_labels_for_plot,
                   'specific_labels_format':y_specific_labels_format,
                   'fontdict':{'size':5}}
    heatmap_data={'data':association_matrix, 'type':'features', 'x_ratio':1.5, 'y_ratio':1.2}
    # Two-color map: light gray = not associated, black = associated.
    cmap = LinearSegmentedColormap.from_list("my_colormap", ('#eaeaea', '#000000'), N=2)
    c = heatmap.Clustergram(heatmap_data, figsize=(7,5.5), cmap=cmap,
                            y_axis_data=y_axis_data,
                            x_axis_data=x_axis_data,
                            row_clustering_data=row_clustering_data,
                            col_clustering_data=col_clustering_data,
                            row_colors_data = None,
                            col_colors_data = colors_data,
                            colors_legend_data = colors_legend_data,
                            vmin=0.0, vmax=1, legend_data=legend_data,
                            linecolor='#e0e0e0', linewidth=0.0)
    c.construction()
    c.set_coords()
    c.set_labels()
    c.clustergrid.ax_heatmap.set_frame_on(False)
    c.clustergrid.ax_heatmap.tick_params(pad=1, width=0.05, length=0.3, direction='out')
    output = main_dir+'figure3_A'
    c.clustergrid.savefig(output+'.png', dpi=800, format='png')
    #c.clustergrid.savefig(main_dir+output+'.tiff', dpi=800, format='tiff')
    c.clustergrid.savefig(output+'.pdf', dpi=800, format='pdf')
    '''
    STEP 2: Constrution of scatter plots for the different partitions of the
    PN components, based on the MDS
    '''
    '''
    scaffold_similarity_matrix = pandas.read_csv('./PN_analysis/standardized_graph/mean_sem_sim_matrix.csv',
                                                 sep=',', index_col=0)
    enriched_terms = [i for j in outputs_mapping.values() for i in j]
    enriched_terms = sorted(list(set(enriched_terms)))
    terms_sem_sim_matrix = numpy.ones((len(enriched_terms), len(enriched_terms)))
    for t1 in range(len(enriched_terms)):
        term1 = enriched_terms[t1]
        new_term1 = substitutions_dict[term1]
        for t2 in range(t1+1, len(enriched_terms)):
            term2 = enriched_terms[t2]
            new_term2 = substitutions_dict[term2]
            sim = scaffold_similarity_matrix.loc[new_term1, new_term2].values.mean()
            terms_sem_sim_matrix[t1,t2] = terms_sem_sim_matrix[t2,t1] = round(sim,3)
    terms_sem_sim_matrix = pandas.DataFrame(terms_sem_sim_matrix,
                                            columns=enriched_terms,
                                            index=enriched_terms)
    '''
    # The commented block above computed the pairwise term similarities;
    # they are now precomputed and read from disk instead.
    terms_sem_sim_matrix = pandas.read_csv('./PN_components/enriched_terms_semantic_similarities.csv',
                                           sep=',', index_col=0)
    print 'SCATTER PLOTS'
    # Three partitions: all components, common-only, differential-only.
    all_componenets = clusters
    all_components_sim_matrix = worker_for_components(all_componenets)
    species_sim_matrix = worker_for_species(all_components_sim_matrix)
    species_sim_matrix.to_csv(main_dir+'all_components_sim_matrix.csv', sep=',')
    #species_sim_matrix = pandas.read_csv(main_dir+'all_components_sim_matrix.csv', sep=',', index_col=0)
    coords1, colors1 = worker_for_single_scatter_plot(species_sim_matrix, main_dir+'all_componenents')
    df1 = pandas.DataFrame(coords1, index=species)
    df1.loc[:,'colors'] = colors1
    df1.to_csv(main_dir+'all_componenets_coords.csv', sep=',')
    common_components_sim_matrix = worker_for_components(common_components)
    species_sim_matrix2 = worker_for_species(common_components_sim_matrix)
    species_sim_matrix2.to_csv(main_dir+'common_components_sim_matrix.csv', sep=',')
    #species_sim_matrix2 = pandas.read_csv(main_dir+'common_components_sim_matrix.csv', sep=',', index_col=0)
    coords2, colors2 = worker_for_single_scatter_plot(species_sim_matrix2, main_dir+'common')
    df2 = pandas.DataFrame(coords2, index=species)
    df2.loc[:,'colors'] = colors2
    df2.to_csv(main_dir+'common_components_coords.csv', sep=',')
    different_components_sim_matrix = worker_for_components(different_componenets)
    species_sim_matrix3 = worker_for_species(different_components_sim_matrix)
    species_sim_matrix3.to_csv(main_dir+'differential_components_sim_matrix.csv', sep=',')
    #species_sim_matrix3 = pandas.read_csv(main_dir+'differential_components_sim_matrix.csv', sep=',', index_col=0)
    # NOTE(review): the second argument here is main_dir+'common', apparently
    # copy-pasted from the call above; it is unused by the worker, so harmless.
    coords3, colors3 = worker_for_single_scatter_plot(species_sim_matrix3, main_dir+'common')
    df3 = pandas.DataFrame(coords3, index=species)
    df3.loc[:,'colors'] = colors3
    df3.to_csv(main_dir+'differential_components_coords.csv', sep=',')
    plt.close()
    # Figure 3B: three MDS panels sharing axis limits (globals read by
    # worker_for_all_scatter_plot).
    lim = 0.7
    ticks = numpy.arange(-0.6, 0.6+0.1, 0.2)
    fig = plt.figure(1, figsize=(7,2), frameon=False)
    grids = GridSpec(1,3, hspace=0, wspace=0.3)
    ax = fig.add_subplot(grids[0,1])
    worker_for_all_scatter_plot(ax, coords2, colors2, 'Common Components of\nProteostasis Network')
    ax = fig.add_subplot(grids[0,2])
    worker_for_all_scatter_plot(ax, coords3, colors3, 'Differential Components of\nProteostasis Network')
    ax = fig.add_subplot(grids[0,0])
    worker_for_all_scatter_plot(ax, coords1, colors1, 'Entire Proteostasis\nNetwork')
    output = main_dir+'figure3_B'
    plt.savefig(output+'.png', dpi=800, format='png', bbox_inches='tight', pad_inches=0.05)
    #plt.savefig(output+'.tiff', dpi=800, format='tiff', bbox_inches='tight', pad_inches=0.05)
    plt.savefig(output+'.pdf', dpi=600, format='pdf', bbox_inches='tight', pad_inches=0.05)
| 43.274869
| 119
| 0.669711
|
4a0eeeb1005ae7b4b14b03157eefd674d553686f
| 13,211
|
py
|
Python
|
mitmproxy/flowfilter.py
|
caputomarcos/mitmproxy
|
d24f76c98e9673d4c66121c007080a62a0998569
|
[
"MIT"
] | null | null | null |
mitmproxy/flowfilter.py
|
caputomarcos/mitmproxy
|
d24f76c98e9673d4c66121c007080a62a0998569
|
[
"MIT"
] | null | null | null |
mitmproxy/flowfilter.py
|
caputomarcos/mitmproxy
|
d24f76c98e9673d4c66121c007080a62a0998569
|
[
"MIT"
] | 1
|
2020-11-07T08:54:29.000Z
|
2020-11-07T08:54:29.000Z
|
"""
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
    ~bs rex Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
import re
import sys
import functools
from mitmproxy import http
from mitmproxy import websocket
from mitmproxy import tcp
from mitmproxy import flow
from mitmproxy.utils import strutils
import pyparsing as pp
from typing import Callable
def only(*types):
    """Decorator factory: restrict a filter predicate to the given flow types.

    The wrapped predicate runs only when ``flow`` is an instance of one of
    *types*; for any other flow the filter evaluates to ``False``.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def filter_types(self, flow):
            # Guard clause: a flow of the wrong type can never match.
            if not isinstance(flow, types):
                return False
            return fn(self, flow)
        return filter_types
    return decorator
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print("{spacing}{name}{expr}".format(
spacing="\t" * indent,
name=self.__class__.__name__,
expr=getattr(self, "expr", "")
), file=fp)
class _Action(_Token):
    """AST node constructed directly by a pyparsing parse action."""

    @classmethod
    def make(cls, s, loc, toks):
        # toks[0] is the literal operator (e.g. "~q"); the remainder are
        # the operator's arguments.
        return cls(*toks[1:])
class FErr(_Action):
    code = "e"
    help = "Match error"

    def __call__(self, f):
        # Match iff the flow carries an error object (truthiness of f.error).
        return bool(f.error)
class FMarked(_Action):
    code = "marked"
    help = "Match marked flows"

    def __call__(self, f):
        # f.marked is returned as-is; it acts as the match result.
        return f.marked
class FHTTP(_Action):
    code = "http"
    help = "Match HTTP flows"

    # @only already rejects non-HTTP flows, so matching is purely a type check.
    @only(http.HTTPFlow)
    def __call__(self, f):
        return True
class FWebSocket(_Action):
    code = "websocket"
    help = "Match WebSocket flows"

    # @only already rejects non-WebSocket flows; matching is a type check.
    @only(websocket.WebSocketFlow)
    def __call__(self, f):
        return True
class FTCP(_Action):
    code = "tcp"
    help = "Match TCP flows"

    # @only already rejects non-TCP flows; matching is a type check.
    @only(tcp.TCPFlow)
    def __call__(self, f):
        return True
class FReq(_Action):
    code = "q"
    help = "Match request with no response"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Previously fell through returning an implicit None on the
        # "has a response" path; return an explicit bool like FResp does.
        # Callers only test truthiness, so this is backward-compatible.
        return not f.response
class FResp(_Action):
    code = "s"
    help = "Match response"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Match iff the flow has a response object.
        return bool(f.response)
class _Rex(_Action):
    """Base class for regex-valued filter expressions.

    Subclasses set ``flags`` (``re.*`` flags applied at compile time) and
    ``is_binary`` (whether the pattern is compiled against bytes).
    """
    flags = 0
    is_binary = True

    def __init__(self, expr):
        self.expr = expr
        if self.is_binary:
            expr = strutils.escaped_str_to_bytes(expr)
        try:
            self.re = re.compile(expr, self.flags)
        except re.error as e:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit. parse() catches the ValueError.
            raise ValueError("Cannot compile expression.") from e
def _check_content_type(rex, message):
return any(
name.lower() == b"content-type" and
rex.search(value)
for name, value in message.headers.fields
)
class FAsset(_Action):
    code = "a"
    help = "Match asset in response: CSS, Javascript, Flash, images."
    # Raw patterns, immediately replaced below by their compiled forms;
    # they are matched against the response Content-Type header value.
    ASSET_TYPES = [
        b"text/javascript",
        b"application/x-javascript",
        b"application/javascript",
        b"text/css",
        b"image/.*",
        b"application/x-shockwave-flash"
    ]
    ASSET_TYPES = [re.compile(x) for x in ASSET_TYPES]

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Only responses can be assets; any one pattern match suffices.
        if f.response:
            for i in self.ASSET_TYPES:
                if _check_content_type(i, f.response):
                    return True
        return False
class FContentType(_Rex):
    code = "t"
    help = "Content-type header"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Matches the content-type of either the request or the response.
        if _check_content_type(self.re, f.request):
            return True
        elif f.response and _check_content_type(self.re, f.response):
            return True
        return False
class FContentTypeRequest(_Rex):
    code = "tq"
    help = "Request Content-Type header"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Request side only; _check_content_type returns a bool.
        return _check_content_type(self.re, f.request)
class FContentTypeResponse(_Rex):
    code = "ts"
    help = "Response Content-Type header"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Response side only; no response means no match.
        if f.response:
            return _check_content_type(self.re, f.response)
        return False
class FHead(_Rex):
    code = "h"
    help = "Header"
    # MULTILINE so ^/$ anchor per serialized "name: value" header line.
    flags = re.MULTILINE

    @only(http.HTTPFlow)
    def __call__(self, f):
        # bytes(headers) serializes all headers into one blob to search.
        if f.request and self.re.search(bytes(f.request.headers)):
            return True
        if f.response and self.re.search(bytes(f.response.headers)):
            return True
        return False
class FHeadRequest(_Rex):
    code = "hq"
    help = "Request header"
    # MULTILINE so ^/$ anchor per serialized "name: value" header line.
    flags = re.MULTILINE

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Explicit bool instead of the previous implicit None on the
        # no-match path (consistent with FHead); callers test truthiness.
        return bool(f.request and self.re.search(bytes(f.request.headers)))
class FHeadResponse(_Rex):
    code = "hs"
    help = "Response header"
    # MULTILINE so ^/$ anchor per serialized "name: value" header line.
    flags = re.MULTILINE

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Explicit bool instead of the previous implicit None on the
        # no-match path (consistent with FHead); callers test truthiness.
        return bool(f.response and self.re.search(bytes(f.response.headers)))
class FBod(_Rex):
    code = "b"
    help = "Body"
    # DOTALL so "." also matches newlines inside the body blob.
    flags = re.DOTALL

    @only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
    def __call__(self, f):
        if isinstance(f, http.HTTPFlow):
            # get_content(strict=False) decodes best-effort (e.g. gzip).
            if f.request and f.request.raw_content:
                if self.re.search(f.request.get_content(strict=False)):
                    return True
            if f.response and f.response.raw_content:
                if self.re.search(f.response.get_content(strict=False)):
                    return True
        elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
            # Message-oriented flows: search every message payload.
            for msg in f.messages:
                if self.re.search(msg.content):
                    return True
        return False
class FBodRequest(_Rex):
    code = "bq"
    help = "Request body"
    # DOTALL so "." also matches newlines inside the body blob.
    flags = re.DOTALL

    @only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
    def __call__(self, f):
        if isinstance(f, http.HTTPFlow):
            # Best-effort decoded request body.
            if f.request and f.request.raw_content:
                if self.re.search(f.request.get_content(strict=False)):
                    return True
        elif isinstance(f, (websocket.WebSocketFlow, tcp.TCPFlow)):
            # Client-to-server messages only.
            for msg in f.messages:
                if msg.from_client and self.re.search(msg.content):
                    return True
        # Explicit False instead of the previous implicit None
        # (consistent with FBod); callers only test truthiness.
        return False
class FBodResponse(_Rex):
    code = "bs"
    help = "Response body"
    # DOTALL so "." also matches newlines inside the body blob.
    flags = re.DOTALL

    @only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
    def __call__(self, f):
        if isinstance(f, http.HTTPFlow):
            # Best-effort decoded response body.
            if f.response and f.response.raw_content:
                if self.re.search(f.response.get_content(strict=False)):
                    return True
        elif isinstance(f, (websocket.WebSocketFlow, tcp.TCPFlow)):
            # Server-to-client messages only.
            for msg in f.messages:
                if not msg.from_client and self.re.search(msg.content):
                    return True
        # Explicit False instead of the previous implicit None
        # (consistent with FBod); callers only test truthiness.
        return False
class FMethod(_Rex):
    code = "m"
    help = "Method"
    flags = re.IGNORECASE

    @only(http.HTTPFlow)
    def __call__(self, f):
        # data.method is the raw bytes form of the HTTP method.
        return bool(self.re.search(f.request.data.method))
class FDomain(_Rex):
    code = "d"
    help = "Domain"
    flags = re.IGNORECASE

    @only(http.HTTPFlow)
    def __call__(self, f):
        # data.host is the raw bytes form of the request host.
        return bool(self.re.search(f.request.data.host))
class FUrl(_Rex):
    code = "u"
    help = "URL"
    # URLs are str, so the pattern is compiled against text, not bytes.
    is_binary = False

    # FUrl is special, because it can be "naked".
    @classmethod
    def make(klass, s, loc, toks):
        # With the "~u" prefix toks == ["~u", rex]; naked form toks == [rex].
        if len(toks) > 1:
            toks = toks[1:]
        return klass(*toks)

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Returns the match object (truthy) rather than a bool.
        return self.re.search(f.request.url)
class FSrc(_Rex):
    code = "src"
    help = "Match source address"
    is_binary = False

    def __call__(self, f):
        # Matches against repr() of the client address tuple.
        return f.client_conn.address and self.re.search(repr(f.client_conn.address))
class FDst(_Rex):
    code = "dst"
    help = "Match destination address"
    is_binary = False

    def __call__(self, f):
        # Matches against repr() of the server address tuple.
        return f.server_conn.address and self.re.search(repr(f.server_conn.address))
class _Int(_Action):
    # Base class for filters taking a single integer argument; the parser
    # supplies it as a string.
    def __init__(self, num):
        self.num = int(num)
class FCode(_Int):
    code = "c"
    help = "HTTP response code"

    @only(http.HTTPFlow)
    def __call__(self, f):
        # Explicit bool instead of the previous implicit None when there is
        # no response or the code differs; callers only test truthiness.
        return bool(f.response and f.response.status_code == self.num)
class FAnd(_Token):
    # Conjunction node: matches iff every child filter matches.
    def __init__(self, lst):
        self.lst = lst

    def dump(self, indent=0, fp=sys.stdout):
        super().dump(indent, fp)
        for i in self.lst:
            i.dump(indent + 1, fp)

    def __call__(self, f):
        return all(i(f) for i in self.lst)
class FOr(_Token):
    # Disjunction node: matches iff any child filter matches.
    def __init__(self, lst):
        self.lst = lst

    def dump(self, indent=0, fp=sys.stdout):
        super().dump(indent, fp)
        for i in self.lst:
            i.dump(indent + 1, fp)

    def __call__(self, f):
        return any(i(f) for i in self.lst)
class FNot(_Token):
    # Negation node; pyparsing passes the operand as a one-element list.
    def __init__(self, itm):
        self.itm = itm[0]

    def dump(self, indent=0, fp=sys.stdout):
        super().dump(indent, fp)
        self.itm.dump(indent + 1, fp)

    def __call__(self, f):
        return not self.itm(f)
# Registries used both to build the grammar (_make) and the help table below.
# Filters taking no argument:
filter_unary = [
    FAsset,
    FErr,
    FHTTP,
    FMarked,
    FReq,
    FResp,
    FTCP,
]
# Filters taking a regex argument:
filter_rex = [
    FBod,
    FBodRequest,
    FBodResponse,
    FContentType,
    FContentTypeRequest,
    FContentTypeResponse,
    FDomain,
    FDst,
    FHead,
    FHeadRequest,
    FHeadResponse,
    FMethod,
    FSrc,
    FUrl,
]
# Filters taking an integer argument:
filter_int = [
    FCode
]
def _make():
    """Build and return the pyparsing grammar for filter expressions."""
    # Order is important - multi-char expressions need to come before narrow
    # ones.
    parts = []
    for klass in filter_unary:
        # WordEnd prevents "~h" from greedily matching the prefix of "~hq".
        f = pp.Literal("~%s" % klass.code) + pp.WordEnd()
        f.setParseAction(klass.make)
        parts.append(f)

    # A regex argument is either a bare word (no parens/quotes/tildes) or a
    # quoted string with backslash escapes.
    simplerex = "".join(c for c in pp.printables if c not in "()~'\"")
    rex = pp.Word(simplerex) |\
        pp.QuotedString("\"", escChar='\\') |\
        pp.QuotedString("'", escChar='\\')
    for klass in filter_rex:
        f = pp.Literal("~%s" % klass.code) + pp.WordEnd() + rex.copy()
        f.setParseAction(klass.make)
        parts.append(f)

    for klass in filter_int:
        f = pp.Literal("~%s" % klass.code) + pp.WordEnd() + pp.Word(pp.nums)
        f.setParseAction(klass.make)
        parts.append(f)

    # A naked rex is a URL rex:
    f = rex.copy()
    f.setParseAction(FUrl.make)
    parts.append(f)

    atom = pp.MatchFirst(parts)
    # NOTE(review): operatorPrecedence is the pre-2.3 pyparsing name for
    # infixNotation; precedence here is ! over & over |.
    expr = pp.operatorPrecedence(atom,
                                 [(pp.Literal("!").suppress(),
                                   1,
                                   pp.opAssoc.RIGHT,
                                   lambda x: FNot(*x)),
                                  (pp.Literal("&").suppress(),
                                   2,
                                   pp.opAssoc.LEFT,
                                   lambda x: FAnd(*x)),
                                  (pp.Literal("|").suppress(),
                                   2,
                                   pp.opAssoc.LEFT,
                                   lambda x: FOr(*x)),
                                  ])
    expr = pp.OneOrMore(expr)
    # Juxtaposed expressions ("~q ~d foo") are an implicit conjunction.
    return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
# Grammar is built once at import time and reused by every parse() call.
bnf = _make()
# Type alias: a compiled filter is a predicate over flows.
TFilter = Callable[[flow.Flow], bool]
def parse(s: str) -> TFilter:
    """Compile a filter expression string into a callable filter.

    Returns None (rather than raising) when the expression is syntactically
    invalid or contains an uncompilable regex - i.e. the effective return
    type is Optional[TFilter]. The original text is kept on ``.pattern``.
    """
    try:
        flt = bnf.parseString(s, parseAll=True)[0]
        flt.pattern = s
        return flt
    except pp.ParseException:
        return None
    except ValueError:
        # Raised by _Rex.__init__ for an invalid regex argument.
        return None
def match(flt, flow):
    """
    Matches a flow against a compiled filter expression.
    Returns True if matched, False if not.

    If flt is a string, it will be compiled as a filter expression.
    If the expression is invalid, ValueError is raised.
    """
    if isinstance(flt, str):
        flt = parse(flt)
        if not flt:
            raise ValueError("Invalid filter expression.")
    if flt:
        # Filters may return truthy non-bool values (e.g. match objects).
        return flt(flow)
    # A falsy/None filter matches everything.
    return True
# Human-readable (expression, description) pairs for UI help output.
# NOTE(review): this module-level name shadows the ``help`` builtin within
# this module; renaming would change the module's public API.
help = []
for i in filter_unary:
    help.append(
        ("~%s" % i.code, i.help)
    )
for i in filter_rex:
    help.append(
        ("~%s regex" % i.code, i.help)
    )
for i in filter_int:
    help.append(
        ("~%s int" % i.code, i.help)
    )
help.sort()
# Operators are appended unsorted so they appear last, grouped together.
help.extend(
    [
        ("!", "unary not"),
        ("&", "and"),
        ("|", "or"),
        ("(...)", "grouping"),
    ]
)
| 23.976407
| 84
| 0.561426
|
4a0ef55e528d9c2c2d6a85bb42af8ab186ac0247
| 726
|
py
|
Python
|
test/command_line/test_convert_to_cbf.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | 2
|
2021-03-17T11:25:46.000Z
|
2021-11-18T04:20:54.000Z
|
test/command_line/test_convert_to_cbf.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | 2
|
2020-07-31T22:37:30.000Z
|
2020-07-31T23:08:55.000Z
|
test/command_line/test_convert_to_cbf.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
from __future__ import absolute_import, division, print_function
import procrunner
import pytest
@pytest.mark.parametrize("filename", ["image_15799_master.h5", "image_15799.nxs"])
def test_convert_to_cbf(dials_data, filename, tmpdir):
    # End-to-end check: import 10 images from an HDF5/NeXus dataset, then
    # convert them and expect exactly one CBF file per image.
    result = procrunner.run(
        ["dials.import", dials_data("vmxi_thaumatin") / filename, "image_range=1,10"],
        working_directory=tmpdir,
    )
    result.check_returncode()
    # Any stderr output is treated as a failure, not just a bad exit code.
    assert not result.stderr
    assert tmpdir.join("imported.expt").check()

    result = procrunner.run(
        ["dials.convert_to_cbf", "imported.expt"], working_directory=tmpdir
    )
    result.check_returncode()
    assert not result.stderr

    assert len(tmpdir.listdir("as_cbf_*.cbf")) == 10
| 30.25
| 86
| 0.714876
|
4a0ef59812a1ab82d1ab3631f338e25633aaff9a
| 2,024
|
py
|
Python
|
saleor/graphql/core/types/meta.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 8
|
2018-07-17T13:13:21.000Z
|
2022-03-01T17:02:34.000Z
|
saleor/graphql/core/types/meta.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 3
|
2021-03-09T16:28:16.000Z
|
2022-02-10T19:08:00.000Z
|
saleor/graphql/core/types/meta.py
|
elwoodxblues/saleor
|
5e4e4a4259a011d24b04ebd24c77c689de843fa1
|
[
"CC-BY-4.0"
] | 1
|
2021-04-15T07:16:34.000Z
|
2021-04-15T07:16:34.000Z
|
from operator import itemgetter
import graphene
class MetaItem(graphene.ObjectType):
    # GraphQL type for a single key/value pair of stored metadata.
    key = graphene.String(required=True, description="Key for stored data.")
    value = graphene.String(required=True, description="Stored metadata value.")
class MetaClientStore(graphene.ObjectType):
    # GraphQL type: one client's named bag of metadata key/value pairs.
    name = graphene.String(required=True, description="Metadata clients name.")
    metadata = graphene.List(
        MetaItem, required=True, description="Metadata stored for a client."
    )

    @staticmethod
    def resolve_metadata(root, _info):
        # root["metadata"] is a plain dict; expose it as a key-sorted list of
        # {key, value} items for deterministic output.
        return sorted(
            [{"key": k, "value": v} for k, v in root["metadata"].items()],
            key=itemgetter("key"),
        )
class MetaStore(graphene.ObjectType):
    # GraphQL type: a metadata namespace grouping per-client stores.
    namespace = graphene.String(
        required=True, description="Name of metadata client group."
    )
    clients = graphene.List(
        MetaClientStore,
        required=True,
        description="List of clients that stored metadata in a group.",
    )

    @staticmethod
    def resolve_clients(root: dict, _info):
        # root["metadata"] maps client name -> metadata dict; emit a
        # name-sorted list for deterministic output.
        return sorted(
            [
                {"name": key, "metadata": value}
                for key, value in root["metadata"].items()
            ],
            key=itemgetter("name"),
        )
class MetaPath(graphene.InputObjectType):
    # Input type addressing one metadata entry: namespace/client/key.
    namespace = graphene.String(
        required=True, description="Name of metadata client group."
    )
    client_name = graphene.String(required=True, description="Metadata clients name.")
    key = graphene.String(required=True, description="Key for stored data.")
class MetaInput(MetaPath):
    # MetaPath plus the value to store at that path.
    value = graphene.String(required=True, description="Stored metadata value.")
class MetadataObjectType(graphene.ObjectType):
    # Mixin adding public and private metadata fields to GraphQL objects.
    private_meta = graphene.List(
        MetaStore,
        required=True,
        description="List of privately stored metadata namespaces.",
    )
    meta = graphene.List(
        MetaStore,
        required=True,
        description="List of publicly stored metadata namespaces.",
    )
| 29.333333
| 86
| 0.652668
|
4a0ef70f77dadaf8e16a9180a6b04e4a805f540d
| 12,042
|
py
|
Python
|
samcli/commands/local/lib/sam_function_provider.py
|
trenton/aws-sam-cli
|
11db934d3584c17fb5ba94d0e92e291c2c91d7c9
|
[
"Apache-2.0"
] | 1
|
2019-12-24T17:27:09.000Z
|
2019-12-24T17:27:09.000Z
|
samcli/commands/local/lib/sam_function_provider.py
|
ShreyaGangishetty/aws-sam-cli
|
f896920468770f3407a3035b9c8e04902578d556
|
[
"Apache-2.0"
] | 1
|
2021-06-02T02:44:08.000Z
|
2021-06-02T02:44:08.000Z
|
samcli/commands/local/lib/sam_function_provider.py
|
CavHack/aws-sam-cli
|
9355b7b613af907055b9ea5fb199f5d6d501c490
|
[
"Apache-2.0"
] | null | null | null |
"""
Class that provides functions from a given SAM template
"""
import logging
import six
from samcli.commands.local.cli_common.user_exceptions import InvalidLayerVersionArn
from .exceptions import InvalidLayerReference
from .provider import FunctionProvider, Function, LayerVersion
from .sam_base_provider import SamBaseProvider
LOG = logging.getLogger(__name__)
class SamFunctionProvider(FunctionProvider):
"""
Fetches and returns Lambda Functions from a SAM Template. The SAM template passed to this provider is assumed
to be valid, normalized and a dictionary.
It may or may not contain a function.
"""
_SERVERLESS_FUNCTION = "AWS::Serverless::Function"
_LAMBDA_FUNCTION = "AWS::Lambda::Function"
_SERVERLESS_LAYER = "AWS::Serverless::LayerVersion"
_LAMBDA_LAYER = "AWS::Lambda::LayerVersion"
_DEFAULT_CODEURI = "."
    def __init__(self, template_dict, parameter_overrides=None):
        """
        Initialize the class with SAM template data. The SAM template passed to this provider is assumed
        to be valid, normalized and a dictionary. It should be normalized by running all pre-processing
        before passing to this class. The process of normalization will remove structures like ``Globals``, resolve
        intrinsic functions etc.
        This class does not perform any syntactic validation of the template.

        After the class is initialized, any changes to the ``template_dict`` will not be reflected in here.
        You need to explicitly update the class with new template, if necessary.

        :param dict template_dict: SAM Template as a dictionary
        :param dict parameter_overrides: Optional dictionary of values for SAM template parameters that might want
            to get substituted within the template
        """
        # get_template resolves parameters/intrinsics so lookups below see
        # plain values rather than {"Ref": ...} structures.
        self.template_dict = SamBaseProvider.get_template(template_dict, parameter_overrides)
        self.resources = self.template_dict.get("Resources", {})

        LOG.debug("%d resources found in the template", len(self.resources))

        # Store a map of function name to function information for quick reference
        self.functions = self._extract_functions(self.resources)
    def get(self, name):
        """
        Returns the function given name or LogicalId of the function. Every SAM resource has a logicalId, but it may
        also have a function name. This method searches only for LogicalID and returns the function that matches
        it.

        :param string name: Name of the function
        :return Function: namedtuple containing the Function information if function is found.
                          None, if function is not found
        :raises ValueError If name is not given
        """

        if not name:
            raise ValueError("Function name is required")

        # dict.get returns None for unknown names, per the contract above.
        return self.functions.get(name)
    def get_all(self):
        """
        Yields all the Lambda functions available in the SAM Template.

        :yields Function: namedtuple containing the function information
        """

        # Values only; the keys (LogicalIds) are already stored on each
        # Function's ``name`` field.
        for _, function in self.functions.items():
            yield function
    @staticmethod
    def _extract_functions(resources):
        """
        Extracts and returns function information from the given dictionary of SAM/CloudFormation resources. This
        method supports functions defined with AWS::Serverless::Function and AWS::Lambda::Function

        :param dict resources: Dictionary of SAM/CloudFormation resources
        :return dict(string : samcli.commands.local.lib.provider.Function): Dictionary of function LogicalId to the
            Function configuration object
        """

        result = {}

        for name, resource in resources.items():

            resource_type = resource.get("Type")
            resource_properties = resource.get("Properties", {})

            # Layers are resolved against the full resource map because a
            # layer may be declared anywhere in the template.
            if resource_type == SamFunctionProvider._SERVERLESS_FUNCTION:
                layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources)
                result[name] = SamFunctionProvider._convert_sam_function_resource(name, resource_properties, layers)

            elif resource_type == SamFunctionProvider._LAMBDA_FUNCTION:
                layers = SamFunctionProvider._parse_layer_info(resource_properties.get("Layers", []), resources)
                result[name] = SamFunctionProvider._convert_lambda_function_resource(name, resource_properties, layers)

            # We don't care about other resource types. Just ignore them

        return result
    @staticmethod
    def _convert_sam_function_resource(name, resource_properties, layers):
        """
        Converts a AWS::Serverless::Function resource to a Function configuration usable by the provider.

        :param string name: LogicalID of the resource NOTE: This is *not* the function name because not all functions
            declare a name
        :param dict resource_properties: Properties of this resource
        :return samcli.commands.local.lib.provider.Function: Function configuration
        """

        # Serverless functions carry their code location under "CodeUri".
        codeuri = SamFunctionProvider._extract_sam_function_codeuri(name, resource_properties, "CodeUri")

        LOG.debug("Found Serverless function with name='%s' and CodeUri='%s'", name, codeuri)

        return Function(
            name=name,
            runtime=resource_properties.get("Runtime"),
            memory=resource_properties.get("MemorySize"),
            timeout=resource_properties.get("Timeout"),
            handler=resource_properties.get("Handler"),
            codeuri=codeuri,
            environment=resource_properties.get("Environment"),
            rolearn=resource_properties.get("Role"),
            layers=layers,
        )
    @staticmethod
    def _extract_sam_function_codeuri(name, resource_properties, code_property_key):
        """
        Extracts the SAM Function CodeUri from the Resource Properties

        Parameters
        ----------
        name str
            LogicalId of the resource
        resource_properties dict
            Dictionary representing the Properties of the Resource
        code_property_key str
            Property Key of the code on the Resource

        Returns
        -------
        str
            Representing the local code path
        """
        codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)
        # CodeUri can be a dictionary of S3 Bucket/Key or a S3 URI, neither of which are supported
        if isinstance(codeuri, dict) or (isinstance(codeuri, six.string_types) and codeuri.startswith("s3://")):
            # Fall back to the current directory and warn, rather than fail.
            codeuri = SamFunctionProvider._DEFAULT_CODEURI
            LOG.warning(
                "Lambda function '%s' has specified S3 location for CodeUri which is unsupported. "
                "Using default value of '%s' instead",
                name,
                codeuri,
            )
        return codeuri
    @staticmethod
    def _convert_lambda_function_resource(name, resource_properties, layers):  # pylint: disable=invalid-name
        """
        Converts a AWS::Lambda::Function resource to a Function configuration usable by the provider.
        (Docstring previously said AWS::Serverless::Function - copy-paste from
        the sibling converter above.)

        :param string name: LogicalID of the resource NOTE: This is *not* the function name because not all functions
            declare a name
        :param dict resource_properties: Properties of this resource
        :return samcli.commands.local.lib.provider.Function: Function configuration
        """

        # CodeUri is set to "." in order to get code locally from current directory. AWS::Lambda::Function's ``Code``
        # property does not support specifying a local path
        codeuri = SamFunctionProvider._extract_lambda_function_code(resource_properties, "Code")

        LOG.debug("Found Lambda function with name='%s' and CodeUri='%s'", name, codeuri)

        return Function(
            name=name,
            runtime=resource_properties.get("Runtime"),
            memory=resource_properties.get("MemorySize"),
            timeout=resource_properties.get("Timeout"),
            handler=resource_properties.get("Handler"),
            codeuri=codeuri,
            environment=resource_properties.get("Environment"),
            rolearn=resource_properties.get("Role"),
            layers=layers,
        )
@staticmethod
def _extract_lambda_function_code(resource_properties, code_property_key):
"""
Extracts the Lambda Function Code from the Resource Properties
Parameters
----------
resource_properties dict
Dictionary representing the Properties of the Resource
code_property_key str
Property Key of the code on the Resource
Returns
-------
str
Representing the local code path
"""
codeuri = resource_properties.get(code_property_key, SamFunctionProvider._DEFAULT_CODEURI)
if isinstance(codeuri, dict):
codeuri = SamFunctionProvider._DEFAULT_CODEURI
return codeuri
@staticmethod
def _parse_layer_info(list_of_layers, resources):
"""
Creates a list of Layer objects that are represented by the resources and the list of layers
Parameters
----------
list_of_layers List(str)
List of layers that are defined within the Layers Property on a function
resources dict
The Resources dictionary defined in a template
Returns
-------
List(samcli.commands.local.lib.provider.Layer)
List of the Layer objects created from the template and layer list defined on the function. The order
of the layers does not change.
I.E: list_of_layers = ["layer1", "layer2"] the return would be [Layer("layer1"), Layer("layer2")]
"""
layers = []
for layer in list_of_layers:
if layer == "arn:aws:lambda:::awslayer:AmazonLinux1803":
LOG.debug("Skipped arn:aws:lambda:::awslayer:AmazonLinux1803 as the containers are AmazonLinux1803")
continue
if layer == "arn:aws:lambda:::awslayer:AmazonLinux1703":
raise InvalidLayerVersionArn(
"Building and invoking locally only supports AmazonLinux1803. See "
"https://aws.amazon.com/blogs/compute/upcoming-updates-to-the-aws-lambda-execution-environment/ for more detials."
) # noqa: E501
# If the layer is a string, assume it is the arn
if isinstance(layer, six.string_types):
layers.append(LayerVersion(layer, None))
continue
# In the list of layers that is defined within a template, you can reference a LayerVersion resource.
# When running locally, we need to follow that Ref so we can extract the local path to the layer code.
if isinstance(layer, dict) and layer.get("Ref"):
layer_logical_id = layer.get("Ref")
layer_resource = resources.get(layer_logical_id)
if not layer_resource or layer_resource.get("Type", "") not in (
SamFunctionProvider._SERVERLESS_LAYER,
SamFunctionProvider._LAMBDA_LAYER,
):
raise InvalidLayerReference()
layer_properties = layer_resource.get("Properties", {})
resource_type = layer_resource.get("Type")
codeuri = None
if resource_type == SamFunctionProvider._LAMBDA_LAYER:
codeuri = SamFunctionProvider._extract_lambda_function_code(layer_properties, "Content")
if resource_type == SamFunctionProvider._SERVERLESS_LAYER:
codeuri = SamFunctionProvider._extract_sam_function_codeuri(
layer_logical_id, layer_properties, "ContentUri"
)
layers.append(LayerVersion(layer_logical_id, codeuri))
return layers
| 42.104895
| 134
| 0.659193
|
4a0ef71d7b59d774727e176d9ffff747269edb3f
| 1,236
|
py
|
Python
|
python/setup.py
|
mazhurin/spark-iforest
|
cde82cad838cdafde66108afcadbafe773530303
|
[
"Apache-2.0"
] | null | null | null |
python/setup.py
|
mazhurin/spark-iforest
|
cde82cad838cdafde66108afcadbafe773530303
|
[
"Apache-2.0"
] | null | null | null |
python/setup.py
|
mazhurin/spark-iforest
|
cde82cad838cdafde66108afcadbafe773530303
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from setuptools import setup, find_packages
import sys
import io
# Long description is taken verbatim from the bundled README.
long_description = io.open("README.md", encoding="UTF-8").read()

setup(
    name="pyspark-iforest",
    version="2.4.0.99",
    author="Titicaca",
    author_email="lake_titicaca@outlook.com",
    description="PySpark Wrapper for Spark-IForest",
    long_description=long_description,
    license="MIT",
    url="https://github.com/titicaca/spark-iforest/python",
    packages=find_packages(),
    entry_points={},
    data_files=[
        # ('data', []),
        ('doc', ['README.md']),
    ],
    include_package_data=True,
    classifiers=[
        "Environment :: Web Environment",
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: Chinese',
        'Operating System :: MacOS',
        'Operating System :: Microsoft',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Topic :: NLP',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=[
        'pyspark==2.4.0',
    ],
    zip_safe=True,
)
| 30.146341
| 71
| 0.59466
|
4a0ef7441e3e3dfe63aa0808a80bc11a3e1a7e37
| 6,995
|
py
|
Python
|
src/pythae/pipelines/training.py
|
eknag/benchmark_VAE
|
8b727f29a68aff7771c4c97aff15f75f88320e1f
|
[
"Apache-2.0"
] | 1
|
2022-03-20T20:23:59.000Z
|
2022-03-20T20:23:59.000Z
|
src/pythae/pipelines/training.py
|
eknag/benchmark_VAE
|
8b727f29a68aff7771c4c97aff15f75f88320e1f
|
[
"Apache-2.0"
] | null | null | null |
src/pythae/pipelines/training.py
|
eknag/benchmark_VAE
|
8b727f29a68aff7771c4c97aff15f75f88320e1f
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Union
import logging
import numpy as np
import torch
from torch.optim import Optimizer
from ..customexception import LoadError
from ..data.preprocessors import DataProcessor
from ..models import BaseAE, VAE, VAEConfig
from ..trainers import *
from .base_pipeline import Pipeline
# Module-level logger for this pipeline module.
logger = logging.getLogger(__name__)

# Make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logger.setLevel(logging.INFO)
class TrainingPipeline(Pipeline):
    """
    This Pipeline provides an end to end way to train your VAE model.
    The trained model will be saved in ``output_dir`` stated in the
    :class:`~pythae.trainers.BaseTrainingConfig`. A folder
    ``training_YYYY-MM-DD_hh-mm-ss`` is
    created where checkpoints and final model will be saved. Checkpoints are saved in
    ``checkpoint_epoch_{epoch}`` folder (optimizer and training config
    saved as well to resume training if needed)
    and the final model is saved in a ``final_model`` folder. If ``output_dir`` is
    None, data is saved in ``dummy_output_dir/training_YYYY-MM-DD_hh-mm-ss`` is created.

    Parameters:

        model (Optional[BaseAE]): An instance of :class:`~pythae.models.BaseAE` you want to train.
            If None, a default :class:`~pythae.models.VAE` model is used. Default: None.

        training_config (Optional[BaseTrainingConfig]=None): An instance of
            :class:`~pythae.trainers.BaseTrainingConfig` stating the training
            parameters. If None, a default configuration is used.
    """

    def __init__(
        self,
        model: Optional[BaseAE]=None,
        training_config: Optional[BaseTrainingConfig]=None,
    ):
        # Select (or validate) a trainer config matching the model type. Some model
        # families need a specialized config: RAE_L2 couples two optimizers,
        # adversarial models need an adversarial trainer, VAEGAN needs both.
        if model is not None:
            if training_config is None:
                # No config given: derive a sensible default from the model type.
                if model.model_name == 'RAE_L2':
                    # RAE_L2 regularizes the decoder via weight decay taken from
                    # the model's own reg_weight.
                    training_config = CoupledOptimizerTrainerConfig(
                        encoder_optim_decay=0,
                        decoder_optim_decay=model.model_config.reg_weight
                    )
                elif model.model_name == 'Adversarial_AE' or model.model_name == 'FactorVAE':
                    training_config = AdversarialTrainerConfig()
                elif model.model_name == 'VAEGAN':
                    training_config = CoupledOptimizerAdversarialTrainerConfig()
                else:
                    training_config = BaseTrainingConfig()

            elif model.model_name == 'RAE_L2':
                # A config was supplied: make sure it is the coupled-optimizer kind,
                # then force the decay values RAE_L2 requires.
                if not isinstance(
                    training_config, CoupledOptimizerTrainerConfig):
                    raise AssertionError("A 'CoupledOptimizerTrainerConfig' "
                        "is expected for training a RAE_L2")
                training_config.encoder_optim_decay = 0.
                training_config.decoder_optim_decay = model.model_config.reg_weight

            elif model.model_name == 'Adversarial_AE' or model.model_name == 'FactorVAE':
                if not isinstance(
                    training_config, AdversarialTrainerConfig):
                    raise AssertionError("A 'AdversarialTrainer' "
                        f"is expected for training a {model.model_name}")

            elif model.model_name == 'VAEGAN':
                if not isinstance(
                    training_config, CoupledOptimizerAdversarialTrainerConfig):
                    raise AssertionError("A 'CoupledOptimizerAdversarialTrainer' "
                        "is expected for training a VAEGAN")

            # Catch-all sanity check: whatever config we ended up with must be (a
            # subclass of) BaseTrainingConfig.
            # NOTE(review): nesting reconstructed from a whitespace-mangled dump —
            # this check is placed at the `if model is not None` level; confirm
            # against the upstream pythae source.
            if not isinstance(
                training_config, BaseTrainingConfig):
                raise AssertionError("A 'BaseTrainingConfig' "
                    "is expected for the pipeline")

        else:
            # No model provided: a default VAE is built later (see __call__),
            # so only a default config is needed here.
            training_config = BaseTrainingConfig()

        self.data_processor = DataProcessor()
        self.model = model
        self.training_config = training_config

    def _set_default_model(self, data):
        # Fallback model: a plain VAE whose input dim matches one data sample.
        model_config = VAEConfig(input_dim=data.shape[1:])
        model = VAE(model_config)
        self.model = model

    def __call__(
        self,
        train_data: Union[np.ndarray, torch.Tensor],
        eval_data: Union[np.ndarray, torch.Tensor] = None,
        log_output_dir: str = None,
    ):
        """
        Launch the model training on the provided data.

        Args:
            training_data (Union[~numpy.ndarray, ~torch.Tensor]): The training data as a
                :class:`numpy.ndarray` or :class:`torch.Tensor` of shape (mini_batch x
                n_channels x ...)

            eval_data (Optional[Union[~numpy.ndarray, ~torch.Tensor]]): The evaluation data as a
                :class:`numpy.ndarray` or :class:`torch.Tensor` of shape (mini_batch x
                n_channels x ...). If None, only uses train_fata for training. Default: None.
        """
        logger.info("Preprocessing train data...")
        train_data = self.data_processor.process_data(train_data)
        train_dataset = self.data_processor.to_dataset(train_data)
        self.train_data = train_data

        # Build the default VAE lazily, now that the data shape is known.
        if self.model is None:
            self._set_default_model(train_data)

        if eval_data is not None:
            logger.info("Preprocessing eval data...\n")
            eval_data = self.data_processor.process_data(eval_data)
            eval_dataset = self.data_processor.to_dataset(eval_data)
        else:
            eval_dataset = None

        # Dispatch on config type. Order matters: the specialized configs
        # subclass BaseTrainingConfig, so the Base branch must be tested last.
        if isinstance(self.training_config, CoupledOptimizerTrainerConfig):
            logger.info("Using Coupled Optimizer Trainer\n")
            trainer = CoupledOptimizerTrainer(
                model=self.model,
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                training_config=self.training_config
            )

        elif isinstance(self.training_config, AdversarialTrainerConfig):
            logger.info("Using Adversarial Trainer\n")
            trainer = AdversarialTrainer(
                model=self.model,
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                training_config=self.training_config
            )

        elif isinstance(self.training_config, CoupledOptimizerAdversarialTrainerConfig):
            logger.info("Using Coupled Optimizer Adversarial Trainer\n")
            trainer = CoupledOptimizerAdversarialTrainer(
                model=self.model,
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                training_config=self.training_config
            )

        elif isinstance(self.training_config, BaseTrainingConfig):
            logger.info("Using Base Trainer\n")
            trainer = BaseTrainer(
                model=self.model,
                train_dataset=train_dataset,
                eval_dataset=eval_dataset,
                training_config=self.training_config
            )

        self.trainer = trainer

        trainer.train(log_output_dir=log_output_dir)
| 36.432292
| 98
| 0.622302
|
4a0ef81bd16c08e8952ff40e4654d32b362d5a12
| 3,150
|
py
|
Python
|
packages/mbed-greentea/setup.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | null | null | null |
packages/mbed-greentea/setup.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | null | null | null |
packages/mbed-greentea/setup.py
|
noralsydmp/mbed-os-tools
|
5a14958aa49eb5764afba8e1dc3208cae2955cd7
|
[
"Apache-2.0"
] | null | null | null |
"""
This module defines the attributes of the
PyPI package for the mbed SDK test suite
"""
"""
mbed SDK
Copyright (c) 2011-2019 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import os
from io import open
from distutils.core import setup
from setuptools import find_packages
DESCRIPTION = "mbed 3.0 onwards test suite, codename Greentea. The test suite is a collection of tools that enable automated testing on mbed-enabled platforms"
OWNER_NAMES = 'Anna Bridge, Qinghao Shi'
OWNER_EMAILS = 'Anna.Bridge@arm.com, qinghao.shi@arm.com'
repository_dir = os.path.dirname(__file__)
def read(fname):
    """
    Utility function to cat in a file (used for the README)

    @param fname: the name of the file to read,
        relative to the directory containing this file
    @return: The string content of the opened file
    """
    file_path = os.path.join(repository_dir, fname)
    with open(file_path, mode='r') as handle:
        return handle.read()
# Runtime and test dependencies are pinned in the adjacent requirements files.
with open(os.path.join(repository_dir, 'requirements.txt')) as req_file:
    requirements = req_file.readlines()

with open(os.path.join(repository_dir, 'test_requirements.txt')) as req_file:
    test_requirements = req_file.readlines()

# Python 2.7.10+ or 3.5+ (everything in between 3.0-3.4 is excluded).
python_requires = '>=2.7.10, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4'

setup(
    name='mbed-greentea',
    version='1.7.4',
    description=DESCRIPTION,
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    author=OWNER_NAMES,
    author_email=OWNER_EMAILS,
    maintainer=OWNER_NAMES,
    maintainer_email=OWNER_EMAILS,
    url='https://github.com/ARMmbed/mbed-os-tools',
    packages=find_packages(),
    license="Apache-2.0",
    test_suite='test',
    entry_points={
        "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main", ],
    },
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Embedded Systems',
        'Topic :: Software Development :: Testing',
    ),
    python_requires=python_requires,
    install_requires=requirements,
    tests_require=test_requirements,
)
| 35
| 159
| 0.68127
|
4a0ef832cfa9e37aad7c6c51fbd40b16109f9d92
| 5,022
|
py
|
Python
|
tests/pkb_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
tests/pkb_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
tests/pkb_test.py
|
robfrut135/PerfKitBenchmarker
|
ccaf81b47ed5e3f27065e8f8d9fc42d071bfc22c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pkb.py."""
import unittest
import mock
from perfkitbenchmarker import flags
from perfkitbenchmarker import pkb
from perfkitbenchmarker import stages
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
class TestCreateFailedRunSampleFlag(unittest.TestCase):
  """Verifies RunBenchmark emits a failed-run sample for each failing stage.

  The five per-stage tests previously duplicated the same arrange/act/assert
  body; that body now lives in a single private helper.
  """

  def PatchPkbFunction(self, function_name):
    """Patches pkb.<function_name> for the duration of the test."""
    patcher = mock.patch(pkb.__name__ + '.' + function_name)
    mock_function = patcher.start()
    self.addCleanup(patcher.stop)
    return mock_function

  def setUp(self):
    # Patch every stage function plus the sample factory so RunBenchmark can be
    # driven without touching real infrastructure.
    self.flags_mock = self.PatchPkbFunction('FLAGS')
    self.provision_mock = self.PatchPkbFunction('DoProvisionPhase')
    self.prepare_mock = self.PatchPkbFunction('DoPreparePhase')
    self.run_mock = self.PatchPkbFunction('DoRunPhase')
    self.cleanup_mock = self.PatchPkbFunction('DoCleanupPhase')
    self.teardown_mock = self.PatchPkbFunction('DoTeardownPhase')
    self.make_failed_run_sample_mock = self.PatchPkbFunction(
        'MakeFailedRunSample')

    self.flags_mock.skip_pending_runs_file = None
    self.flags_mock.run_stage = [
        stages.PROVISION, stages.PREPARE, stages.RUN, stages.CLEANUP,
        stages.TEARDOWN
    ]
    self.spec = mock.MagicMock()
    self.collector = mock.Mock()

  def _assertFailedSampleForStage(self, stage_mock, stage):
    """Makes stage_mock raise and asserts a failed-run sample is created for stage."""
    self.flags_mock.create_failed_run_samples = True
    error_msg = 'error'
    stage_mock.side_effect = Exception(error_msg)

    self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
    self.make_failed_run_sample_mock.assert_called_once_with(
        self.spec, error_msg, stage)

  def testCreateProvisionFailedSample(self):
    self._assertFailedSampleForStage(self.provision_mock, stages.PROVISION)

  def testCreatePrepareFailedSample(self):
    self._assertFailedSampleForStage(self.prepare_mock, stages.PREPARE)

  def testCreateRunFailedSample(self):
    self._assertFailedSampleForStage(self.run_mock, stages.RUN)

  def testCreateCleanupFailedSample(self):
    self._assertFailedSampleForStage(self.cleanup_mock, stages.CLEANUP)

  def testCreateTeardownFailedSample(self):
    self._assertFailedSampleForStage(self.teardown_mock, stages.TEARDOWN)

  def testDontCreateFailedRunSample(self):
    # With the flag off, a failing stage must not produce a sample.
    self.flags_mock.create_failed_run_samples = False
    self.run_mock.side_effect = Exception('error')

    self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
    self.make_failed_run_sample_mock.assert_not_called()
class TestMakeFailedRunSample(unittest.TestCase):
  """Checks the metadata attached to a 'Run Failed' sample."""

  @mock.patch('perfkitbenchmarker.sample.Sample')
  def testMakeFailedRunSample(self, sample_mock):
    spec = mock.MagicMock()
    spec.vms = []
    failure_message = 'error'

    pkb.MakeFailedRunSample(spec, failure_message, stages.PROVISION)

    expected_metadata = {
        'error_message': failure_message,
        'run_stage': stages.PROVISION,
        'flags': '{}'
    }
    sample_mock.assert_called_once()
    sample_mock.assert_called_with(
        'Run Failed', 1, 'Run Failed', expected_metadata)

  @mock.patch('perfkitbenchmarker.sample.Sample')
  def testMakeFailedRunSampleWithTruncation(self, sample_mock):
    spec = mock.MagicMock()
    spec.vms = []
    failure_message = 'This is a long error message that should be truncated.'

    # Cap the recorded error message at 7 characters.
    pkb.FLAGS.failed_run_samples_error_length = 7
    pkb.MakeFailedRunSample(spec, failure_message, stages.PROVISION)

    expected_metadata = {
        'error_message': 'This is',
        'run_stage': stages.PROVISION,
        'flags': '{}'
    }
    sample_mock.assert_called_once()
    sample_mock.assert_called_with(
        'Run Failed', 1, 'Run Failed', expected_metadata)
if __name__ == '__main__':
  # Run the unittest CLI when this file is executed directly.
  unittest.main()
| 35.366197
| 77
| 0.7501
|
4a0ef90859a28975cc3f6ee9da4a3ad21b773b25
| 786
|
py
|
Python
|
gvsoc/gvsoc/models/pulp/chips/vivosoc3_1/apb_soc.py
|
knmcguire/gap_sdk
|
7b0a09a353ab6f0550793d40bd46e98051f4a3d7
|
[
"Apache-2.0"
] | null | null | null |
gvsoc/gvsoc/models/pulp/chips/vivosoc3_1/apb_soc.py
|
knmcguire/gap_sdk
|
7b0a09a353ab6f0550793d40bd46e98051f4a3d7
|
[
"Apache-2.0"
] | null | null | null |
gvsoc/gvsoc/models/pulp/chips/vivosoc3_1/apb_soc.py
|
knmcguire/gap_sdk
|
7b0a09a353ab6f0550793d40bd46e98051f4a3d7
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
import vp_core as vp
class component(vp.component):
    """gvsoc wrapper component that binds to the vivosoc3_1 apb_soc implementation."""

    # Dotted path of the native implementation module loaded by the framework.
    implementation = 'pulp.chips/vivosoc3_1/apb_soc_impl'
| 32.75
| 74
| 0.756997
|
4a0ef9e265c4d9de39e870d3d11cb3779cea9f03
| 2,101
|
py
|
Python
|
logger.py
|
bitmarker/rpi_i2c
|
e56989e8a451d01fc756d3ccf3d7f8ffa590ccce
|
[
"MIT"
] | 1
|
2016-10-18T16:27:28.000Z
|
2016-10-18T16:27:28.000Z
|
logger.py
|
bitmarker/rpi_i2c
|
e56989e8a451d01fc756d3ccf3d7f8ffa590ccce
|
[
"MIT"
] | null | null | null |
logger.py
|
bitmarker/rpi_i2c
|
e56989e8a451d01fc756d3ccf3d7f8ffa590ccce
|
[
"MIT"
] | null | null | null |
import threading
import time
import csv
from datetime import datetime
from sensors.MC9808 import MC9808
from sensors.Dummy import DummySensor
class Logger(threading.Thread):
    """Background thread that polls attached sensors and appends CSV rows to a file.

    Rows are written as ``timestamp;value1;value2;...`` with decimal points
    converted to commas.
    """

    def __init__(self, filename):
        """
        :param filename: path of the CSV file to append log rows to
        """
        threading.Thread.__init__(self)
        self.kill_me = False
        self.filename = filename
        # Per-instance state. These used to be *class* attributes, so every
        # Logger instance shared a single sensor list (mutable-class-attribute
        # bug); they are now initialized per instance.
        self.sensor_list = []
        self.cycle = 1  # seconds between samples

    def add_sensor(self, sensor):
        """Register a sensor to be polled on every cycle."""
        self.sensor_list.append(sensor)

    def get_time(self):
        """Return the current local time as 'DD.MM.YYYY HH:MM:SS'."""
        return datetime.now().strftime('%d.%m.%Y %H:%M:%S')

    def run(self):
        """Poll sensors in a loop until stop() is called, writing one CSV row per cycle."""
        self.kill_me = False
        # Initialize the sensors
        for sens in self.sensor_list:
            print("Connecting sensor {0}...".format(sens.name))
            sens.connect()
        # Open the output file; try/finally guarantees it is closed even if a
        # sensor read or write raises mid-loop (the original leaked it then).
        self.file = open(self.filename, 'a')
        try:
            self.csv = csv.writer(self.file, delimiter=';', quotechar='"')
            # Run until stop is called
            while not self.kill_me:
                values = []
                # Collect the values
                for sens in self.sensor_list:
                    values += sens.values()
                # Convert to string and replace the decimal points with commas
                str_values = ["{0}".format(v).replace('.', ',') for v in values]
                # Insert the timestamp
                str_values.insert(0, self.get_time())
                print(str_values)
                # Write the values to the file
                self.csv.writerow(str_values)
                time.sleep(self.cycle)
            print("\nStopping...")
            for sens in self.sensor_list:
                print("Disconnecting sensor {0}...".format(sens.name))
                sens.disconnect()
        finally:
            print("Closing file...")
            self.file.close()

    def stop(self):
        """Ask the logging loop to terminate after the current cycle."""
        self.kill_me = True
if __name__ == '__main__':
    # Log the MC9808 temperature sensor until Ctrl-C is pressed.
    logger = Logger('output/log.csv')
    logger.add_sensor(MC9808())
    logger.start()
    # Thread.isAlive() was removed in Python 3.9; is_alive() works on 2.6+.
    while logger.is_alive():
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            logger.stop()
            break
| 28.013333
| 76
| 0.548786
|
4a0efb0dc85c03f358eeb517ac50cdef3b4427e3
| 105
|
py
|
Python
|
OP-GAN/box_generation/seq2seq/models/__init__.py
|
ts170/OP-GAN
|
b9a6227aaa7befa2025ea1f07e62e7a7e9c7ce1e
|
[
"MIT"
] | 1
|
2021-08-25T02:36:02.000Z
|
2021-08-25T02:36:02.000Z
|
OP-GAN/box_generation/seq2seq/models/__init__.py
|
ts170/OP-GAN
|
b9a6227aaa7befa2025ea1f07e62e7a7e9c7ce1e
|
[
"MIT"
] | null | null | null |
OP-GAN/box_generation/seq2seq/models/__init__.py
|
ts170/OP-GAN
|
b9a6227aaa7befa2025ea1f07e62e7a7e9c7ce1e
|
[
"MIT"
] | null | null | null |
from .PreEncoderRNN import PreEncoderRNN
from .DecoderRNN import DecoderRNN
from .seq2seq import Seq2seq
| 26.25
| 40
| 0.857143
|
4a0efbc86701f16a8e8aa4c4412839e7e00ece7e
| 4,026
|
py
|
Python
|
stratatools/crypto.py
|
bvanheu/stratasys
|
a67797f5b5dad066dd93fec0d7cd9e2992c2c452
|
[
"BSD-3-Clause"
] | 38
|
2015-01-02T09:23:11.000Z
|
2018-01-12T11:33:03.000Z
|
stratatools/crypto.py
|
bvanheu/stratasys
|
a67797f5b5dad066dd93fec0d7cd9e2992c2c452
|
[
"BSD-3-Clause"
] | 45
|
2015-01-04T06:14:32.000Z
|
2017-12-06T10:44:44.000Z
|
stratatools/crypto.py
|
bvanheu/stratasys
|
a67797f5b5dad066dd93fec0d7cd9e2992c2c452
|
[
"BSD-3-Clause"
] | 26
|
2015-02-20T17:58:32.000Z
|
2017-11-30T07:40:00.000Z
|
#
# See the LICENSE file
#
import binascii
from Crypto.Cipher import DES
from Crypto.Util.strxor import strxor
class Crypto():
    """Abstract base class defining the cipher interface (encrypt/decrypt)."""
    def __init__(self):
        pass

    def encrypt(self, key, plaintext):
        # Interface stub: subclasses return the ciphertext for `plaintext`.
        pass

    def decrypt(self, key, ciphertext):
        # Interface stub: subclasses return the plaintext for `ciphertext`.
        pass
class Desx_Crypto(Crypto):
    """DESX-style cipher: single-block DES with xor pre/post whitening.

    Python 3 compatibility fixes (all byte-identical on Python 2):
    - ``len(x) / 8`` -> ``len(x) // 8`` (``range`` rejects floats on Py3),
    - ``str(bytearray(...))`` -> ``bytes(...)`` (``str`` of a bytearray on
      Py3 yields the repr string, not the raw bytes).
    """

    def __init__(self):
        # Substitution table used to derive the whitening keys.
        self.clorox = [
            0xBD,0x56,0xEA,0xF2,0xA2,0xF1,0xAC,0x2A,0xB0,0x93,0xD1,
            0x9C,0x1B,0x33,0xFD,0xD0,0x30,0x04,0xB6,0xDC,0x7D,0xDF,
            0x32,0x4B,0xF7,0xCB,0x45,0x9B,0x31,0xBB,0x21,0x5A,0x41,
            0x9F,0xE1,0xD9,0x4A,0x4D,0x9E,0xDA,0xA0,0x68,0x2C,0xC3,
            0x27,0x5F,0x80,0x36,0x3E,0xEE,0xFB,0x95,0x1A,0xFE,0xCE,
            0xA8,0x34,0xA9,0x13,0xF0,0xA6,0x3F,0xD8,0x0C,0x78,0x24,
            0xAF,0x23,0x52,0xC1,0x67,0x17,0xF5,0x66,0x90,0xE7,0xE8,
            0x07,0xB8,0x60,0x48,0xE6,0x1E,0x53,0xF3,0x92,0xA4,0x72,
            0x8C,0x08,0x15,0x6E,0x86,0x00,0x84,0xFA,0xF4,0x7F,0x8A,
            0x42,0x19,0xF6,0xDB,0xCD,0x14,0x8D,0x50,0x12,0xBA,0x3C,
            0x06,0x4E,0xEC,0xB3,0x35,0x11,0xA1,0x88,0x8E,0x2B,0x94,
            0x99,0xB7,0x71,0x74,0xD3,0xE4,0xBF,0x3A,0xDE,0x96,0x0E,
            0xBC,0x0A,0xED,0x77,0xFC,0x37,0x6B,0x03,0x79,0x89,0x62,
            0xC6,0xD7,0xC0,0xD2,0x7C,0x6A,0x8B,0x22,0xA3,0x5B,0x05,
            0x5D,0x02,0x75,0xD5,0x61,0xE3,0x18,0x8F,0x55,0x51,0xAD,
            0x1F,0x0B,0x5E,0x85,0xE5,0xC2,0x57,0x63,0xCA,0x3D,0x6C,
            0xB4,0xC5,0xCC,0x70,0xB2,0x91,0x59,0x0D,0x47,0x20,0xC8,
            0x4F,0x58,0xE0,0x01,0xE2,0x16,0x38,0xC4,0x6F,0x3B,0x0F,
            0x65,0x46,0xBE,0x7E,0x2D,0x7B,0x82,0xF9,0x40,0xB5,0x1D,
            0x73,0xF8,0xEB,0x26,0xC7,0x87,0x97,0x25,0x54,0xB1,0x28,
            0xAA,0x98,0x9D,0xA5,0x64,0x6D,0x7A,0xD4,0x10,0x81,0x44,
            0xEF,0x49,0xD6,0xAE,0x2E,0xDD,0x76,0x5C,0x2F,0xA7,0x1C,
            0xC9,0x09,0x69,0x9A,0x83,0xCF,0x29,0x39,0xB9,0xE9,0x4C,
            0xFF,0x43,0xAB]

    def build_whitening_keys(self, key):
        """Derive the (input, output) whitening keys from a 16-byte key.

        :param key: bytes-like of at least 16 bytes
        :return: tuple of two 8-byte bytearrays
        """
        input_whitener = bytearray(key[8:16])
        output_whitener = bytearray(8)
        clorox_i = 0
        for i in range(8):
            # NOTE: '&' binds tighter than '^', so this is ow[0] ^ (ow[1] & 0xff);
            # values are single bytes already, so the mask is a no-op either way.
            clorox_i = output_whitener[0] ^ output_whitener[1] & 0xff
            for j in range(7):
                output_whitener[j] = output_whitener[j + 1]
            output_whitener[7] = self.clorox[clorox_i] ^ key[i] & 0xff

        for i in range(8):
            clorox_i = output_whitener[0] ^ output_whitener[1]
            for j in range(7):
                output_whitener[j] = output_whitener[j + 1]
            output_whitener[7] = self.clorox[clorox_i] ^ key[i + 8]

        return (input_whitener, output_whitener)

    def encrypt(self, key, plaintext):
        """Encrypt `plaintext` (length must be a multiple of 8) with DESX.

        :return: ciphertext as a bytearray of the same length
        """
        (input_whitening_key, output_whitening_key) = self.build_whitening_keys(key)
        input_whitening_key = bytes(input_whitening_key)
        output_whitening_key = bytes(output_whitening_key)

        if (len(plaintext) % 8):
            raise Exception("plaintext length must be a multiple of 8")

        ciphertext = bytearray(len(plaintext))
        des_key = bytes(key[0:8])
        zero_iv = bytes(bytearray(8))

        for i in range(len(plaintext) // 8):
            # A fresh one-block CBC cipher with a zero IV is plain single-block
            # DES; the surrounding xor whitening is what makes this DESX.
            des = DES.new(des_key, DES.MODE_CBC, zero_iv).encrypt
            block = bytes(plaintext[i * 8:i * 8 + 8])
            ciphertext[i * 8:i * 8 + 8] = strxor(
                output_whitening_key, des(strxor(input_whitening_key, block)))

        return ciphertext

    def decrypt(self, key, ciphertext):
        """Decrypt `ciphertext` (length must be a multiple of 8) with DESX.

        :return: plaintext as a bytearray of the same length
        """
        (input_whitening_key, output_whitening_key) = self.build_whitening_keys(key)
        input_whitening_key = bytes(input_whitening_key)
        output_whitening_key = bytes(output_whitening_key)

        if (len(ciphertext) % 8):
            raise Exception("ciphertext length must be a multiple of 8")

        plaintext = bytearray(len(ciphertext))
        des_key = bytes(key[0:8])
        zero_iv = bytes(bytearray(8))

        for i in range(len(ciphertext) // 8):
            des = DES.new(des_key, DES.MODE_CBC, zero_iv).decrypt
            block = bytes(ciphertext[i * 8:i * 8 + 8])
            # Whitening is applied in reverse order relative to encrypt().
            plaintext[i * 8:i * 8 + 8] = bytearray(strxor(
                input_whitening_key, des(strxor(output_whitening_key, block))))

        return plaintext
| 41.081633
| 136
| 0.616741
|
4a0efc388bb3d2b256b90765ad1b89f62e4413bf
| 2,130
|
py
|
Python
|
logic/menu_info_logic.py
|
wuxh123/my_bottle
|
06cd7cda43b5d7db7522f76e65631510dada1329
|
[
"Apache-2.0"
] | 3
|
2019-02-28T06:44:56.000Z
|
2022-03-23T12:26:42.000Z
|
code/logic/menu_info_logic.py
|
kadycui/BottleAdmin
|
427a0fa8a63680b49565d131f3f4f6aac228efad
|
[
"MIT"
] | null | null | null |
code/logic/menu_info_logic.py
|
kadycui/BottleAdmin
|
427a0fa8a63680b49565d131f3f4f6aac228efad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from logic import _logic_base
from config import db_config
from common import encrypt_helper, cache_helper
class MenuInfoLogic(_logic_base.LogicBase):
    """Logic class for the menu management table."""

    def __init__(self):
        # Table name
        self.__table_name = 'menu_info'
        # Initialize the ORM base class
        _logic_base.LogicBase.__init__(self, db_config.DB, db_config.IS_OUTPUT_SQL, self.__table_name)

    def get_model_for_url(self, key):
        """Return the menu record matching the current page route URL."""
        # Use the md5 of the URL as the cache key
        key_md5 = encrypt_helper.md5(key)
        # Try to fetch the menu record from the cache
        model = cache_helper.get(key_md5)
        # If the record is missing, load the whole table into cache and retry once
        if not model:
            self._load_cache()
            model = cache_helper.get(key_md5)
        return model

    def _load_cache(self):
        """Load the whole table's records into the cache."""
        # Cache-load marker key, used to check whether the menu table was
        # already loaded into the cache
        cache_key = self.__table_name + '_is_load'
        # Register the custom key in the global cache-key list (see the ORM
        # chapter for details on the global cache queue)
        self.add_relevance_cache_in_list(cache_key)
        # If the records were already loaded, do nothing
        if cache_helper.get(cache_key):
            return

        # Read all records from the database
        result = self.get_list()
        # Mark the records as loaded into the cache
        cache_helper.set(cache_key, True)
        # If the menu table has no records, exit directly
        if not result:
            return

        # Iterate over all records, combine the fields, and store them in the
        # nosql cache
        for model in result.get('rows', {}):
            # Extract the API urls for the menu page (the interface value from
            # the back-office menu management; one menu action often hits
            # several APIs, so this value may hold multiple interface urls)
            interface_url = model.get('interface_url', '')
            if not interface_url:
                continue
            # Get the front-end html page address
            page_url = model.get('page_url', '')
            # A page may have several interfaces, so the value must be split
            interface_url_arr = interface_url.replace('\n', '').replace(' ', '').split(',')
            # Process each interface in turn
            for interface in interface_url_arr:
                # Combine html + interface to generate the cache key
                url_md5 = encrypt_helper.md5(page_url + interface)
                # Store the key in the global cache queue so these custom
                # entries are cleared automatically when menu records change
                self.add_relevance_cache_in_list(url_md5)
                # Store the record in the nosql cache
                cache_helper.set(url_md5, model)
| 32.769231
| 102
| 0.603756
|
4a0efcc6fa3dded907138a64f3cf750ed758ce63
| 5,962
|
py
|
Python
|
dags/email_to_bigquery_dag.py
|
Ressmann/starthinker
|
301c5cf17e382afee346871974ca2f4ae905a94a
|
[
"Apache-2.0"
] | 138
|
2018-11-28T21:42:44.000Z
|
2022-03-30T17:26:35.000Z
|
dags/email_to_bigquery_dag.py
|
Ressmann/starthinker
|
301c5cf17e382afee346871974ca2f4ae905a94a
|
[
"Apache-2.0"
] | 36
|
2019-02-19T18:33:20.000Z
|
2022-01-24T18:02:44.000Z
|
dags/email_to_bigquery_dag.py
|
Ressmann/starthinker
|
301c5cf17e382afee346871974ca2f4ae905a94a
|
[
"Apache-2.0"
] | 54
|
2018-12-06T05:47:32.000Z
|
2022-02-21T22:01:01.000Z
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Email Fetch
Import emailed CM report, Dv360 report, csv, or excel into a BigQuery table.
- The person executing this recipe must be the recipient of the email.
- Give a regular expression to match the email subject, link or attachment.
- The data downloaded will overwrite the table specified.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-configurable recipe parameters; each key maps to a 'field' placeholder
# of the same name inside RECIPE below.
INPUTS = {
  'auth_read':'user',  # Credentials used for reading data.
  'email_from':'',  # Must match from field.
  'email_to':'',  # Must match to field.
  'subject':'',  # Regular expression to match subject.
  'link':'',  # Regular expression to match email.
  'attachment':'',  # Regular expression to match attachment.
  'dataset':'',  # Existing dataset in BigQuery.
  'table':'',  # Name of table to be written to.
  'schema':'[]',  # Schema provided in JSON list format or empty list.
  'header':False,  # Does the csv contain a header row.
  'is_incremental_load':False,  # Append report data to table based on date column, de-duplicates.
}

# StarThinker recipe: fetch a report/CSV from email and load it into BigQuery.
# The 'email' task reads a matching message, the nested 'write.bigquery'
# section controls where and how the extracted data lands.
RECIPE = {
  'tasks':[
    {
      'email':{
        'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
        'read':{
          'from':{'field':{'name':'email_from','kind':'string','order':1,'default':'','description':'Must match from field.'}},
          'to':{'field':{'name':'email_to','kind':'string','order':2,'default':'','description':'Must match to field.'}},
          'subject':{'field':{'name':'subject','kind':'string','order':3,'default':'','description':'Regular expression to match subject.'}},
          'link':{'field':{'name':'link','kind':'string','order':4,'default':'','description':'Regular expression to match email.'}},
          'attachment':{'field':{'name':'attachment','kind':'string','order':5,'default':'','description':'Regular expression to match atttachment.'}}
        },
        'write':{
          'bigquery':{
            'dataset':{'field':{'name':'dataset','kind':'string','order':6,'default':'','description':'Existing dataset in BigQuery.'}},
            'table':{'field':{'name':'table','kind':'string','order':7,'default':'','description':'Name of table to be written to.'}},
            'schema':{'field':{'name':'schema','kind':'json','order':8,'default':'[]','description':'Schema provided in JSON list format or empty list.'}},
            'header':{'field':{'name':'header','kind':'boolean','order':9,'default':False,'description':'Does the csv contain a header row.'}},
            'is_incremental_load':{'field':{'name':'is_incremental_load','kind':'boolean','order':10,'default':False,'description':'Append report data to table based on date column, de-duplicates.'}}
          }
        }
      }
    }
  ]
}

# Build the Airflow DAG from the recipe; 'dag' is the module-level object
# that Airflow's DAG discovery picks up.
dag_maker = DAG_Factory('email_to_bigquery', RECIPE, INPUTS)
dag = dag_maker.generate()

if __name__ == "__main__":
  # Running the file directly prints the equivalent command line invocation.
  dag_maker.print_commandline()
| 44.827068
| 199
| 0.618752
|
4a0efcf875b3b168efcd45a64564485a78987ba7
| 3,788
|
py
|
Python
|
jetengine/query_builder/field_list.py
|
kpdemetriou/jetengine
|
1b931efb4a4b0ad7a473773aa6fcf0677c71f122
|
[
"BSD-3-Clause"
] | 5
|
2019-06-24T08:57:42.000Z
|
2021-12-17T22:58:08.000Z
|
jetengine/query_builder/field_list.py
|
kpdemetriou/jetengine
|
1b931efb4a4b0ad7a473773aa6fcf0677c71f122
|
[
"BSD-3-Clause"
] | 1
|
2020-02-24T15:04:13.000Z
|
2020-07-31T11:47:44.000Z
|
jetengine/query_builder/field_list.py
|
kpdemetriou/jetengine
|
1b931efb4a4b0ad7a473773aa6fcf0677c71f122
|
[
"BSD-3-Clause"
] | 3
|
2019-11-07T13:57:40.000Z
|
2021-12-17T22:58:00.000Z
|
from jetengine.query_builder.transform import transform_field_list_query
__all__ = ("QueryFieldList",)
class QueryFieldList(object):
    """Object that handles combinations of .only() and .exclude() calls.

    Accumulates field projections via ``__add__`` and renders them as a
    MongoDB projection dict via :meth:`as_dict` / :meth:`to_query`.
    """

    # Projection modes (MongoDB convention: 1 = include, 0 = exclude).
    ONLY = 1
    EXCLUDE = 0

    def __init__(self, fields=None, value=ONLY, always_include=None, _only_called=False):
        """
        The QueryFieldList builder

        :param fields: A list of fields used in `.only()` or `.exclude()`
        :param value: How to handle the fields; either `ONLY` or `EXCLUDE`
        :param always_include: Any fields to always_include eg `_cls`
        :param _only_called: Has `.only()` been called?  If so its a set of
            fields otherwise it performs a union.
        """
        self.value = value
        self.fields = set(fields or [])
        self.always_include = set(always_include or [])
        # _id is tracked separately (see __add__); None means "not specified".
        self._id = None
        self._only_called = _only_called
        # slice holds $slice projections keyed by field name.
        self.slice = {}

    def __add__(self, f):
        """Merge another QueryFieldList *f* into this one (in place).

        Handles every combination of ONLY/EXCLUDE between self and *f*,
        plus dict-valued entries which represent $slice projections.
        """
        if isinstance(f.value, dict):
            # f carries $slice specs rather than a plain include/exclude flag.
            for field in f.fields:
                self.slice[field] = f.value
            if not self.fields:
                self.fields = f.fields
        elif not self.fields:
            # Nothing accumulated yet: adopt f wholesale.
            self.fields = f.fields
            self.value = f.value
            self.slice = {}
        elif self.value is self.ONLY and f.value is self.ONLY:
            self._clean_slice()
            if self._only_called:
                # repeated .only() calls accumulate fields
                self.fields = self.fields.union(f.fields)
            else:
                self.fields = f.fields
        elif self.value is self.EXCLUDE and f.value is self.EXCLUDE:
            self.fields = self.fields.union(f.fields)
            self._clean_slice()
        elif self.value is self.ONLY and f.value is self.EXCLUDE:
            self.fields -= f.fields
            self._clean_slice()
        elif self.value is self.EXCLUDE and f.value is self.ONLY:
            # exclude(...) followed by only(...): only wins for the difference
            self.value = self.ONLY
            self.fields = f.fields - self.fields
            self._clean_slice()

        # _id should be saved separately to avoid situation such as
        # exclude('_id').only('other') so the previous code of this method
        # remove _id from self.fields (its a normal behavior for any field
        # except for _id because _id field cannot be removed with only)
        if "_id" in f.fields:
            self._id = f.value

        if self.always_include:
            if self.value is self.ONLY and self.fields:
                if sorted(self.slice.keys()) != sorted(self.fields):
                    self.fields = self.fields.union(self.always_include)
            else:
                # if this is exclude - remove from fields values from
                # always included fields
                self.fields -= self.always_include

        if getattr(f, "_only_called", False):
            self._only_called = True
        return self

    # python2 truthiness: a field list is truthy when any field is set
    def __nonzero__(self):
        return bool(self.fields)

    # python3
    def __bool__(self):
        return bool(self.fields)

    def as_dict(self):
        """Render the accumulated projection as a plain dict."""
        field_list = dict((field, self.value) for field in self.fields)
        if self.slice:
            field_list.update(self.slice)
        if self._id is not None:
            field_list["_id"] = self._id
        return field_list

    def to_query(self, document):
        """ Transform to query using db names for fields
            :param document - class of the document
        """
        return transform_field_list_query(document, self.as_dict())

    def reset(self):
        """Clear all accumulated state back to the default ONLY/empty form."""
        self.fields = set([])
        self.slice = {}
        self.value = self.ONLY
        self._id = None

    def _clean_slice(self):
        # Drop $slice specs for fields no longer in the projection.
        if self.slice:
            for field in set(self.slice.keys()) - self.fields:
                del self.slice[field]
| 33.821429
| 89
| 0.585005
|
4a0eff45f4483c183b95c8dfa5afb699cab805f8
| 4,774
|
py
|
Python
|
mosse2.py
|
mintaka33/visual-object-tracking
|
d7583881dc21140b9cbc200a68ef6be3f306aaa4
|
[
"MIT"
] | 1
|
2022-01-21T02:10:47.000Z
|
2022-01-21T02:10:47.000Z
|
mosse2.py
|
mintaka33/gpu-object-tracking
|
d7583881dc21140b9cbc200a68ef6be3f306aaa4
|
[
"MIT"
] | null | null | null |
mosse2.py
|
mintaka33/gpu-object-tracking
|
d7583881dc21140b9cbc200a68ef6be3f306aaa4
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import cv2
def rnd_warp(a):
    """Return a randomly rotated/perturbed copy of image *a* (same size).

    Used to synthesise training samples when initialising the MOSSE filter.
    """
    h, w = a.shape[:2]
    T = np.zeros((2, 3))
    coef = 0.2
    # random rotation angle in [-coef/2, coef/2] radians
    ang = (np.random.rand()-0.5)*coef
    c, s = np.cos(ang), np.sin(ang)
    T[:2, :2] = [[c,-s], [s, c]]
    # add a small random shear/scale perturbation on top of the rotation
    T[:2, :2] += (np.random.rand(2, 2) - 0.5)*coef
    c = (w/2, h/2)
    # choose the translation so the image centre maps onto itself
    T[:,2] = c - np.dot(T[:2, :2], c)
    return cv2.warpAffine(a, T, (w, h), borderMode = cv2.BORDER_REFLECT)
def divSpec(A, B):
    """Element-wise complex division A / B of two DFT spectra.

    Both inputs are real arrays of shape (..., 2) holding the real and
    imaginary planes (OpenCV DFT_COMPLEX_OUTPUT layout); the quotient is
    returned in the same layout.
    """
    numerator = A[..., 0] + 1j * A[..., 1]
    denominator = B[..., 0] + 1j * B[..., 1]
    quotient = numerator / denominator
    return np.dstack([quotient.real, quotient.imag]).copy()
def gaussian2(w, h, sigma=2.0):
xs, ys = np.meshgrid(np.arange(w), np.arange(h))
center_x, center_y = w / 2, h / 2
dist = ((xs - center_x) ** 2 + (ys - center_y) ** 2) / (sigma**2)
g = np.exp(-0.5*dist).astype(np.float32)
return g
def gaussian(w, h):
    """Return an (h, w) float32 Gaussian peak built via GaussianBlur,
    normalised so the maximum is 1.0 (alternative to gaussian2)."""
    g = np.zeros((h, w), np.float32)
    # impulse at the centre, then blur it into a Gaussian bump
    g[h//2, w//2] = 1
    # negative ksize lets OpenCV derive the kernel size from sigma=2.0
    g = cv2.GaussianBlur(g, (-1, -1), 2.0)
    g /= g.max()
    return g
eps = 1e-5
class MOSSE:
    """Minimum Output Sum of Squared Error correlation-filter tracker.

    Follows Bolme et al., "Visual Object Tracking using Adaptive
    Correlation Filters" (CVPR 2010). The filter H is maintained as a
    numerator/denominator pair (H1, H2) updated with a running average.
    """

    def __init__(self, frame, rect):
        """Initialise the filter from *rect* = (x1, y1, x2, y2) in *frame*.

        The window size is snapped to an optimal DFT size and the filter is
        trained on 128 randomly warped copies of the initial patch.
        """
        x1, y1, x2, y2 = rect
        w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
        # re-centre the (possibly resized) window on the original rect
        x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
        self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
        self.size = w, h
        img = cv2.getRectSubPix(frame, (w, h), (x, y))
        # Hanning window suppresses boundary effects before the DFT
        self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
        g = gaussian2(w, h)
        # G: DFT of the desired (Gaussian) response
        self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
        self.H1 = np.zeros_like(self.G)
        self.H2 = np.zeros_like(self.G)
        # train on randomly warped variants of the initial appearance
        for _i in range(128):
            a = self.preprocess(rnd_warp(img))
            A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
            self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
            self.H2 += cv2.mulSpectrums(A, A, 0, conjB=True)
        self.update_kernel()
        self.update(frame)

    def update(self, frame, rate = 0.125):
        """Track the target into *frame* and adapt the filter.

        :param rate: learning rate of the running filter update.
        :returns: tuple (psr, x1, y1, x2, y2) with the tracking quality
            (peak-to-sidelobe ratio) and the new bounding box.
        """
        (x, y), (w, h) = self.pos, self.size
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
        img = self.preprocess(img)
        self.last_resp, (dx, dy), self.psr = self.correlate(img)
        # PSR > 8 is the paper's threshold for a confident detection
        self.good = self.psr > 8.0
        #if not self.good:
        #return
        self.pos = x+dx, y+dy
        # re-sample the patch at the updated position for the filter update
        self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos)
        img = self.preprocess(img)
        A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
        H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True)
        H2 = cv2.mulSpectrums(A, A, 0, conjB=True)
        # exponential moving average of numerator and denominator
        self.H1 = self.H1 * (1.0-rate) + H1 * rate
        self.H2 = self.H2 * (1.0-rate) + H2 * rate
        self.update_kernel()
        (x, y), (w, h) = self.pos, self.size
        x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h)
        return (self.psr, x1, y1, x2, y2)

    def preprocess(self, img):
        # log transform + zero-mean/unit-variance + Hanning window,
        # exactly as prescribed in the MOSSE paper
        img = np.log(np.float32(img)+1.0)
        img = (img-img.mean()) / (img.std()+eps)
        return img*self.win

    def correlate(self, img):
        """Correlate *img* with the current filter.

        :returns: (response map, (dx, dy) peak offset from centre, psr)
        """
        C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
        resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
        h, w = resp.shape
        _, mval, _, (mx, my) = cv2.minMaxLoc(resp)
        side_resp = resp.copy()
        # blank an 11x11 region around the peak to estimate sidelobe stats
        cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
        smean, sstd = side_resp.mean(), side_resp.std()
        # peak-to-sidelobe ratio: higher means a sharper, more reliable peak
        psr = (mval-smean) / (sstd+eps)
        return resp, (mx-w//2, my-h//2), psr

    def update_kernel(self):
        self.H = divSpec(self.H1, self.H2)
        # negate the imaginary plane: use the conjugate so that applying H
        # performs correlation rather than convolution
        self.H[...,1] *= -1
def main():
    """Demo driver: track a region of interest through 'test.264'.

    The first frame prompts for an ROI (immediately overridden by a fixed
    rect for debugging); subsequent frames are tracked with MOSSE and the
    result drawn on screen. Press 'q' to quit; any other key advances one
    frame (waitKey(0) blocks per frame).
    """
    tracker = None
    cap = cv2.VideoCapture('test.264')
    if not cap.isOpened():
        print("ERROR: cannot open video file!")
        exit()
    init_bb = None
    while True:
        ret, frame = cap.read()
        if ret == False:
            break
        frame_gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        if init_bb is None:
            init_bb = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)
            init_bb = [6, 599, 517, 421] # use fixed rect for debugging
        x, y, w, h = init_bb[0], init_bb[1], init_bb[2], init_bb[3]
        if tracker is None:
            # first frame: initialise the tracker and draw the seed box (blue)
            tracker = MOSSE(frame_gray, (x, y, x+w, y+h))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            print(init_bb)
        else:
            # subsequent frames: update and draw the tracked box (green)
            (psr, x1, y1, x2, y2) = tracker.update(frame_gray)
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            print('%8.4f'%psr, x1, y1, x2, y2)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(0) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
# Script entry point: run the tracking demo when executed directly.
if __name__ == "__main__":
    main()
    print('done')
| 33.619718
| 95
| 0.52367
|
4a0eff93a066787b937cfd616b95b4a146b4ae3d
| 26,667
|
py
|
Python
|
tests/integration/test_lambda.py
|
anghami/localstack
|
6113d9f588b145a3b3ac3bf4da64f73b8529dd76
|
[
"Apache-2.0"
] | 4
|
2019-01-12T22:48:24.000Z
|
2019-12-10T06:56:17.000Z
|
tests/integration/test_lambda.py
|
anghami/localstack
|
6113d9f588b145a3b3ac3bf4da64f73b8529dd76
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_lambda.py
|
anghami/localstack
|
6113d9f588b145a3b3ac3bf4da64f73b8529dd76
|
[
"Apache-2.0"
] | 1
|
2021-08-24T21:23:22.000Z
|
2021-08-24T21:23:22.000Z
|
import re
import os
import json
import time
import unittest
from io import BytesIO
from requests.models import Response
from localstack import config
from localstack.constants import LOCALSTACK_ROOT_FOLDER, LOCALSTACK_MAVEN_VERSION
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
short_uid, load_file, to_str, mkdir, download, run_safe, get_free_tcp_port, get_service_protocol)
from localstack.services.infra import start_proxy
from localstack.services.awslambda import lambda_api, lambda_executors
from localstack.services.generic_proxy import ProxyListener
from localstack.services.awslambda.lambda_api import (
LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_RUBY25, LAMBDA_RUNTIME_PYTHON27,
use_docker, LAMBDA_RUNTIME_PYTHON36, LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_CUSTOM_RUNTIME
)
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))

# Paths to the Lambda handler fixtures deployed by the tests below.
TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.py')
TEST_LAMBDA_PYTHON3 = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_python3.py')
TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.js')
TEST_LAMBDA_RUBY = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.rb')
TEST_LAMBDA_DOTNETCORE2 = os.path.join(THIS_FOLDER, 'lambdas', 'dotnetcore2', 'dotnetcore2.zip')
TEST_LAMBDA_CUSTOM_RUNTIME = os.path.join(THIS_FOLDER, 'lambdas', 'custom-runtime')
TEST_LAMBDA_JAVA = os.path.join(LOCALSTACK_ROOT_FOLDER, 'localstack', 'infra', 'localstack-utils-tests.jar')
TEST_LAMBDA_ENV = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_environment.py')

# Function names used when creating the Lambdas under test.
TEST_LAMBDA_NAME_PY = 'test_lambda_py'
TEST_LAMBDA_NAME_PY3 = 'test_lambda_py3'
TEST_LAMBDA_NAME_JS = 'test_lambda_js'
TEST_LAMBDA_NAME_RUBY = 'test_lambda_ruby'
TEST_LAMBDA_NAME_DOTNETCORE2 = 'test_lambda_dotnetcore2'
TEST_LAMBDA_NAME_CUSTOM_RUNTIME = 'test_lambda_custom_runtime'
TEST_LAMBDA_NAME_JAVA = 'test_lambda_java'
TEST_LAMBDA_NAME_JAVA_STREAM = 'test_lambda_java_stream'
TEST_LAMBDA_NAME_JAVA_SERIALIZABLE = 'test_lambda_java_serializable'
TEST_LAMBDA_NAME_ENV = 'test_lambda_env'

# Maven coordinates of the Java test JAR downloaded on demand.
MAVEN_BASE_URL = 'https://repo.maven.apache.org/maven2'
TEST_LAMBDA_JAR_URL = ('{url}/cloud/localstack/{name}/{version}/{name}-{version}-tests.jar').format(
    version=LOCALSTACK_MAVEN_VERSION, url=MAVEN_BASE_URL, name='localstack-utils')

# Python libraries bundled into the test Lambda archives.
TEST_LAMBDA_LIBS = ['localstack', 'localstack_client', 'requests',
    'psutil', 'urllib3', 'chardet', 'certifi', 'idna', 'pip', 'dns']
class LambdaTestBase(unittest.TestCase):
    """Shared helpers for the Lambda integration test cases."""

    def check_lambda_logs(self, func_name, expected_lines=None):
        """Assert that the newest CloudWatch log stream of *func_name*
        contains every entry in *expected_lines*.

        Entries containing '.*' are treated as regular expressions matched
        (via re.match) against each log message; plain entries must appear
        verbatim in the stream.

        :param func_name: name of the deployed Lambda function.
        :param expected_lines: list of expected log lines / patterns
            (default: no expectations).
        """
        # NOTE: the previous signature used a mutable default argument
        # (expected_lines=[]), which is shared across all calls; use the
        # None sentinel instead — backward compatible for all callers.
        expected_lines = expected_lines or []
        logs_client = aws_stack.connect_to_service('logs')
        log_group_name = '/aws/lambda/%s' % func_name
        streams = logs_client.describe_log_streams(logGroupName=log_group_name)['logStreams']
        # newest stream first, so index 0 is the most recent invocation
        streams = sorted(streams, key=lambda x: x['creationTime'], reverse=True)
        log_events = logs_client.get_log_events(
            logGroupName=log_group_name, logStreamName=streams[0]['logStreamName'])['events']
        log_messages = [e['message'] for e in log_events]
        for line in expected_lines:
            if '.*' in line:
                found = [re.match(line, m) for m in log_messages]
                if any(found):
                    continue
            self.assertIn(line, log_messages)
class TestLambdaBaseFeatures(unittest.TestCase):
    """Runtime-independent Lambda features: fallback-URL forwarding of
    invocations targeting non-existing functions."""

    def test_forward_to_fallback_url_dynamodb(self):
        # With LAMBDA_FALLBACK_URL = dynamodb://<table>, invocations of
        # missing functions should be recorded as items in that table.
        db_table = 'lambda-records'
        ddb_client = aws_stack.connect_to_service('dynamodb')

        def num_items():
            # run_safe tolerates the table not existing yet
            return len((run_safe(ddb_client.scan, TableName=db_table) or {'Items': []})['Items'])

        items_before = num_items()
        self.run_forward_to_fallback_url('dynamodb://%s' % db_table)
        items_after = num_items()
        self.assertEqual(items_after, items_before + 3)

    def test_forward_to_fallback_url_http(self):
        # With an http(s) fallback URL, each missing-function invocation
        # should be POSTed to that endpoint; capture them with a local proxy.
        class MyUpdateListener(ProxyListener):
            def forward_request(self, method, path, data, headers):
                records.append(data)
                response = Response()
                response.status_code = 200
                response._content = ''
                return response

        records = []
        local_port = get_free_tcp_port()
        proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener())

        items_before = len(records)
        self.run_forward_to_fallback_url('%s://localhost:%s' % (get_service_protocol(), local_port))
        items_after = len(records)
        self.assertEqual(items_after, items_before + 3)
        proxy.stop()

    def run_forward_to_fallback_url(self, url, num_requests=3):
        """Invoke *num_requests* non-existing Lambdas with the fallback URL
        temporarily set to *url*; always restores the config afterwards."""
        lambda_client = aws_stack.connect_to_service('lambda')
        config.LAMBDA_FALLBACK_URL = url
        try:
            for i in range(num_requests):
                lambda_client.invoke(FunctionName='non-existing-lambda-%s' % i,
                    Payload=b'{}', InvocationType='RequestResponse')
        finally:
            config.LAMBDA_FALLBACK_URL = ''
class TestPythonRuntimes(LambdaTestBase):
    """Integration tests for the Python 2.7 / 3.6 Lambda runtimes:
    invocation types, env variables, S3-based deployment and versioning."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        cls.s3_client = aws_stack.connect_to_service('s3')

        # deploy one shared Python 2.7 function used by the invocation tests
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_PY,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )

    @classmethod
    def tearDownClass(cls):
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY)

    def test_invocation_type_not_set(self):
        # default invocation type behaves like RequestResponse (sync)
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}')
        result_data = json.loads(result['Payload'].read())
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(result_data['event'], json.loads('{}'))

    def test_invocation_type_request_response(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY,
            Payload=b'{}', InvocationType='RequestResponse')
        result_data = result['Payload'].read()
        result_data = json.loads(to_str(result_data))
        self.assertEqual(result['StatusCode'], 200)
        self.assertIsInstance(result_data, dict)

    def test_invocation_type_event(self):
        # async invocation returns 202 Accepted
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY,
            Payload=b'{}', InvocationType='Event')
        self.assertEqual(result['StatusCode'], 202)

    def test_invocation_type_dry_run(self):
        # DryRun only validates permissions/params; returns 204 No Content
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}',
            InvocationType='DryRun')
        self.assertEqual(result['StatusCode'], 204)

    def test_lambda_environment(self):
        # env variables passed at creation must be visible to the handler
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_ENV,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_ENV, Payload=b'{}')
        result_data = result['Payload']
        self.assertEqual(result['StatusCode'], 200)
        self.assertDictEqual(json.load(result_data), {'Hello': 'World'})
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_ENV)

    def test_invocation_with_qualifier(self):
        """Publish a version via S3-based creation and invoke it by
        Qualifier; also verifies the handler's context fields and logs."""
        lambda_name = 'test_lambda_%s' % short_uid()
        bucket_name = 'test_bucket_lambda2'
        bucket_key = 'test_lambda.zip'
        # upload zip file to S3
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        self.s3_client.create_bucket(Bucket=bucket_name)
        self.s3_client.upload_fileobj(
            BytesIO(zip_file), bucket_name, bucket_key)
        # create lambda function
        response = self.lambda_client.create_function(
            FunctionName=lambda_name, Handler='handler.handler',
            Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
            Code={
                'S3Bucket': bucket_name,
                'S3Key': bucket_key
            },
            Publish=True
        )
        self.assertIn('Version', response)
        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = self.lambda_client.invoke(
            FunctionName=lambda_name,
            Payload=data_before,
            Qualifier=response['Version']
        )
        data_after = json.loads(result['Payload'].read())
        self.assertEqual(json.loads(to_str(data_before)), data_after['event'])

        context = data_after['context']
        self.assertEqual(response['Version'], context['function_version'])
        self.assertEqual(lambda_name, context['function_name'])

        # assert that logs are present
        expected = ['Lambda log message - print function']
        if use_docker():
            # Note that during regular test execution, nosetests captures the output from
            # the logging module - hence we can only expect this when running in Docker
            expected.append('.*Lambda log message - logging module')
        self.check_lambda_logs(lambda_name, expected_lines=expected)

        # clean up
        testutil.delete_lambda_function(lambda_name)

    def test_upload_lambda_from_s3(self):
        """Create a function whose code lives in S3 and invoke $LATEST."""
        lambda_name = 'test_lambda_%s' % short_uid()
        bucket_name = 'test_bucket_lambda'
        bucket_key = 'test_lambda.zip'
        # upload zip file to S3
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        self.s3_client.create_bucket(Bucket=bucket_name)
        self.s3_client.upload_fileobj(
            BytesIO(zip_file), bucket_name, bucket_key)
        # create lambda function
        self.lambda_client.create_function(
            FunctionName=lambda_name, Handler='handler.handler',
            Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
            Code={
                'S3Bucket': bucket_name,
                'S3Key': bucket_key
            }
        )
        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = self.lambda_client.invoke(
            FunctionName=lambda_name, Payload=data_before)
        data_after = json.loads(result['Payload'].read())
        self.assertEqual(json.loads(to_str(data_before)), data_after['event'])

        context = data_after['context']
        self.assertEqual('$LATEST', context['function_version'])
        self.assertEqual(lambda_name, context['function_name'])

        # clean up
        testutil.delete_lambda_function(lambda_name)

    def test_python_lambda_running_in_docker(self):
        # Python 3.6 requires the Docker executor; skip otherwise
        if not use_docker():
            return
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON3),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_PY3,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON36
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_PY3, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY3)
class TestNodeJSRuntimes(LambdaTestBase):
    """Integration test for the Node.js 8.10 Lambda runtime (Docker only)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_nodejs_lambda_running_in_docker(self):
        # Node.js execution requires the Docker executor; skip otherwise
        if not use_docker():
            return
        zip_file = testutil.create_zip_file(
            TEST_LAMBDA_NODEJS, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JS,
            zip_file=zip_file,
            handler='lambda_integration.handler',
            runtime=LAMBDA_RUNTIME_NODEJS810
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JS, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # assert that logs are present
        expected = ['.*Node.js Lambda handler executing.']
        self.check_lambda_logs(TEST_LAMBDA_NAME_JS, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JS)
class TestCustomRuntimes(LambdaTestBase):
    """Integration test for the 'provided' custom Lambda runtime (Docker only)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_custom_runtime_running_in_docker(self):
        """Deploy and invoke a custom-runtime Lambda that echoes its payload.

        NOTE: renamed from test_nodejs_lambda_running_in_docker — the old
        name was a copy/paste from TestNodeJSRuntimes and misdescribed the
        runtime under test; unittest discovery still picks up the test_
        prefix, so no callers are affected.
        """
        # custom runtimes can only execute inside Docker; skip otherwise
        if not use_docker():
            return
        zip_file = testutil.create_zip_file(
            TEST_LAMBDA_CUSTOM_RUNTIME, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_CUSTOM_RUNTIME,
            zip_file=zip_file,
            handler='function.handler',
            runtime=LAMBDA_RUNTIME_CUSTOM_RUNTIME
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_CUSTOM_RUNTIME,
            Payload=b'{"text":"bar with \'quotes\\""}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(
            to_str(result_data).strip(),
            """Echoing request: '{"text": "bar with \'quotes\\""}'""")
        # assert that logs are present
        expected = ['.*Custom Runtime Lambda handler executing.']
        self.check_lambda_logs(
            TEST_LAMBDA_NAME_CUSTOM_RUNTIME, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_CUSTOM_RUNTIME)
class TestDotNetCoreRuntimes(LambdaTestBase):
    """Integration test for the .NET Core 2.0 Lambda runtime (Docker only)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')
        # lambda .NET Core 2.0 is already a zip
        zip_file = TEST_LAMBDA_DOTNETCORE2
        cls.zip_file_content = None
        with open(zip_file, 'rb') as file_obj:
            cls.zip_file_content = file_obj.read()

    def test_dotnet_lambda_running_in_docker(self):
        # .NET execution requires the Docker executor; skip otherwise
        if not use_docker():
            return
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_DOTNETCORE2,
            zip_file=self.zip_file_content,
            handler='DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler',
            runtime=LAMBDA_RUNTIME_DOTNETCORE2
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_DOTNETCORE2, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # assert that logs are present
        expected = ['Running .NET Core 2.0 Lambda']
        self.check_lambda_logs(TEST_LAMBDA_NAME_DOTNETCORE2, expected_lines=expected)
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_DOTNETCORE2)
class TestRubyRuntimes(LambdaTestBase):
    """Integration test for the Ruby 2.5 Lambda runtime (Docker only)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_ruby_lambda_running_in_docker(self):
        # Ruby execution requires the Docker executor; skip otherwise
        if not use_docker():
            return
        zip_file = testutil.create_zip_file(
            TEST_LAMBDA_RUBY, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_RUBY,
            zip_file=zip_file,
            handler='lambda_integration.handler',
            runtime=LAMBDA_RUNTIME_RUBY25
        )
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_RUBY, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_RUBY)
class TestJavaRuntimes(LambdaTestBase):
    """Integration tests for the Java 8 runtime: plain JAR deployment,
    stream handlers, serializable input objects, and event-style payloads
    (SNS / DynamoDB / Kinesis)."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

        # deploy lambda - Java
        if not os.path.exists(TEST_LAMBDA_JAVA):
            # fetch the test JAR from Maven Central on first run
            mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
            download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)

        # Lambda supports single JAR deployments without the zip,
        # so we upload the JAR directly.
        cls.test_java_jar = load_file(TEST_LAMBDA_JAVA, mode='rb')
        cls.test_java_zip = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA,
            zip_file=cls.test_java_jar,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.LambdaHandler'
        )

        # deploy lambda - Java with stream handler
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA_STREAM,
            zip_file=cls.test_java_jar,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.LambdaStreamHandler'
        )

        # deploy lambda - Java with serializable input object
        testutil.create_lambda_function(
            func_name=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
            zip_file=cls.test_java_zip,
            runtime=LAMBDA_RUNTIME_JAVA8,
            handler='cloud.localstack.sample.SerializedInputLambdaHandler'
        )

    @classmethod
    def tearDownClass(cls):
        # clean up
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_STREAM)
        testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_SERIALIZABLE)

    def test_java_runtime(self):
        self.assertIsNotNone(self.test_java_jar)
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        # handler echoes the deserialized type name of the payload
        self.assertIn('LinkedHashMap', to_str(result_data))

    def test_sns_event(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}')
        self.assertEqual(result['StatusCode'], 202)

    def test_ddb_event(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}')
        self.assertEqual(result['StatusCode'], 202)

    def test_kinesis_invocation(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA,
            Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertIn('KinesisEvent', to_str(result_data))

    def test_kinesis_event(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event',
            Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 202)
        # async invocations return an empty payload
        self.assertEqual(to_str(result_data).strip(), '')

    def test_stream_handler(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM, Payload=b'{}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertEqual(to_str(result_data).strip(), '{}')

    def test_serializable_input_object(self):
        result = self.lambda_client.invoke(
            FunctionName=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE,
            Payload=b'{"bucket": "test_bucket", "key": "test_key"}')
        result_data = result['Payload'].read()
        self.assertEqual(result['StatusCode'], 200)
        self.assertDictEqual(
            json.loads(to_str(result_data)),
            {'validated': True, 'bucket': 'test_bucket', 'key': 'test_key'}
        )
class TestDockerBehaviour(LambdaTestBase):
    """Tests for the Docker-based Lambda executors: container reuse,
    idle-container cleanup, and the generated `docker run` command."""

    @classmethod
    def setUpClass(cls):
        cls.lambda_client = aws_stack.connect_to_service('lambda')

    def test_prime_and_destroy_containers(self):
        # run these tests only for the "reuse containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                lambda_executors.LambdaExecutorReuseContainers):
            return

        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_prime_and_destroy_containers'
        func_arn = lambda_api.func_arn(func_name)

        # make sure existing containers are gone
        executor.cleanup()
        self.assertEqual(len(executor.get_all_container_names()), 0)

        # deploy and invoke lambda without Docker
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )

        self.assertEqual(len(executor.get_all_container_names()), 0)
        self.assertDictEqual(executor.function_invoke_times, {})

        # invoke a few times.
        durations = []
        num_iterations = 3
        for i in range(0, num_iterations + 1):
            prev_invoke_time = None
            if i > 0:
                prev_invoke_time = executor.function_invoke_times[func_arn]

            start_time = time.time()
            self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
            duration = time.time() - start_time

            # a single container should be (re)used for all invocations
            self.assertEqual(len(executor.get_all_container_names()), 1)

            # ensure the last invoke time is being updated properly.
            if i > 0:
                self.assertGreater(executor.function_invoke_times[func_arn], prev_invoke_time)
            else:
                self.assertGreater(executor.function_invoke_times[func_arn], 0)

            durations.append(duration)

        # the first call would have created the container. subsequent calls would reuse and be faster.
        for i in range(1, num_iterations + 1):
            self.assertLess(durations[i], durations[0])

        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 1)

        container_network = executor.get_docker_container_network(func_arn)
        self.assertEqual(container_network, 'default')

        executor.cleanup()
        status = executor.get_docker_container_status(func_arn)
        self.assertEqual(status, 0)
        self.assertEqual(len(executor.get_all_container_names()), 0)

        # clean up
        testutil.delete_lambda_function(func_name)

    def test_docker_command_for_separate_container_lambda_executor(self):
        # run these tests only for the "separate containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                lambda_executors.LambdaExecutorSeparateContainers):
            return

        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_docker_command_for_separate_container_lambda_executor'
        func_arn = lambda_api.func_arn(func_name)
        handler = 'handler'
        lambda_cwd = '/app/lambda'
        network = 'compose_network'

        # the configured Docker network must appear in the generated command
        config.LAMBDA_DOCKER_NETWORK = network

        cmd = executor.prepare_execution(func_arn, {}, LAMBDA_RUNTIME_NODEJS810, '', handler, lambda_cwd)

        expected = 'docker run -v "%s":/var/task --network="%s" --rm "lambci/lambda:%s" "%s"' % (
            lambda_cwd, network, LAMBDA_RUNTIME_NODEJS810, handler)

        self.assertIn(('--network="%s"' % network), cmd, 'cmd=%s expected=%s' % (cmd, expected))

        # restore the config so other tests are unaffected
        config.LAMBDA_DOCKER_NETWORK = ''

    def test_destroy_idle_containers(self):
        # run these tests only for the "reuse containers" Lambda executor
        if not isinstance(lambda_api.LAMBDA_EXECUTOR,
                lambda_executors.LambdaExecutorReuseContainers):
            return

        executor = lambda_api.LAMBDA_EXECUTOR
        func_name = 'test_destroy_idle_containers'
        func_arn = lambda_api.func_arn(func_name)

        # make sure existing containers are gone
        executor.destroy_existing_docker_containers()
        self.assertEqual(len(executor.get_all_container_names()), 0)

        # deploy and invoke lambda without Docker
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_ENV),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON27
        )
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            runtime=LAMBDA_RUNTIME_PYTHON27,
            envvars={'Hello': 'World'}
        )

        self.assertEqual(len(executor.get_all_container_names()), 0)

        self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}')
        self.assertEqual(len(executor.get_all_container_names()), 1)

        # try to destroy idle containers.
        executor.idle_container_destroyer()
        self.assertEqual(len(executor.get_all_container_names()), 1)

        # simulate an idle container
        executor.function_invoke_times[func_arn] = time.time() - lambda_executors.MAX_CONTAINER_IDLE_TIME_MS
        executor.idle_container_destroyer()
        self.assertEqual(len(executor.get_all_container_names()), 0)

        # clean up
        testutil.delete_lambda_function(func_name)
| 38.204871
| 108
| 0.665354
|
4a0f002191d74bb50bea698bd5937724a5cff101
| 1,648
|
py
|
Python
|
gamer_registration_system/con/tests/test_models.py
|
splummer/gamer_reg
|
7cccbbf8e6e52e46594c8128a7e7a523b8202f03
|
[
"MIT"
] | null | null | null |
gamer_registration_system/con/tests/test_models.py
|
splummer/gamer_reg
|
7cccbbf8e6e52e46594c8128a7e7a523b8202f03
|
[
"MIT"
] | null | null | null |
gamer_registration_system/con/tests/test_models.py
|
splummer/gamer_reg
|
7cccbbf8e6e52e46594c8128a7e7a523b8202f03
|
[
"MIT"
] | null | null | null |
import pytest
import datetime
from django.test import TestCase
from django.utils import timezone
from gamer_registration_system.con.models import Convention, Event, EventSchedule
# Create your tests here.
class EventScheduleModelTests(TestCase):
    """Unit tests for ``EventSchedule.recent_event()``."""

    # Unsaved model instances shared by all tests (no DB access required).
    new_con = Convention(convention_name='Test Future Con')
    new_event = Event(convention=new_con, title='Test Future Event')

    def test_recent_event_with_future_start(self, new_con=new_con, new_event=new_event):
        """recent_event() returns False when start_date lies in the future."""
        start = timezone.now() + datetime.timedelta(days=30)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), False)

    def test_recent_event_with_old_event(self, new_con=new_con, new_event=new_event):
        """recent_event() returns False when start_date is more than one day old."""
        start = timezone.now() - datetime.timedelta(days=1, seconds=1)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), False)

    def test_recent_event_with_recent_question(self, new_con=new_con, new_event=new_event):
        """recent_event() returns True when start_date falls within the last day."""
        start = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), True)
| 43.368421
| 95
| 0.723908
|
4a0f00f6b661a2359cb63e23fdb1b3a23f2bf818
| 15,409
|
py
|
Python
|
aioesphomeapi/client.py
|
bdraco/aioesphomeapi
|
d285c26b16b7a9996f3cfd3d32e0e4516afb454a
|
[
"MIT"
] | null | null | null |
aioesphomeapi/client.py
|
bdraco/aioesphomeapi
|
d285c26b16b7a9996f3cfd3d32e0e4516afb454a
|
[
"MIT"
] | 6
|
2021-11-26T12:15:29.000Z
|
2022-03-31T12:20:03.000Z
|
aioesphomeapi/client.py
|
bdraco/aioesphomeapi
|
d285c26b16b7a9996f3cfd3d32e0e4516afb454a
|
[
"MIT"
] | null | null | null |
import logging
from typing import Any, Callable, Optional, Tuple
import aioesphomeapi.api_pb2 as pb
from aioesphomeapi.connection import APIConnection, ConnectionParams
from aioesphomeapi.core import APIConnectionError
from aioesphomeapi.model import *
_LOGGER = logging.getLogger(__name__)
class APIClient:
    """High-level asyncio client for the ESPHome native API.

    Wraps an :class:`APIConnection` and exposes typed helpers for device
    discovery, state subscriptions and entity commands.  All methods except
    ``__init__`` are coroutines and must run on the supplied event loop.

    NOTE(review): ``attr``, ``List`` and the model/enum classes appear to come
    via the star-import of ``aioesphomeapi.model`` — verify.
    """
    def __init__(self, eventloop, address: str, port: int, password: str, *,
                 client_info: str = 'aioesphomeapi', keepalive: float = 15.0):
        # Connection parameters are frozen here; the actual connection is
        # created lazily in connect().
        self._params = ConnectionParams(
            eventloop=eventloop,
            address=address,
            port=port,
            password=password,
            client_info=client_info,
            keepalive=keepalive,
        )
        self._connection = None  # type: Optional[APIConnection]
    async def connect(self, on_stop=None, login=False):
        """Open the connection; optionally authenticate when ``login`` is True.

        ``on_stop`` is awaited at most once, when a fully-established
        connection closes.  Raises APIConnectionError if already connected or
        when connecting/login fails (unexpected errors are wrapped).
        """
        if self._connection is not None:
            raise APIConnectionError("Already connected!")
        connected = False
        stopped = False
        async def _on_stop():
            # Idempotent teardown: may be invoked both by the connection
            # itself and by the error paths below.
            nonlocal stopped
            if stopped:
                return
            stopped = True
            self._connection = None
            # Only notify the caller when the connection had been set up.
            if connected and on_stop is not None:
                await on_stop()
        self._connection = APIConnection(self._params, _on_stop)
        try:
            await self._connection.connect()
            if login:
                await self._connection.login()
        except APIConnectionError:
            await _on_stop()
            raise
        except Exception as e:
            # Normalize unexpected errors into the library's exception type.
            await _on_stop()
            raise APIConnectionError(
                "Unexpected error while connecting: {}".format(e))
        connected = True
    async def disconnect(self, force=False):
        """Close the connection; a no-op when not connected."""
        if self._connection is None:
            return
        await self._connection.stop(force=force)
    def _check_connected(self):
        # Guard for calls that only need a transport-level connection.
        if self._connection is None:
            raise APIConnectionError("Not connected!")
        if not self._connection.is_connected:
            raise APIConnectionError("Connection not done!")
    def _check_authenticated(self):
        # Guard for calls that additionally require a successful login.
        self._check_connected()
        if not self._connection.is_authenticated:
            raise APIConnectionError("Not authenticated!")
    async def device_info(self) -> DeviceInfo:
        """Request and return the device's static information."""
        self._check_connected()
        resp = await self._connection.send_message_await_response(
            pb.DeviceInfoRequest(), pb.DeviceInfoResponse)
        return DeviceInfo(
            uses_password=resp.uses_password,
            name=resp.name,
            mac_address=resp.mac_address,
            esphome_version=resp.esphome_version,
            compilation_time=resp.compilation_time,
            model=resp.model,
            has_deep_sleep=resp.has_deep_sleep,
        )
    async def list_entities_services(self) -> Tuple[List[Any], List[UserService]]:
        """Enumerate the device's entities and user-defined services.

        Returns ``(entities, services)`` where each entity is the *Info model
        corresponding to its protobuf response type.
        """
        self._check_authenticated()
        # Maps each protobuf list-response type to its model class; the
        # services response is handled separately below (value is None).
        response_types = {
            pb.ListEntitiesBinarySensorResponse: BinarySensorInfo,
            pb.ListEntitiesCoverResponse: CoverInfo,
            pb.ListEntitiesFanResponse: FanInfo,
            pb.ListEntitiesLightResponse: LightInfo,
            pb.ListEntitiesSensorResponse: SensorInfo,
            pb.ListEntitiesSwitchResponse: SwitchInfo,
            pb.ListEntitiesTextSensorResponse: TextSensorInfo,
            pb.ListEntitiesServicesResponse: None,
            pb.ListEntitiesCameraResponse: CameraInfo,
            pb.ListEntitiesClimateResponse: ClimateInfo,
        }
        def do_append(msg):
            # Collect every message whose type appears in the table above.
            return isinstance(msg, tuple(response_types.keys()))
        def do_stop(msg):
            # The device signals the end of enumeration with "done".
            return isinstance(msg, pb.ListEntitiesDoneResponse)
        resp = await self._connection.send_message_await_response_complex(
            pb.ListEntitiesRequest(), do_append, do_stop, timeout=5)
        entities = []
        services = []
        for msg in resp:
            if isinstance(msg, pb.ListEntitiesServicesResponse):
                args = []
                for arg in msg.args:
                    args.append(UserServiceArg(
                        name=arg.name,
                        type_=arg.type,
                    ))
                services.append(UserService(
                    name=msg.name,
                    key=msg.key,
                    args=args,
                ))
                continue
            cls = None
            # do_append() guarantees exactly one entry matches, so after this
            # loop `cls` holds the model class for `msg`.
            for resp_type, cls in response_types.items():
                if isinstance(msg, resp_type):
                    break
            # Copy every attrs-declared field from the protobuf message.
            kwargs = {}
            for key, _ in attr.fields_dict(cls).items():
                kwargs[key] = getattr(msg, key)
            entities.append(cls(**kwargs))
        return entities, services
    async def subscribe_states(self, on_state: Callable[[Any], None]) -> None:
        """Subscribe to entity state updates; ``on_state`` receives model objects.

        Camera images arrive in chunks and are reassembled before being
        delivered as a single CameraState.
        """
        self._check_authenticated()
        # Maps each protobuf state-response type to its state model class.
        response_types = {
            pb.BinarySensorStateResponse: BinarySensorState,
            pb.CoverStateResponse: CoverState,
            pb.FanStateResponse: FanState,
            pb.LightStateResponse: LightState,
            pb.SensorStateResponse: SensorState,
            pb.SwitchStateResponse: SwitchState,
            pb.TextSensorStateResponse: TextSensorState,
            pb.ClimateStateResponse: ClimateState,
        }
        # Partial camera frames keyed by entity key, accumulated until "done".
        image_stream = {}
        def on_msg(msg):
            if isinstance(msg, pb.CameraImageResponse):
                data = image_stream.pop(msg.key, bytes()) + msg.data
                if msg.done:
                    on_state(CameraState(key=msg.key, image=data))
                else:
                    image_stream[msg.key] = data
                return
            for resp_type, cls in response_types.items():
                if isinstance(msg, resp_type):
                    break
            else:
                # Unknown message type: ignore silently.
                return
            kwargs = {}
            # pylint: disable=undefined-loop-variable
            for key, _ in attr.fields_dict(cls).items():
                kwargs[key] = getattr(msg, key)
            on_state(cls(**kwargs))
        await self._connection.send_message_callback_response(pb.SubscribeStatesRequest(), on_msg)
    async def subscribe_logs(self, on_log: Callable[[pb.SubscribeLogsResponse], None],
                             log_level=None) -> None:
        """Subscribe to device log messages, optionally filtered by ``log_level``."""
        self._check_authenticated()
        def on_msg(msg):
            if isinstance(msg, pb.SubscribeLogsResponse):
                on_log(msg)
        req = pb.SubscribeLogsRequest()
        if log_level is not None:
            req.level = log_level
        await self._connection.send_message_callback_response(req, on_msg)
    async def subscribe_service_calls(self, on_service_call: Callable[[HomeassistantServiceCall], None]) -> None:
        """Subscribe to Home Assistant service calls issued by the device."""
        self._check_authenticated()
        def on_msg(msg):
            if isinstance(msg, pb.HomeassistantServiceResponse):
                kwargs = {}
                for key, _ in attr.fields_dict(HomeassistantServiceCall).items():
                    kwargs[key] = getattr(msg, key)
                on_service_call(HomeassistantServiceCall(**kwargs))
        await self._connection.send_message_callback_response(pb.SubscribeHomeassistantServicesRequest(),
                                                              on_msg)
    async def subscribe_home_assistant_states(self, on_state_sub: Callable[[str], None]) -> None:
        """Subscribe to the entity ids whose HA state the device wants to track."""
        self._check_authenticated()
        def on_msg(msg):
            if isinstance(msg, pb.SubscribeHomeAssistantStateResponse):
                on_state_sub(msg.entity_id)
        await self._connection.send_message_callback_response(
            pb.SubscribeHomeAssistantStatesRequest(), on_msg)
    async def send_home_assistant_state(self, entity_id: str, state: str) -> None:
        """Push a Home Assistant entity state to the device."""
        self._check_authenticated()
        await self._connection.send_message(pb.HomeAssistantStateResponse(
            entity_id=entity_id,
            state=state,
        ))
    async def cover_command(self,
                            key: int,
                            position: Optional[float] = None,
                            tilt: Optional[float] = None,
                            stop: bool = False,
                            ) -> None:
        """Command a cover entity (position/tilt on new API, open/close/stop legacy)."""
        self._check_authenticated()
        req = pb.CoverCommandRequest()
        req.key = key
        if self.api_version >= APIVersion(1, 1):
            if position is not None:
                req.has_position = True
                req.position = position
            if tilt is not None:
                req.has_tilt = True
                req.tilt = tilt
            if stop:
                req.stop = stop
        else:
            # Legacy (pre-1.1) devices only understand OPEN/CLOSE/STOP.
            # NOTE(review): with no stop and position != 1.0 (including None)
            # this falls through to CLOSE — confirm that is intended.
            req.has_legacy_command = True
            if stop:
                req.legacy_command = LegacyCoverCommand.STOP
            elif position == 1.0:
                req.legacy_command = LegacyCoverCommand.OPEN
            else:
                req.legacy_command = LegacyCoverCommand.CLOSE
        await self._connection.send_message(req)
    async def fan_command(self,
                          key: int,
                          state: Optional[bool] = None,
                          speed: Optional[FanSpeed] = None,
                          oscillating: Optional[bool] = None
                          ) -> None:
        """Command a fan entity; only the fields passed as non-None are sent."""
        self._check_authenticated()
        req = pb.FanCommandRequest()
        req.key = key
        if state is not None:
            req.has_state = True
            req.state = state
        if speed is not None:
            req.has_speed = True
            req.speed = speed
        if oscillating is not None:
            req.has_oscillating = True
            req.oscillating = oscillating
        await self._connection.send_message(req)
    async def light_command(self,
                            key: int,
                            state: Optional[bool] = None,
                            brightness: Optional[float] = None,
                            rgb: Optional[Tuple[float, float, float]] = None,
                            white: Optional[float] = None,
                            color_temperature: Optional[float] = None,
                            transition_length: Optional[float] = None,
                            flash_length: Optional[float] = None,
                            effect: Optional[str] = None,
                            ):
        """Command a light entity; only the fields passed as non-None are sent.

        ``transition_length``/``flash_length`` are given in seconds and are
        converted to milliseconds for the wire format.
        """
        self._check_authenticated()
        req = pb.LightCommandRequest()
        req.key = key
        if state is not None:
            req.has_state = True
            req.state = state
        if brightness is not None:
            req.has_brightness = True
            req.brightness = brightness
        if rgb is not None:
            req.has_rgb = True
            req.red = rgb[0]
            req.green = rgb[1]
            req.blue = rgb[2]
        if white is not None:
            req.has_white = True
            req.white = white
        if color_temperature is not None:
            req.has_color_temperature = True
            req.color_temperature = color_temperature
        if transition_length is not None:
            req.has_transition_length = True
            req.transition_length = int(round(transition_length * 1000))
        if flash_length is not None:
            req.has_flash_length = True
            req.flash_length = int(round(flash_length * 1000))
        if effect is not None:
            req.has_effect = True
            req.effect = effect
        await self._connection.send_message(req)
    async def switch_command(self,
                             key: int,
                             state: bool
                             ) -> None:
        """Turn a switch entity on or off."""
        self._check_authenticated()
        req = pb.SwitchCommandRequest()
        req.key = key
        req.state = state
        await self._connection.send_message(req)
    async def climate_command(self,
                              key: int,
                              mode: Optional[ClimateMode] = None,
                              target_temperature: Optional[float] = None,
                              target_temperature_low: Optional[float] = None,
                              target_temperature_high: Optional[float] = None,
                              away: Optional[bool] = None,
                              fan_mode: Optional[ClimateFanMode] = None,
                              swing_mode: Optional[ClimateSwingMode] = None,
                              ) -> None:
        """Command a climate entity; only the fields passed as non-None are sent."""
        self._check_authenticated()
        req = pb.ClimateCommandRequest()
        req.key = key
        if mode is not None:
            req.has_mode = True
            req.mode = mode
        if target_temperature is not None:
            req.has_target_temperature = True
            req.target_temperature = target_temperature
        if target_temperature_low is not None:
            req.has_target_temperature_low = True
            req.target_temperature_low = target_temperature_low
        if target_temperature_high is not None:
            req.has_target_temperature_high = True
            req.target_temperature_high = target_temperature_high
        if away is not None:
            req.has_away = True
            req.away = away
        if fan_mode is not None:
            req.has_fan_mode = True
            req.fan_mode = fan_mode
        if swing_mode is not None:
            req.has_swing_mode = True
            req.swing_mode = swing_mode
        await self._connection.send_message(req)
    async def execute_service(self, service: UserService, data: dict):
        """Invoke a user-defined service with arguments taken from ``data``.

        ``data`` maps argument names to values; each value is encoded into the
        protobuf field matching the argument's declared type.
        """
        self._check_authenticated()
        req = pb.ExecuteServiceRequest()
        req.key = service.key
        args = []
        for arg_desc in service.args:
            arg = pb.ExecuteServiceArgument()
            val = data[arg_desc.name]
            # Pre-1.3 devices used a differently-named int field.
            int_type = 'int_' if self.api_version >= APIVersion(
                1, 3) else 'legacy_int'
            map_single = {
                UserServiceArgType.BOOL: 'bool_',
                UserServiceArgType.INT: int_type,
                UserServiceArgType.FLOAT: 'float_',
                UserServiceArgType.STRING: 'string_',
            }
            map_array = {
                UserServiceArgType.BOOL_ARRAY: 'bool_array',
                UserServiceArgType.INT_ARRAY: 'int_array',
                UserServiceArgType.FLOAT_ARRAY: 'float_array',
                UserServiceArgType.STRING_ARRAY: 'string_array',
            }
            # pylint: disable=redefined-outer-name
            if arg_desc.type_ in map_array:
                attr = getattr(arg, map_array[arg_desc.type_])
                attr.extend(val)
            else:
                setattr(arg, map_single[arg_desc.type_], val)
            args.append(arg)
        # pylint: disable=no-member
        req.args.extend(args)
        await self._connection.send_message(req)
    async def _request_image(self, *, single=False, stream=False):
        # Shared helper for the two camera request flavours below.
        req = pb.CameraImageRequest()
        req.single = single
        req.stream = stream
        await self._connection.send_message(req)
    async def request_single_image(self):
        """Request a single camera image."""
        await self._request_image(single=True)
    async def request_image_stream(self):
        """Request a continuous camera image stream."""
        await self._request_image(stream=True)
    @property
    def api_version(self) -> Optional[APIVersion]:
        """The negotiated API version, or None when not connected."""
        if self._connection is None:
            return None
        return self._connection.api_version
| 37.309927
| 113
| 0.564735
|
4a0f0178d5d2c991ea2eafe0fd2464101571a550
| 2,548
|
py
|
Python
|
easilyb/terminal.py
|
xaled/easilyb
|
cdb5f738205f700b37e03c50d04061a2d1e730cc
|
[
"MIT"
] | null | null | null |
easilyb/terminal.py
|
xaled/easilyb
|
cdb5f738205f700b37e03c50d04061a2d1e730cc
|
[
"MIT"
] | null | null | null |
easilyb/terminal.py
|
xaled/easilyb
|
cdb5f738205f700b37e03c50d04061a2d1e730cc
|
[
"MIT"
] | null | null | null |
import sys
# ANSI escape sequences that switch the terminal foreground color.
COLORS = {
    'blue': '\033[94m',
    'green': '\033[92m',
    'yellow': '\033[93m',
    'red': '\033[91m',
}
# Resets all terminal text attributes back to the default.
END_COLOR = '\033[0m'
def cprint(*args, sep=' ', end='\n', file=None, color=None):
    """Like built-in print(), optionally wrapping the output in ANSI color codes.

    Unknown color names are silently ignored and the text is printed plain.
    """
    stream = file or sys.stdout
    if color is not None and color in COLORS:
        # Surround the payload with the color escape and the reset sequence.
        args = [COLORS[color]] + list(args) + [END_COLOR]
    print(*args, file=stream, sep=sep, end=end)
def ctext(txt, color):
    """Return *txt* wrapped in the ANSI escape sequence for *color*."""
    return ''.join((COLORS[color], txt, END_COLOR))
def yes_no_prompt(message, tries=10, default='n', color=None, trailing=': '):
    """Ask a yes/no question; return True iff the (case-insensitive) answer is 'y'."""
    answer = prompt(message, choices=['y', 'n'], default=default, color=color,
                    trailing=trailing, tries=tries)
    return answer.lower() == 'y'
def prompt(message, match=None, match_function=None, choices=None, tries=10, default=None, allow_empty=False,
           color=None, trailing=': ', case_sensitive=False, description=None):
    """Prompt the user for input and validate the answer.

    Validation is done by exactly one of: membership in ``choices``, a regex
    ``match`` object, a ``match_function`` predicate, or (when none of those
    are given) simple non-emptiness (``allow_empty`` permits empty answers).
    An empty answer returns ``default`` when one is set.  Raises ValueError
    after ``tries`` invalid answers.

    Fix: the case-insensitive lowering of ``choices`` previously ran twice;
    the redundant second pass has been removed (behavior unchanged, since
    lowering an already-lowered list is a no-op).
    """
    t = 0
    prompt_message = message
    default_showed = default is None
    # Normalize choices once for case-insensitive comparison.
    if not case_sensitive and choices is not None:
        choices = [c.lower() for c in choices]
    if choices is not None:
        if default is not None and default in choices and not case_sensitive:
            # Upper-case the default choice so the hint reads e.g. [y/N].
            prompt_message += ' [%s]' % '/'.join([c if c != default else c.upper() for c in choices])
            default_showed = True
        else:
            prompt_message += ' [%s]' % '/'.join(choices)
    # description & default value
    if description is not None:
        prompt_message += ' (%s)' % description
    elif not default_showed:
        prompt_message += ' (Default: %s)' % default
    # trailing
    prompt_message += trailing
    if color is not None and color in COLORS:
        prompt_message = COLORS[color] + prompt_message + END_COLOR
    while t < tries:
        data = input(prompt_message)
        datac = data if case_sensitive else data.lower()
        if len(data.strip()) == 0 and default is not None:
            return default
        if choices is not None:
            if datac in choices:
                return data
        elif match is not None:
            if match.match(datac):
                return data
        elif match_function is not None:
            if match_function(datac):
                return data
        else:
            if allow_empty or len(data.strip()) > 0:
                return data
        t += 1
    raise ValueError("Max tries reached! data entered by the user is not valid!")
| 32.666667
| 117
| 0.598901
|
4a0f0282b864c599c95b7425264e389867317025
| 1,520
|
py
|
Python
|
tests/implementation/testsuites/smoke_tests/edentest_smoke.py
|
whanderley/eden
|
08ced3be3d52352c54cbd412ed86128fbb68b1d2
|
[
"MIT"
] | 205
|
2015-01-20T08:26:09.000Z
|
2022-03-27T19:59:33.000Z
|
tests/implementation/testsuites/smoke_tests/edentest_smoke.py
|
nursix/eden-asp
|
e49f46cb6488918f8d5a163dcd5a900cd686978c
|
[
"MIT"
] | 249
|
2015-02-10T09:56:35.000Z
|
2022-03-23T19:54:36.000Z
|
tests/implementation/testsuites/smoke_tests/edentest_smoke.py
|
nursix/eden-asp
|
e49f46cb6488918f8d5a163dcd5a900cd686978c
|
[
"MIT"
] | 231
|
2015-02-10T09:33:17.000Z
|
2022-02-18T19:56:05.000Z
|
# -*- coding: utf-8 -*-
# Link protocols the smoke test must never follow.
SKIP_PROTOCOLS = ("javascript", "ftp")


class edentest_smoke(object):
    """ Smoke Test, visit every link it can find and report on the outcome """

    def __init__(self, base_url, do_not_follow, ext_links):
        self.base_url = base_url
        self.do_not_follow = do_not_follow
        self.follow_external_links = ext_links

    def check_if_url_should_be_skipped(self, url):
        """Return 1 when *url* must not be visited, otherwise 0."""
        if not url:
            return 1
        if "/admin/user" in url and "/disable" in url:
            # Bad idea to disable user accounts during smoke tests
            return 1
        if not self.follow_external_links and self.base_url not in url:
            return 1
        if url.startswith(tuple("%s:" % p for p in SKIP_PROTOCOLS)):
            return 1
        if any(ignore in url for ignore in self.do_not_follow):
            return 1
        return 0

    @staticmethod
    def strip_url_of_unwanted_parts(url):
        """Drop query markers (e.g. ``?_next=``) so equivalent pages dedupe."""
        for marker in ("?_next=",):
            url = url.split(marker)[0]
        return url

    @staticmethod
    def increment_urls_failed(lf, status):
        """Return *lf* incremented by one when *status* is non-zero."""
        return lf + 1 if status != 0 else lf

    @staticmethod
    def check_if_not_already_added_or_visited(visited, to_visit, url_list, url):
        """Return 1 when *url* already appears in any of the three collections, else 0."""
        return 1 if (url in visited or url in to_visit or url in url_list) else 0
| 24.516129
| 80
| 0.574342
|
4a0f04a34a488f856a6ca6637628226bdea93a6d
| 24
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Rounding and clipping/clip Clip (limit) the values in an array.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Rounding and clipping/clip Clip (limit) the values in an array.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Rounding and clipping/clip Clip (limit) the values in an array.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
# Clip (limit) the values in array `a` to the inclusive range [a_min, a_max].
np.clip(a, a_min, a_max)
| 24
| 24
| 0.708333
|
4a0f054a29b2817babcfacd2adf72084869d07d4
| 4,012
|
py
|
Python
|
sauron/__init__.py
|
bantini/Sauron
|
131079fe66ea184ae3add4ea8124b2e0f0e8937e
|
[
"Apache-2.0"
] | null | null | null |
sauron/__init__.py
|
bantini/Sauron
|
131079fe66ea184ae3add4ea8124b2e0f0e8937e
|
[
"Apache-2.0"
] | null | null | null |
sauron/__init__.py
|
bantini/Sauron
|
131079fe66ea184ae3add4ea8124b2e0f0e8937e
|
[
"Apache-2.0"
] | null | null | null |
from sauron.machine_monitor import MachineMonitor
from sauron.ping_monitor import PingMonitor
from sauron.process_monitor import ProcessMonitor
from sauron.stat_aggregator import machine_stat_aggregator, ping_stat_aggregator, process_stat_aggregator
from sauron.machine_warning_generator import *
from sauron.ping_warning_generator import *
from sauron.process_warning_generator import *
class Sauron(object):
    """Facade over the machine/ping/process monitors and their stat aggregators.

    Improvements over the previous revision (all backward compatible):
    * the hard-coded ping config path and 'node' process name are now
      keyword parameters with the old values as defaults;
    * ``threshold=0`` is honoured (the old ``if threshold:`` check silently
      replaced it with the default);
    * the disk threshold local is no longer misnamed ``mem_threshold``.
    """

    def __init__(self):
        pass

    def get_server_stats(self,
                         ping_config_path='/Users/nilayan/Documents/Sauron/sauron/config/config.json',
                         process_name='node'):
        """Collect machine, ping and process statistics and aggregate them.

        Returns a 3-tuple of (machine, ping, process) aggregated stats.
        The defaults preserve the previously hard-coded values.
        """
        machine_stats = MachineMonitor().get_machine_info()
        ping_stats = PingMonitor(ping_config_path).get_ping_info()
        process_stats = ProcessMonitor(process_name).get_process_info()
        return (machine_stat_aggregator(machine_stats),
                ping_stat_aggregator(ping_stats),
                process_stat_aggregator(process_stats))

    def get_cpu_health(self):
        """Print whether system CPU usage currently exceeds its idle time."""
        machine_stats = MachineMonitor().get_machine_info()
        if cpu_warning_generator(machine_stats['cpu_times']):
            print("Warning. CPU usage of system is exceeding idle time")
        else:
            print("CPU health is fine")

    def get_memory_health(self, threshold=None):
        """Print a warning when free memory drops below ``threshold`` bytes (default 500 MB)."""
        machine_stats = MachineMonitor().get_machine_info()
        mem_threshold = threshold if threshold is not None else 1024 * 1024 * 500  # 500 MB
        if memory_warning_generator(machine_stats['virtual_memory'], mem_threshold):
            print("Warning. Only 500 MB of memory left")
        else:
            print("Memory health is fine")

    def get_disk_health(self, threshold=None):
        """Print a warning when free space on '/' drops below ``threshold`` bytes (default 1 GB)."""
        machine_stats = MachineMonitor().get_machine_info()
        disk_threshold = threshold if threshold is not None else 1024 * 1024 * 1024  # 1 GB
        if disk_warning_generator(machine_stats['disk_usage']['/'], disk_threshold):
            print("Warning. Over 80 percent of disk used")
        else:
            print("Disk health is fine")

    def get_ping_health(self, config_path=None):
        """Ping the configured endpoints and print warnings for unhealthy ones.

        Falls back to './config/config.json' when ``config_path`` is omitted;
        an unreadable config file is reported rather than raised.
        """
        try:
            ping_monitor = PingMonitor(config_path if config_path else './config/config.json')
            api_warning_generator(ping_monitor.get_ping_info())
        except IOError:
            print("Error! Please provide correct file path to config file")

    def get_process_cpu_health(self, threshold=None, process_name='node'):
        """Warn when the monitored process' CPU percentage exceeds ``threshold`` (default 1)."""
        process_stats = ProcessMonitor(process_name).get_process_info()
        cpu_threshold = threshold if threshold is not None else 1
        process_cpu_percent_generator(process_stats['cpu_percent'], cpu_threshold)

    def get_process_memory_health(self, threshold=None, process_name='node'):
        """Warn when the monitored process' memory percentage exceeds ``threshold`` (default 1)."""
        process_stats = ProcessMonitor(process_name).get_process_info()
        process_threshold = threshold if threshold is not None else 1
        process_memory_percent_generator(process_stats['memory_percent'], process_threshold)
if __name__ == "__main__":
    # Smoke-test entry point: collect and print the aggregated server stats.
    sauron = Sauron()
    mc_stats = sauron.get_server_stats()
    print(mc_stats)
| 38.951456
| 105
| 0.671236
|
4a0f05a856b6171880167cb4358f828104143672
| 484
|
py
|
Python
|
wtforms_widgets/base_form.py
|
agdsn/wtforms-widgets
|
60db4a00376dffa359e8e9e8ff0c9828363b9c4c
|
[
"Apache-2.0"
] | 2
|
2021-03-21T07:11:47.000Z
|
2021-04-26T22:54:44.000Z
|
wtforms_widgets/base_form.py
|
agdsn/wtforms-widgets
|
60db4a00376dffa359e8e9e8ff0c9828363b9c4c
|
[
"Apache-2.0"
] | 4
|
2020-06-07T21:29:42.000Z
|
2021-08-24T02:48:18.000Z
|
wtforms_widgets/base_form.py
|
agdsn/wtforms-widgets
|
60db4a00376dffa359e8e9e8ff0c9828363b9c4c
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
from flask_wtf import FlaskForm as Form
class BaseForm(Form):
    """Flask-WTF form that honours an optional ``_order`` attribute.

    When ``_order`` lists field names, those fields are moved to the front of
    ``_fields`` (in the listed order) before iteration begins, so templates
    render them first.
    """

    def __iter__(self):
        order = getattr(self, '_order', [])
        if order:
            # Pull the ordered fields out first, then append the remainder.
            reordered = OrderedDict((name, self._fields.pop(name)) for name in order)
            reordered.update(self._fields)
            self._fields = reordered
        return super(BaseForm, self).__iter__()
| 23.047619
| 61
| 0.634298
|
4a0f067fb82846b91860009d2a5b771a40d9f2e9
| 219
|
py
|
Python
|
core/model/search_result_model.py
|
averak/google-results-extractor
|
9585a8836d3169db6537a420df22a441692381f2
|
[
"MIT"
] | null | null | null |
core/model/search_result_model.py
|
averak/google-results-extractor
|
9585a8836d3169db6537a420df22a441692381f2
|
[
"MIT"
] | null | null | null |
core/model/search_result_model.py
|
averak/google-results-extractor
|
9585a8836d3169db6537a420df22a441692381f2
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class SearchResultModel(BaseModel):
    """A single search result; identity is defined solely by its URL."""
    title: str
    url: str

    def __eq__(self, other):
        # Fix: comparing against a non-SearchResultModel previously raised
        # AttributeError (no `.url`); returning NotImplemented lets Python
        # fall back to the other operand's comparison (or inequality).
        if not isinstance(other, SearchResultModel):
            return NotImplemented
        return self.url == other.url

    def __hash__(self):
        # Consistent with __eq__: hash only the URL.
        return hash(self.url)
| 16.846154
| 36
| 0.652968
|
4a0f0824a76d7a8d2282d7e498bd8706cea0caa4
| 2,191
|
py
|
Python
|
pontoon/tags/utils/translations.py
|
timvisee/pontoon
|
aec1ef7b5c5d56c3be28fecf1147945d2622bbad
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/tags/utils/translations.py
|
timvisee/pontoon
|
aec1ef7b5c5d56c3be28fecf1147945d2622bbad
|
[
"BSD-3-Clause"
] | 4
|
2021-01-12T12:00:43.000Z
|
2021-12-13T20:49:04.000Z
|
pontoon/tags/utils/translations.py
|
sa-tasche/pontoon
|
f02cb9e92add6c035b1cbe66c301afcd95ae0489
|
[
"BSD-3-Clause"
] | 3
|
2017-10-15T13:15:47.000Z
|
2018-10-10T09:03:19.000Z
|
from django.db.models import Max, Q
from .base import TagsTRTool
class TagsLatestTranslationsTool(TagsTRTool):
    """For given filters this tool will find the latest ``Translations``
    for a ``Tag``. It uses TranslatedResources to find the translations
    but returns translations.
    """
    # Filter steps applied when assembling the queryset (see TagsTRTool).
    filter_methods = ("tag", "projects", "latest", "locales", "path")
    # Per-group maxima of the newest edit / approval timestamps.
    _default_annotations = (
        ("date", Max("latest_translation__date")),
        ("approved_date", Max("latest_translation__approved_date")),
    )
    @property
    def groupby_prefix(self):
        # as we find latest_translations for translated_resources
        # and use that to retrieve the translations, we need to map the groupby
        # field here
        # NOTE(review): implicitly returns None for any other groupby —
        # confirm callers only ever group by tag or locale.
        groupby = list(self.get_groupby())
        if groupby == ["resource__tag"]:
            return "entity__resource__tag"
        elif groupby == ["locale"]:
            return "locale"
    def coalesce(self, data):
        """Index the resulting translations by their group key (tag or locale)."""
        return {
            translation[self.groupby_prefix]: translation
            for translation in data.iterator()
        }
    def get_data(self):
        """Return a values() queryset with the latest translation per group.

        For each group the newer of (last edit, last approval) determines
        which translation counts as "latest".
        """
        _translations = self.translation_manager.none()
        stats = super(TagsLatestTranslationsTool, self).get_data()
        for tr in stats.iterator():
            # Prefer the approval date when it is newer than the edit date.
            if tr["approved_date"] is not None and tr["approved_date"] > tr["date"]:
                key = "approved_date"
            else:
                key = "date"
            # find translations with matching date and tag/locale
            _translations |= self.translation_manager.filter(
                Q(**{key: tr[key], self.groupby_prefix: tr[self.get_groupby()[0]]})
            )
        return _translations.values(
            *(
                "string",
                "date",
                "approved_date",
                "approved_user__email",
                "approved_user__first_name",
                "approved_user__username",
                "user__email",
                "user__first_name",
                "user__username",
            )
            + (self.groupby_prefix,)
        )
    def filter_latest(self, qs):
        # Translated resources without any translation cannot contribute.
        return qs.exclude(latest_translation__isnull=True)
| 32.220588
| 84
| 0.582382
|
4a0f08a367f80c2130959f95b84825c704caee49
| 2,419
|
py
|
Python
|
sdk/scheduler/azure-mgmt-scheduler/tests/conftest.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-01-24T08:54:57.000Z
|
2022-01-24T08:54:57.000Z
|
sdk/scheduler/azure-mgmt-scheduler/tests/conftest.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/scheduler/azure-mgmt-scheduler/tests/conftest.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import os
import platform
import pytest
import sys
from dotenv import load_dotenv
from devtools_testutils import test_proxy, add_general_regex_sanitizer
from devtools_testutils import add_header_regex_sanitizer, add_body_key_sanitizer
# Ignore async tests for Python < 3.5
# NOTE(review): PyPy is excluded as well — presumably the async test helpers
# require CPython; confirm.
collect_ignore_glob = []
if sys.version_info < (3, 5) or platform.python_implementation() == "PyPy":
    collect_ignore_glob.append("*_async.py")
# Load environment variables (test credentials / ids) from a local .env file.
load_dotenv()
@pytest.fixture(scope="session", autouse=True)
def add_sanitizers(test_proxy):
    """Register test-proxy sanitizers that scrub ids, cookies and tokens from recordings."""
    placeholder = "00000000-0000-0000-0000-000000000000"
    subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", placeholder)
    tenant_id = os.environ.get("AZURE_TENANT_ID", placeholder)
    # Replace real subscription/tenant ids with the all-zero placeholder.
    add_general_regex_sanitizer(regex=subscription_id, value=placeholder)
    add_general_regex_sanitizer(regex=tenant_id, value=placeholder)
    # Strip cookies and access tokens from recorded traffic.
    add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
    add_header_regex_sanitizer(key="Cookie", value="cookie;")
    add_body_key_sanitizer(json_path="$..access_token", value="access_token")
| 47.431373
| 101
| 0.733774
|
4a0f08d3f9574c6c21fb445481ca0fca0fcd514f
| 15,376
|
py
|
Python
|
wiji/worker.py
|
komuw/xyzabc
|
80a3aafc6d098cc7af7e08d8ebdea7d55cef50b0
|
[
"MIT"
] | 4
|
2019-07-23T20:40:46.000Z
|
2019-08-16T15:30:54.000Z
|
wiji/worker.py
|
komuw/wiji
|
80a3aafc6d098cc7af7e08d8ebdea7d55cef50b0
|
[
"MIT"
] | 73
|
2019-02-28T10:16:12.000Z
|
2019-07-25T00:53:38.000Z
|
wiji/worker.py
|
komuw/xyzabc
|
80a3aafc6d098cc7af7e08d8ebdea7d55cef50b0
|
[
"MIT"
] | 1
|
2019-08-16T15:31:06.000Z
|
2019-08-16T15:31:06.000Z
|
import os
import json
import time
import random
import string
import typing
import logging
import asyncio
import datetime
from . import task
from . import protocol
from . import watchdog
from . import ratelimiter
class Worker:
"""
The only time this worker coroutine should ever raise an Exception is either:
- during class instantiation
- when the worker is about to start consuming tasks
"""
def __init__(
self,
the_task: task.Task,
worker_id: typing.Union[None, str] = None,
use_watchdog: bool = False,
watchdog_duration: float = 0.1,
) -> None:
"""
"""
self._validate_worker_args(
the_task=the_task,
worker_id=worker_id,
use_watchdog=use_watchdog,
watchdog_duration=watchdog_duration,
)
self._PID = os.getpid()
self.the_task = the_task
if worker_id is not None:
self.worker_id = worker_id
else:
self.worker_id = "".join(random.choices(string.ascii_uppercase + string.digits, k=17))
if typing.TYPE_CHECKING:
assert isinstance(self.the_task.log_metadata, dict)
self.the_task.log_metadata.update({"worker_id": self.worker_id, "process_id": self._PID})
self.the_task.logger.bind(
level=self.the_task.loglevel, log_metadata=self.the_task.log_metadata
)
self.use_watchdog = use_watchdog
self.watchdog_duration = watchdog_duration
self.watchdog = None
if self.use_watchdog:
if typing.TYPE_CHECKING:
assert isinstance(self.the_task.task_name, str)
self.watchdog = watchdog.BlockingWatchdog(
watchdog_duration=self.watchdog_duration, task_name=self.the_task.task_name
)
self.SHOULD_SHUT_DOWN: bool = False
self.SUCCESFULLY_SHUT_DOWN: bool = False
self.the_task._sanity_check_logger(event="worker_sanity_check_logger")
def _validate_worker_args(
self,
the_task: task.Task,
worker_id: typing.Union[None, str],
use_watchdog: bool,
watchdog_duration: float,
) -> None:
if not isinstance(the_task, task.Task):
raise ValueError(
"`the_task` should be of type:: `wiji.task.Task` You entered: {0}".format(
type(the_task) # for this `the_task._debug_task_name` may be unavailable
)
)
if not isinstance(worker_id, (type(None), str)):
raise ValueError(
"Task: {0}. `worker_id` should be of type:: `None` or `str` You entered: {1}".format(
the_task._debug_task_name, type(worker_id)
)
)
if not isinstance(use_watchdog, bool):
raise ValueError(
"Task: {0}. `use_watchdog` should be of type:: `bool` You entered: {1}".format(
the_task._debug_task_name, type(use_watchdog)
)
)
if not isinstance(watchdog_duration, float):
raise ValueError(
"Task: {0}. `watchdog_duration` should be of type:: `float` You entered: {1}".format(
the_task._debug_task_name, type(watchdog_duration)
)
)
def _log(self, level: typing.Union[str, int], log_data: dict) -> None:
try:
self.the_task.logger.log(level, log_data)
except Exception:
pass
@staticmethod
def _retry_after(current_retries: int) -> int:
"""
returns the number of seconds to retry after.
retries will happen in this sequence;
0.5min, 1min, 2min, 4min, 8min, 16min, 32min, 16min, 16min, 16min ...
"""
# TODO:
# 1. give users ability to bring their own retry algorithms.
if current_retries < 0:
current_retries = 0
jitter = random.randint(60, 180) # 1min-3min
if current_retries in [0, 1]:
return int(0.5 * 60) # 0.5min
elif current_retries == 2:
return 1 * 60
elif current_retries >= 6:
return (16 * 60) + jitter # 16 minutes + jitter
else:
return (60 * (2 ** current_retries)) + jitter
async def _notify_broker(self, item: str, queue_name: str, state: task.TaskState) -> None:
try:
await self.the_task.the_broker.done(queue_name=queue_name, item=item, state=state)
except Exception as e:
self._log(
logging.ERROR,
{
"event": "wiji.Worker.consume_tasks",
"stage": "end",
"state": "broker done error",
"error": str(e),
},
)
async def run_task(self, *task_args: typing.Any, **task_kwargs: typing.Any) -> None:
task_options = task_kwargs.pop("task_options", {})
await self.the_task._notify_hook(
task_id=task_options.get("task_id"),
state=task.TaskState.EXECUTING,
hook_metadata=task_options.get("hook_metadata"),
)
if self.watchdog is not None:
self.watchdog.notify_alive_before()
return_value = None
execution_exception = None
thread_time_start = time.thread_time()
perf_counter_start = time.perf_counter()
monotonic_start = time.monotonic()
process_time_start = time.process_time()
try:
return_value = await self.the_task.run(*task_args, **task_kwargs)
if self.the_task.the_chain and not self.the_task._RETRYING:
# enqueue the chained task using the return_value
await self.the_task.the_chain.delay(return_value)
if self.the_task._RETRYING:
# task is been retried
self._log(
logging.INFO,
{
"event": "wiji.Worker.run_task",
"state": "Task is been retried.",
"stage": "end",
"task_name": self.the_task.task_name,
"current_retries": task_options.get("current_retries"),
"max_retries": task_options.get("max_retries"),
},
)
except Exception as e:
execution_exception = e
self._log(
logging.ERROR,
{
"event": "wiji.Worker.run_task",
"stage": "end",
"state": "task execution error",
"error": str(e),
},
)
finally:
thread_time_end = time.thread_time()
perf_counter_end = time.perf_counter()
monotonic_end = time.monotonic()
process_time_end = time.process_time()
execution_duration = {
"thread_time": float("{0:.4f}".format(thread_time_end - thread_time_start)),
"perf_counter": float("{0:.4f}".format(perf_counter_end - perf_counter_start)),
"monotonic": float("{0:.4f}".format(monotonic_end - monotonic_start)),
"process_time": float("{0:.4f}".format(process_time_end - process_time_start)),
}
await self.the_task._notify_ratelimiter(
task_id=task_options.get("task_id"),
state=task.TaskState.EXECUTED,
execution_duration=execution_duration,
execution_exception=execution_exception,
return_value=return_value,
)
await self.the_task._notify_hook(
task_id=task_options.get("task_id"),
state=task.TaskState.EXECUTED,
hook_metadata=task_options.get("hook_metadata"),
execution_duration=execution_duration,
execution_exception=execution_exception,
return_value=return_value,
)
if self.watchdog is not None:
self.watchdog.notify_alive_after()
async def consume_tasks(
self, TESTING: bool = False
) -> typing.Union[None, typing.Dict[str, typing.Any]]:
"""
In loop; dequeues items from the :attr:`queue <Worker.queue>` and calls :func:`run <Worker.run>`.
Parameters:
TESTING: indicates whether this method is been called while running tests.
"""
# this can exit with error
await self.the_task._broker_check(from_worker=True)
if self.watchdog is not None:
self.watchdog.start()
dequeue_retry_count = 0
while True:
self._log(logging.INFO, {"event": "wiji.Worker.consume_tasks", "stage": "start"})
if self.SHOULD_SHUT_DOWN:
self._log(
logging.INFO,
{
"event": "wiji.Worker.consume_tasks",
"stage": "end",
"state": "cleanly shutting down worker.",
},
)
return None
try:
if typing.TYPE_CHECKING:
# make mypy happy
# https://github.com/python/mypy/issues/4805
assert isinstance(self.the_task.the_ratelimiter, ratelimiter.BaseRateLimiter)
# rate limit ourselves
await self.the_task.the_ratelimiter.limit()
except Exception as e:
self._log(
logging.ERROR,
{
"event": "wiji.Worker.consume_tasks",
"stage": "end",
"state": "consume_tasks error",
"error": str(e),
},
)
continue
try:
_dequeued_item: str = await self.the_task.the_broker.dequeue(
queue_name=self.the_task.queue_name
)
dequeued_item: dict = json.loads(_dequeued_item)
except Exception as e:
poll_queue_interval = self._retry_after(dequeue_retry_count)
dequeue_retry_count += 1
self._log(
logging.ERROR,
{
"event": "wiji.Worker.consume_tasks",
"stage": "end",
"state": "dequeue tasks failed. sleeping for {0}minutes".format(
poll_queue_interval / 60
),
"dequeue_retry_count": dequeue_retry_count,
"error": str(e),
},
)
await asyncio.sleep(poll_queue_interval)
continue
# dequeue succeded
dequeue_retry_count = 0
try:
_ = dequeued_item["version"]
_task_options = dequeued_item["task_options"]
task_id = _task_options["task_id"]
task_eta = _task_options["eta"]
task_current_retries = _task_options["current_retries"]
task_max_retries = _task_options["max_retries"]
task_hook_metadata = _task_options["hook_metadata"]
task_args = _task_options["args"]
task_kwargs = _task_options["kwargs"]
task_kwargs.update(
{
"task_options": {
"task_id": task_id,
"eta": task_eta,
"current_retries": task_current_retries,
"max_retries": task_max_retries,
"hook_metadata": task_hook_metadata,
}
}
)
except KeyError as e:
e = KeyError("enqueued message/object is missing required field: {}".format(str(e)))
self._log(
logging.ERROR,
{
"event": "wiji.Worker.consume_tasks",
"stage": "end",
"state": "consume_tasks error",
"error": str(e),
},
)
continue
await self.the_task._notify_hook(
task_id=task_id, state=task.TaskState.DEQUEUED, hook_metadata=task_hook_metadata
)
now = datetime.datetime.now(tz=datetime.timezone.utc)
if protocol.Protocol._from_isoformat(task_eta) <= now:
await self.run_task(*task_args, **task_kwargs)
await self._notify_broker(
item=_dequeued_item,
queue_name=self.the_task.queue_name,
state=task.TaskState.EXECUTED,
)
else:
# respect eta
task_kwargs.pop("task_options", None)
await self.the_task.delay(*task_args, **task_kwargs)
self._log(
logging.INFO,
{"event": "wiji.Worker.consume_tasks", "stage": "end", "task_id": task_id},
)
if TESTING:
# offer escape hatch for tests to come out of endless loop
task_kwargs.pop("task_options", None)
return dequeued_item
async def shutdown(self) -> None:
"""
Cleanly shutdown this worker.
"""
self._log(
logging.INFO,
{
"event": "wiji.Worker.shutdown",
"stage": "start",
"state": "intiating shutdown",
"drain_duration": self.the_task.drain_duration,
},
)
self.SHOULD_SHUT_DOWN = True
if self.watchdog is not None:
self.watchdog.stop()
# half spent waiting for the broker, the other half just sleeping
wait_duration = self.the_task.drain_duration / 2
try:
# asyncio.wait takes a python set as a first argument
# after expiration of timeout, asyncio.wait does not cancel the task;
# thus the broker shutdown can still continue on its own if it can.
await asyncio.wait(
{
self.the_task.the_broker.shutdown(
queue_name=self.the_task.queue_name, duration=wait_duration
)
},
timeout=wait_duration,
)
except Exception as e:
self._log(
logging.ERROR,
{
"event": "wiji.Worker.shutdown",
"stage": "end",
"state": "calling broker shutdown error",
"error": str(e),
},
)
# sleep so that worker can finish executing any tasks it had already dequeued.
# we need to use asyncio.sleep so that we do not block eventloop.
# this way, we do not prevent any other workers in the same loop from also shutting down cleanly.
await asyncio.sleep(wait_duration)
self.SUCCESFULLY_SHUT_DOWN = True
| 38.536341
| 105
| 0.524129
|
4a0f094a46dd62973c6ab900b9a819d17409fd21
| 2,579
|
py
|
Python
|
docs/conf.py
|
mkurop/kmeanspp
|
7944372b14a24c7b7c1a99839cf8d35aedd21dbb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
mkurop/kmeanspp
|
7944372b14a24c7b7c1a99839cf8d35aedd21dbb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
mkurop/kmeanspp
|
7944372b14a24c7b7c1a99839cf8d35aedd21dbb
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'KMeans++ using CuPy or NumPy'
copyright = '2021, Marcin Kuropatwiński'
author = 'Marcin Kuropatwiński'
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
latex_elements = {
"papersize": "letterpaper",
"pointsize": "10pt",
"figure_align": "htbp",
"preamble": r"""
\usepackage{listings}
\lstset{
language=Python, % the language of the code
title=\lstname % show the filename of files included with \lstinputlisting; also try caption instead of title
}
""",
}
| 33.934211
| 139
| 0.633191
|
4a0f0b4421291fdd5cc8eebdeeeb8430e0e96b30
| 4,027
|
py
|
Python
|
vendor/tweepy/cursor.py
|
aragilar/NewsBlur
|
64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9
|
[
"MIT"
] | 18
|
2015-07-29T07:14:41.000Z
|
2021-05-31T16:10:49.000Z
|
tweepy/cursor.py
|
Kudo/tweepy
|
bda2cf3c42013bfa5061bf44c37ec44689ab5d28
|
[
"MIT"
] | null | null | null |
tweepy/cursor.py
|
Kudo/tweepy
|
bda2cf3c42013bfa5061bf44c37ec44689ab5d28
|
[
"MIT"
] | 8
|
2016-02-05T10:13:40.000Z
|
2020-11-10T14:36:31.000Z
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
class Cursor(object):
"""Pagination helper class"""
def __init__(self, method, *args, **kargs):
if hasattr(method, 'pagination_mode'):
if method.pagination_mode == 'cursor':
self.iterator = CursorIterator(method, args, kargs)
else:
self.iterator = PageIterator(method, args, kargs)
else:
raise TweepError('This method does not perform pagination')
def pages(self, limit=0):
"""Return iterator for pages"""
if limit > 0:
self.iterator.limit = limit
return self.iterator
def items(self, limit=0):
"""Return iterator for items in each page"""
i = ItemIterator(self.iterator)
i.limit = limit
return i
class BaseIterator(object):
def __init__(self, method, args, kargs):
self.method = method
self.args = args
self.kargs = kargs
self.limit = 0
def next(self):
raise NotImplementedError
def prev(self):
raise NotImplementedError
def __iter__(self):
return self
class CursorIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.next_cursor = -1
self.prev_cursor = 0
self.count = 0
def next(self):
if self.next_cursor == 0 or (self.limit and self.count == self.limit):
raise StopIteration
data, cursors = self.method(
cursor=self.next_cursor, *self.args, **self.kargs
)
self.prev_cursor, self.next_cursor = cursors
if len(data) == 0:
raise StopIteration
self.count += 1
return data
def prev(self):
if self.prev_cursor == 0:
raise TweepError('Can not page back more, at first page')
data, self.next_cursor, self.prev_cursor = self.method(
cursor=self.prev_cursor, *self.args, **self.kargs
)
self.count -= 1
return data
class PageIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.current_page = 0
def next(self):
self.current_page += 1
items = self.method(page=self.current_page, *self.args, **self.kargs)
if len(items) == 0 or (self.limit > 0 and self.current_page > self.limit):
raise StopIteration
return items
def prev(self):
if (self.current_page == 1):
raise TweepError('Can not page back more, at first page')
self.current_page -= 1
return self.method(page=self.current_page, *self.args, **self.kargs)
class ItemIterator(BaseIterator):
def __init__(self, page_iterator):
self.page_iterator = page_iterator
self.limit = 0
self.current_page = None
self.page_index = -1
self.count = 0
def next(self):
if self.limit > 0 and self.count == self.limit:
raise StopIteration
if self.current_page is None or self.page_index == len(self.current_page) - 1:
# Reached end of current page, get the next page...
self.current_page = self.page_iterator.next()
self.page_index = -1
self.page_index += 1
self.count += 1
return self.current_page[self.page_index]
def prev(self):
if self.current_page is None:
raise TweepError('Can not go back more, at first page')
if self.page_index == 0:
# At the beginning of the current page, move to next...
self.current_page = self.page_iterator.prev()
self.page_index = len(self.current_page)
if self.page_index == 0:
raise TweepError('No more items')
self.page_index -= 1
self.count -= 1
return self.current_page[self.page_index]
| 31.217054
| 86
| 0.599702
|
4a0f0b61c3fdc9e43d8973c8eabd399136060715
| 1,747
|
py
|
Python
|
samples/core/loop_parameter/loop_parameter_test.py
|
algs/pipelines
|
ab63956f3a61d4d11b27ac26f097e1784588fed9
|
[
"Apache-2.0"
] | 1
|
2020-12-20T11:26:00.000Z
|
2020-12-20T11:26:00.000Z
|
samples/core/loop_parameter/loop_parameter_test.py
|
algs/pipelines
|
ab63956f3a61d4d11b27ac26f097e1784588fed9
|
[
"Apache-2.0"
] | null | null | null |
samples/core/loop_parameter/loop_parameter_test.py
|
algs/pipelines
|
ab63956f3a61d4d11b27ac26f097e1784588fed9
|
[
"Apache-2.0"
] | 1
|
2022-01-11T17:02:22.000Z
|
2022-01-11T17:02:22.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from .loop_parameter import my_pipeline
from ...test.util import run_pipeline_func, TestCase, NEEDS_A_FIX
run_pipeline_func([
# TODO(v2-compatible): fix this sample for v2 mode.
# V2_COMPATIBLE mode fails with:
# File "/Users/gongyuan/kfp/pipelines/sdk/python/kfp/compiler/v2_compat.py", line 108, in update_op
# artifact_info = {"fileInputPath": op.input_artifact_paths[artifact_name]}
# KeyError: 'text'
#
# And another error:
# This step is in Error state with this message: withParam value could not
# be parsed as a JSON list:
# {
# "id":"733",
# "typeId":"114",
# "uri":"gs://gongyuan-test/kfp/output/pipeline-with-loop-parameter/pipeline-with-loop-parameter-pn8nj/pipeline-with-loop-parameter-pn8nj-1197421549/data",
# "createTimeSinceEpoch":"1617689193656",
# "lastUpdateTimeSinceEpoch":"1617689193656"
# }
# TestCase(
# pipeline_func=my_pipeline,
# mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE,
# verify_func=NEEDS_A_FIX,
# ),
TestCase(
pipeline_func=my_pipeline,
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
),
])
| 37.978261
| 161
| 0.709788
|
4a0f0c0f3a5d4bd1e047c4b5f6488516b3dddcf1
| 1,395
|
py
|
Python
|
mqtt_client.py
|
AngelLiang/RabbitMQ-MQTT-Consumer-Demo
|
161e1622af5379776e6408a6f1bceb4bf57d40ea
|
[
"MIT"
] | null | null | null |
mqtt_client.py
|
AngelLiang/RabbitMQ-MQTT-Consumer-Demo
|
161e1622af5379776e6408a6f1bceb4bf57d40ea
|
[
"MIT"
] | null | null | null |
mqtt_client.py
|
AngelLiang/RabbitMQ-MQTT-Consumer-Demo
|
161e1622af5379776e6408a6f1bceb4bf57d40ea
|
[
"MIT"
] | null | null | null |
# coding=utf-8
try:
import paho.mqtt.client as mqtt
except Exception:
print('Please install paho-mqtt')
print('pip install paho-mqtt')
exit(0)
MQTT_HOST = "127.0.0.1"
MQTT_PORT = 1883
MQTT_CLIENT_ID = 'python_mqtt_client'
MQTT_USERNAME = 'guest'
MQTT_PASSWORD = 'guest'
MQTT_KEEPALIVE = 120
MQTT_SUB_TOPIC = '/get'
MQTT_PUB_TOPIC = '/update'
MQTT_SUB_QOS = 1
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.publish(MQTT_PUB_TOPIC, 'hello')
client.subscribe(MQTT_SUB_TOPIC, MQTT_SUB_QOS)
def on_message(client, userdata, msg):
topic = msg.topic
payload = msg.payload.decode()
print('[topic]:' + topic)
print('[payload]:' + str(payload))
def input_work():
try:
while 1:
data = input()
if data == 'q':
exit(0)
print('input:' + data)
if data:
mqttc.publish(MQTT_PUB_TOPIC, data)
print('publish:' + data)
except (KeyboardInterrupt, SystemExit):
exit(0)
if __name__ == "__main__":
print('To exit input q')
mqttc = mqtt.Client(MQTT_CLIENT_ID)
mqttc.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE)
mqttc.loop_start()
input_work()
| 22.142857
| 55
| 0.637993
|
4a0f0d3b9109eed3c3036e6bad35261f86ea105f
| 11,848
|
py
|
Python
|
pymc3/tests/test_examples.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 1
|
2018-06-11T03:13:00.000Z
|
2018-06-11T03:13:00.000Z
|
pymc3/tests/test_examples.py
|
shunanzhang/pymc3
|
fde52a4a69be1b0887a2f7861801fb48c941bbe6
|
[
"Apache-2.0"
] | 2
|
2017-03-02T05:56:13.000Z
|
2019-12-06T19:15:42.000Z
|
pymc3/tests/test_examples.py
|
shunanzhang/pymc3
|
fde52a4a69be1b0887a2f7861801fb48c941bbe6
|
[
"Apache-2.0"
] | 1
|
2018-10-08T10:27:35.000Z
|
2018-10-08T10:27:35.000Z
|
import matplotlib
import numpy as np
import pandas as pd
import pymc3 as pm
import theano.tensor as tt
import pytest
import theano
from pymc3.theanof import floatX
from .helpers import SeededTest
matplotlib.use('Agg', warn=False)
def get_city_data():
"""Helper to get city data"""
data = pd.read_csv(pm.get_data('srrs2.dat'))
cty_data = pd.read_csv(pm.get_data('cty.dat'))
data = data[data.state == 'MN']
data['fips'] = data.stfips * 1000 + data.cntyfips
cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
data = data.merge(cty_data, 'inner', on='fips')
unique = data[['fips']].drop_duplicates()
unique['group'] = np.arange(len(unique))
unique.set_index('fips')
return data.merge(unique, 'inner', on='fips')
class TestARM5_4(SeededTest):
def build_model(self):
data = pd.read_csv(pm.get_data('wells.dat'),
delimiter=u' ', index_col=u'id',
dtype={u'switch': np.int8})
data.dist /= 100
data.educ /= 4
col = data.columns
P = data[col[1:]]
P -= P.mean()
P['1'] = 1
with pm.Model() as model:
effects = pm.Normal('effects', mu=0, sd=100, shape=len(P.columns))
logit_p = tt.dot(floatX(np.array(P)), effects)
pm.Bernoulli('s', logit_p=logit_p, observed=floatX(data.switch.values))
return model
def test_run(self):
model = self.build_model()
with model:
pm.sample(50, tune=50)
class TestARM12_6(SeededTest):
def build_model(self):
data = get_city_data()
self.obs_means = data.groupby('fips').lradon.mean().as_matrix()
lradon = data.lradon.as_matrix()
floor = data.floor.as_matrix()
group = data.group.as_matrix()
with pm.Model() as model:
groupmean = pm.Normal('groupmean', 0, 10. ** -2.)
groupsd = pm.Uniform('groupsd', 0, 10.)
sd = pm.Uniform('sd', 0, 10.)
floor_m = pm.Normal('floor_m', 0, 5. ** -2.)
means = pm.Normal('means', groupmean, groupsd ** -2., shape=len(self.obs_means))
pm.Normal('lr', floor * floor_m + means[group], sd ** -2., observed=lradon)
return model
def too_slow(self):
model = self.build_model()
start = {'groupmean': self.obs_means.mean(),
'groupsd_interval__': 0,
'sd_interval__': 0,
'means': self.obs_means,
'floor_m': 0.,
}
with model:
start = pm.find_MAP(start=start,
vars=[model['groupmean'], model['sd_interval__'], model['floor_m']])
step = pm.NUTS(model.vars, scaling=start)
pm.sample(50, step=step, start=start)
class TestARM12_6Uranium(SeededTest):
def build_model(self):
data = get_city_data()
self.obs_means = data.groupby('fips').lradon.mean()
lradon = data.lradon.as_matrix()
floor = data.floor.as_matrix()
group = data.group.as_matrix()
ufull = data.Uppm.as_matrix()
with pm.Model() as model:
groupmean = pm.Normal('groupmean', 0, 10. ** -2.)
groupsd = pm.Uniform('groupsd', 0, 10.)
sd = pm.Uniform('sd', 0, 10.)
floor_m = pm.Normal('floor_m', 0, 5. ** -2.)
u_m = pm.Normal('u_m', 0, 5. ** -2)
means = pm.Normal('means', groupmean, groupsd ** -2., shape=len(self.obs_means))
pm.Normal('lr', floor * floor_m + means[group] + ufull * u_m, sd ** - 2.,
observed=lradon)
return model
def too_slow(self):
model = self.build_model()
with model:
start = pm.Point({
'groupmean': self.obs_means.mean(),
'groupsd_interval__': 0,
'sd_interval__': 0,
'means': np.array(self.obs_means),
'u_m': np.array([.72]),
'floor_m': 0.,
})
start = pm.find_MAP(start, model.vars[:-1])
H = model.fastd2logp()
h = np.diag(H(start))
step = pm.HamiltonianMC(model.vars, h)
pm.sample(50, step=step, start=start)
def build_disaster_model(masked=False):
disasters_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
if masked:
disasters_data[[23, 68]] = -1
disasters_data = np.ma.masked_values(disasters_data, value=-1)
years = len(disasters_data)
with pm.Model() as model:
# Prior for distribution of switchpoint location
switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=years)
# Priors for pre- and post-switch mean number of disasters
early_mean = pm.Exponential('early_mean', lam=1.)
late_mean = pm.Exponential('late_mean', lam=1.)
# Allocate appropriate Poisson rates to years before and after current
# switchpoint location
idx = np.arange(years)
rate = tt.switch(switchpoint >= idx, early_mean, late_mean)
# Data likelihood
pm.Poisson('disasters', rate, observed=disasters_data)
return model
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
class TestDisasterModel(SeededTest):
# Time series of recorded coal mining disasters in the UK from 1851 to 1962
def test_disaster_model(self):
model = build_disaster_model(masked=False)
with model:
# Initial values for stochastic nodes
start = {'early_mean': 2., 'late_mean': 3.}
# Use slice sampler for means (other varibles auto-selected)
step = pm.Slice([model.early_mean_log__, model.late_mean_log__])
tr = pm.sample(500, tune=50, start=start, step=step, chains=2)
pm.summary(tr)
def test_disaster_model_missing(self):
model = build_disaster_model(masked=True)
with model:
# Initial values for stochastic nodes
start = {'early_mean': 2., 'late_mean': 3.}
# Use slice sampler for means (other varibles auto-selected)
step = pm.Slice([model.early_mean_log__, model.late_mean_log__])
tr = pm.sample(500, tune=50, start=start, step=step, chains=2)
pm.summary(tr)
class TestGLMLinear(SeededTest):
def build_model(self):
size = 50
true_intercept = 1
true_slope = 2
self.x = np.linspace(0, 1, size)
self.y = true_intercept + self.x * true_slope + np.random.normal(scale=.5, size=size)
data = dict(x=self.x, y=self.y)
with pm.Model() as model:
pm.GLM.from_formula('y ~ x', data)
return model
def test_run(self):
with self.build_model():
start = pm.find_MAP(method="Powell")
pm.sample(50, pm.Slice(), start=start)
class TestLatentOccupancy(SeededTest):
"""
From the PyMC example list
latent_occupancy.py
Simple model demonstrating the estimation of occupancy, using latent variables. Suppose
a population of n sites, with some proportion pi being occupied. Each site is surveyed,
yielding an array of counts, y:
y = [3, 0, 0, 2, 1, 0, 1, 0, ..., ]
This is a classic zero-inflated count problem, where more zeros appear in the data than would
be predicted by a simple Poisson model. We have, in fact, a mixture of models; one, conditional
on occupancy, with a poisson mean of theta, and another, conditional on absence, with mean zero.
One way to tackle the problem is to model the latent state of 'occupancy' as a Bernoulli
variable at each site, with some unknown probability:
z_i ~ Bern(pi)
These latent variables can then be used to generate an array of Poisson parameters:
t_i = theta (if z_i=1) or 0 (if z_i=0)
Hence, the likelihood is just:
y_i = Poisson(t_i)
(Note in this elementary model, we are ignoring the issue of imperfect detection.)
Created by Chris Fonnesbeck on 2008-07-28.
Copyright (c) 2008 University of Otago. All rights reserved.
"""
def setup_method(self):
super(TestLatentOccupancy, self).setup_method()
# Sample size
n = 100
# True mean count, given occupancy
theta = 2.1
# True occupancy
pi = 0.4
# Simulate some data data
self.y = ((np.random.random(n) < pi) * np.random.poisson(lam=theta, size=n)).astype('int16')
def build_model(self):
with pm.Model() as model:
# Estimated occupancy
psi = pm.Beta('psi', 1, 1)
# Latent variable for occupancy
pm.Bernoulli('z', psi, shape=self.y.shape)
# Estimated mean count
theta = pm.Uniform('theta', 0, 100)
# Poisson likelihood
pm.ZeroInflatedPoisson('y', theta, psi, observed=self.y)
return model
def test_run(self):
model = self.build_model()
with model:
start = {
'psi': np.array(0.5, dtype='f'),
'z': (self.y > 0).astype('int16'),
'theta': np.array(5, dtype='f'),
}
step_one = pm.Metropolis([model.theta_interval__, model.psi_logodds__])
step_two = pm.BinaryMetropolis([model.z])
pm.sample(50, step=[step_one, step_two], start=start, chains=1)
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32 due to starting inf at starting logP")
class TestRSV(SeededTest):
'''
This model estimates the population prevalence of respiratory syncytial virus
(RSV) among children in Amman, Jordan, based on 3 years of admissions diagnosed
with RSV to Al Bashir hospital.
To estimate this parameter from raw counts of diagnoses, we need to establish
the population of 1-year-old children from which the diagnosed individuals
were sampled. This involved correcting census data (national estimate of
1-year-olds) for the proportion of the population in the city, as well as for
the market share of the hospital. The latter is based on expert esimate, and
hence encoded as a prior.
'''
def build_model(self):
# 1-year-old children in Jordan
kids = np.array([180489, 191817, 190830])
# Proportion of population in Amman
amman_prop = 0.35
# infant RSV cases in Al Bashir hostpital
rsv_cases = np.array([40, 59, 65])
with pm.Model() as model:
# Al Bashir hospital market share
market_share = pm.Uniform('market_share', 0.5, 0.6)
# Number of 1 y.o. in Amman
n_amman = pm.Binomial('n_amman', kids, amman_prop, shape=3)
# Prior probability
prev_rsv = pm.Beta('prev_rsv', 1, 5, shape=3)
# RSV in Amman
y_amman = pm.Binomial('y_amman', n_amman, prev_rsv, shape=3, testval=100)
# Likelihood for number with RSV in hospital (assumes Pr(hosp | RSV) = 1)
pm.Binomial('y_hosp', y_amman, market_share, observed=rsv_cases)
return model
def test_run(self):
with self.build_model():
pm.sample(50, step=[pm.NUTS(), pm.Metropolis()])
| 38.592834
| 129
| 0.582799
|
4a0f0e7d7176225d45bc8b8cb8e200f71fa45283
| 289
|
py
|
Python
|
iris_sdk/models/maps/lnpchecker_response.py
|
NumberAI/python-bandwidth-iris
|
0e05f79d68b244812afb97e00fd65b3f46d00aa3
|
[
"MIT"
] | 2
|
2020-04-13T13:47:59.000Z
|
2022-02-23T20:32:41.000Z
|
iris_sdk/models/maps/lnpchecker_response.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2020-09-18T20:59:24.000Z
|
2021-08-25T16:51:42.000Z
|
iris_sdk/models/maps/lnpchecker_response.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2018-12-12T14:39:50.000Z
|
2020-11-17T21:42:29.000Z
|
#!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class LnpCheckerResponseMap(BaseMap):
partner_supported_rate_centers = None
portable_numbers = None
supported_losing_carriers = None
supported_rate_centers = None
unsupported_rate_centers = None
| 26.272727
| 49
| 0.788927
|
4a0f0e9c4ab7a2bda70baf84112d05bd5e2d90b4
| 1,780
|
py
|
Python
|
src/dr_data/utilities/db.py
|
sunnysidesounds/dr-data
|
764e577a3a97144ad73ea1ada2ead9cbfebe5a37
|
[
"MIT"
] | 4
|
2022-01-05T22:53:16.000Z
|
2022-01-25T23:52:42.000Z
|
src/dr_data/utilities/db.py
|
sunnysidesounds/dr_data
|
764e577a3a97144ad73ea1ada2ead9cbfebe5a37
|
[
"MIT"
] | null | null | null |
src/dr_data/utilities/db.py
|
sunnysidesounds/dr_data
|
764e577a3a97144ad73ea1ada2ead9cbfebe5a37
|
[
"MIT"
] | null | null | null |
import logging
import sys
import psycopg2
from dr_data.static_strings import *
# Package metadata constants come from the star-import of dr_data.static_strings.
__author__ = AUTHOR
__copyright__ = COPYRIGHT
__license__ = LICENSE

# Module logger; root logging emits bare messages on stdout at INFO level.
_logger = logging.getLogger(__name__)
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
class DatabaseUtility:
    """
    Thin utility wrapper around a psycopg2 PostgreSQL connection.
    """

    def __init__(self, conn_info):
        """
        Constructor of the DatabaseUtility

        :param conn_info: database connection info, passed straight to
            ``psycopg2.connect``; must include a ``'database'`` key
        :type conn_info: dict
        """
        self.connection = psycopg2.connect(**conn_info)
        self.database = conn_info['database']
        self.cursor = self.connection.cursor()

    def truncate_db(self, username='postgres'):
        """
        Truncates all of the tables in the database owned by *username*.

        Installs (or replaces) a server-side ``truncate_tables`` plpgsql
        function that cascades TRUNCATE over every public-schema table
        owned by the given role, then invokes it.

        :param username: role whose public-schema tables are truncated;
            defaults to ``'postgres'`` for backward compatibility
        :type username: str
        :return: None
        :rtype: None
        """
        try:
            self.cursor.execute("""
            CREATE OR REPLACE FUNCTION truncate_tables(username IN VARCHAR) RETURNS void AS $$
            DECLARE
                statements CURSOR FOR
                    SELECT tablename FROM pg_tables
                    WHERE tableowner = username AND schemaname = 'public';
            BEGIN
                FOR stmt IN statements LOOP
                    EXECUTE 'TRUNCATE TABLE ' || quote_ident(stmt.tablename) || ' CASCADE;';
                END LOOP;
            END;
            $$ LANGUAGE plpgsql;""")
            # Parameterised so the owner name cannot inject SQL.
            self.cursor.execute("SELECT truncate_tables(%s);", (username,))
        except Exception as error:
            # Fail fast, matching the original behaviour: log, roll back,
            # release the cursor and abort the process.
            logging.info("- FAILED truncating db ")
            logging.info("\n")
            logging.info("ERROR: %s", error)
            self.connection.rollback()
            self.cursor.close()
            sys.exit()
        else:
            self.connection.commit()
| 31.22807
| 94
| 0.582584
|
4a0f0ebebcb2129d3fa0adcfb270f332acaa9bd0
| 6,750
|
py
|
Python
|
steam/errors.py
|
smtp639/steam.py
|
bd67e8d91ac17984ef0657fa4625eb2fca81fb68
|
[
"MIT"
] | null | null | null |
steam/errors.py
|
smtp639/steam.py
|
bd67e8d91ac17984ef0657fa4625eb2fca81fb68
|
[
"MIT"
] | null | null | null |
steam/errors.py
|
smtp639/steam.py
|
bd67e8d91ac17984ef0657fa4625eb2fca81fb68
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Optional
from bs4 import BeautifulSoup
from .enums import Result
if TYPE_CHECKING:
from aiohttp import ClientResponse
from .gateway import Msgs
# Public API for ``from steam.errors import *``.
__all__ = (
    "SteamException",
    "NotFound",
    "Forbidden",
    "LoginError",
    "NoCMsFound",
    "HTTPException",
    "ClientException",
    "ConfirmationError",
    "AuthenticatorError",
    "InvalidCredentials",
    "WSException",
    "WSForbidden",
    "WSNotFound",
    "InvalidSteamID",
)

# Matches a run of digits flanked by non-whitespace on both sides; used by
# HTTPException to scrape a numeric result code out of an error message.
CODE_FINDER = re.compile(r"\S(\d+)\S")
class SteamException(Exception):
    """The root exception type for every error raised by steam.py."""
class ClientException(SteamException):
    """Raised when something in the client fails.

    Subclass of :exc:`SteamException`.
    """
class HTTPException(SteamException):
    """Exception that's thrown for any web API error.

    Subclass of :exc:`SteamException`.

    Attributes
    ------------
    response: :class:`aiohttp.ClientResponse`
        The response of the failed HTTP request.
    message: :class:`str`
        The message associated with the error.
        Could be an empty string if no message can be parsed.
    status: :class:`int`
        The status code of the HTTP request.
    code: :class:`.Result`
        The Steam specific error code for the failure.
    """

    def __init__(self, response: ClientResponse, data: Optional[Any]):
        self.response = response
        self.status = response.status
        # Defaults used when no error payload can be parsed.
        self.code = Result.Invalid
        self.message = ""
        if data:
            if isinstance(data, dict):
                if len(data) != 1 and data.get("success", False):  # ignore {'success': False} as the message
                    # NOTE(review): this guard requires "success" to be truthy
                    # before parsing the error payload, which looks inverted for
                    # an error response — confirm against upstream steam.py.
                    # Best-effort message: explicit "message" key, else the
                    # first value in the payload.
                    message = data.get("message") or str(list(data.values())[0])
                    code = (
                        data.get("result")  # try the data if possible
                        or response.headers.get("X-Result")  # then the headers
                        or CODE_FINDER.findall(message)  # finally the message
                    )
                    if code:
                        if isinstance(code, list):
                            # Code was scraped from the message text, so strip
                            # it back out of the human-readable part.
                            self.message = CODE_FINDER.sub("", message)
                            code = code[0]
                        self.code = Result.try_value(int(code))
            else:
                # Non-dict payload: treat it as HTML and keep its visible text.
                text = BeautifulSoup(data, "html.parser").get_text("\n")
                self.message = text or ""
        # NOTE(review): as rendered this replace is a no-op (space -> space);
        # the upstream source presumably collapses double spaces — confirm.
        self.message = self.message.replace(" ", " ")
        super().__init__(
            f"{response.status} {response.reason} (error code: {self.code})"
            f"{f': {self.message}' if self.message else ''}"
        )
class Forbidden(HTTPException):
    """Raised when an HTTP request comes back with status code 403.

    Subclass of :exc:`HTTPException`.
    """
class NotFound(HTTPException):
    """Raised when an HTTP request comes back with status code 404.

    Subclass of :exc:`HTTPException`.
    """
class WSException(SteamException):
    """Raised for any web API error reported over the websocket.

    Similar to :exc:`HTTPException`; subclass of :exc:`SteamException`.

    Attributes
    ------------
    msg: Union[:class:`~steam.protobufs.MsgProto`, :class:`~steam.protobufs.Msg`]
        The received protobuf.
    message: Optional[:class:`str`]
        The message that Steam sent back with the request, could be ``None``.
    code: :class:`~steam.Result`
        The Steam specific error code for the failure. It will attempt to find a
        matching :class:`~steam.Result` for the value.
    """

    def __init__(self, msg: Msgs):
        self.msg = msg
        self.code = msg.result or Result.Invalid
        self.message = getattr(msg.header.body, "error_message", None)
        detail = f": {self.message}" if self.message else ""
        super().__init__(
            f"The request {msg.header.body.job_name_target} failed. (error code: {self.code!r}){detail}"
        )
class WSForbidden(WSException):
    """Raised when the websocket returns a :class:`.Result` meaning we lack
    permission to perform an action. Similar to :exc:`Forbidden`.

    Subclass of :exc:`WSException`.
    """
class WSNotFound(WSException):
    """Raised when the websocket returns a :class:`.Result` meaning the object
    wasn't found. Similar to :exc:`NotFound`.

    Subclass of :exc:`WSException`.
    """
class LoginError(SteamException):
    """Raised when a login attempt fails.

    Subclass of :exc:`SteamException`.
    """
class InvalidCredentials(LoginError):
    """Raised when the supplied credentials are incorrect.

    Subclass of :exc:`LoginError`.
    """
class AuthenticatorError(ClientException):
    """Exception that's thrown when Steam cannot authenticate your details.

    Subclass of :exc:`ClientException`.
    """
class ConfirmationError(AuthenticatorError):
    """Raised when a mobile confirmation fails.

    Subclass of :exc:`AuthenticatorError`.
    """
class NoCMsFound(LoginError):
    """Raised when no connection managers (CMs) can be found to connect to.

    Subclass of :exc:`LoginError`.
    """
class InvalidSteamID(SteamException):
    """Raised when a value cannot be converted to a valid SteamID.

    Subclass of :exc:`SteamException`.

    Attributes
    ----------
    id: :class:`int`
        The invalid id.
    """

    def __init__(self, id: Any, msg: Optional[str] = None):
        self.id = id
        reason = f"as {msg}" if msg is not None else ""
        super().__init__(
            f"{id!r} cannot be converted to any valid SteamID {reason}".strip()
        )
| 29.735683
| 114
| 0.645778
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.