| id | content |
|---|---|
440651
|
from atlassian import Jira
# One or more issue keys can be added to the sprint at once
issues_lst = ["APA-1", "APA-2"]
sprint_id = 103
jira = Jira(url="http://localhost:8080", username="admin", password="<PASSWORD>")
resp = jira.add_issues_to_sprint(sprint_id=sprint_id, issues=issues_lst)
|
440687
|
from ub import bot
from ub.utils import admin_cmd
@bot.on(admin_cmd(pattern=r"space"))
async def space(e):
await e.edit("ㅤ")
@bot.on(admin_cmd(pattern=r"blank"))
async def blank(e):
await e.edit("")
|
440715
|
from veros.setups.global_flexible.global_flexible import GlobalFlexibleResolutionSetup # noqa: F401
|
440719
|
import pytest
from model_mommy import mommy
from rest_framework import status
from usaspending_api.common.helpers.fiscal_year_helpers import current_fiscal_year
url = "/api/v2/agency/{toptier_code}/recipients/{filter}"
recipient_agency_list = [
{
"id": 1,
"fiscal_year": 2021,
"toptier_code": "015",
"recipient_hash": "b4ba7c9d-a682-6f9a-7bc3-edc035067702",
"recipient_name": "DYNEGY ENERGY SERVICES (EAST), LLC",
"recipient_amount": 168000,
},
{
"id": 2,
"fiscal_year": 2021,
"toptier_code": "015",
"recipient_hash": "34fc1b0c-95f3-035d-42a9-d745257b8faf",
"recipient_name": "EMA FOODS CO., LLC",
"recipient_amount": 21532,
},
{
"id": 3,
"fiscal_year": 2021,
"toptier_code": "015",
"recipient_hash": "5941fc42-967f-ce5b-2f94-97997ba30637",
"recipient_name": "FOUR POINTS TECHNOLOGY,L.L.C.",
"recipient_amount": 34029.61,
},
{
"id": 4,
"fiscal_year": 2021,
"toptier_code": "015",
"recipient_hash": "a289842c-234a-0adf-6930-b7ac4f7282f6",
"recipient_name": "QUEST DIAGNOSTICS INCORPORATED",
"recipient_amount": 450000,
},
{
"id": 5,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "5932489a-47f0-360c-80f8-ef9cc27e443f",
"recipient_name": "ALL CLEAN WATER SOLUTIONS, LLC",
"recipient_amount": 2300.6,
},
{
"id": 6,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "9468f690-7dbc-eabb-00ac-5d5566db1b4c",
"recipient_name": "<NAME>, INC.",
"recipient_amount": 33350,
},
{
"id": 7,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "02846869-66a8-d17d-0527-9ba71f4e000a",
"recipient_name": "ADAPT PHARMA INC.",
"recipient_amount": 1696.8,
},
{
"id": 8,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "16e8fb6e-cd35-e399-3add-27bee212372f",
"recipient_name": "ALFONSO & ASSOCIATES CONSULTING, INC.",
"recipient_amount": 58937.96,
},
{
"id": 9,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "d29c73cf-db68-e237-70eb-d2392fde1298",
"recipient_name": "ALPHA SIX CORPORATION",
"recipient_amount": 933821,
},
{
"id": 10,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "bf66baaf-2a72-ad0c-2def-bc87400a6911",
"recipient_name": "AMERICAN CORRECTIONAL HEALTHCARE, INC.",
"recipient_amount": 9900,
},
{
"id": 11,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "c5f87afe-f876-302d-1680-20c49c01abc7",
"recipient_name": "AMERICAN SANITARY PRODUCTS, INC.",
"recipient_amount": 6566,
},
{
"id": 12,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "7a3b7575-de02-32b6-ea5f-d5e37b27ce3d",
"recipient_name": "AT&T MOBILITY LLC",
"recipient_amount": 15365.79,
},
{
"id": 13,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "37cc9842-7c9f-bdbb-2540-e6a71e850c8e",
"recipient_name": "BLAUER MANUFACTURING CO, INC.",
"recipient_amount": 1965.75,
},
{
"id": 14,
"fiscal_year": 2020,
"toptier_code": "015",
"recipient_hash": "f5a41291-f5fe-f5cd-a21f-61fd556bf181",
"recipient_name": "BLUE CONSTRUCTION SERVICES LLC",
"recipient_amount": 479188,
},
{
"id": 15,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "4bc16929-fd89-9dac-4c4c-914381f2f65f",
"recipient_name": "INSPECTION EXPERTS,INC.",
"recipient_amount": 11721,
},
{
"id": 16,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "7e7f87d3-e58b-9434-3830-7f838baecb77",
"recipient_name": "INSURANCE AUSTRALIA LIMITED",
"recipient_amount": 11818.01,
},
{
"id": 17,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "45d708ee-157f-8ff0-235d-37f6797a104c",
"recipient_name": "INTERIM HOMES, INC.",
"recipient_amount": 5113080,
},
{
"id": 18,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "651bc003-aa61-2b35-8598-9215c6d2699e",
"recipient_name": "IRON BOW TECHNOLOGIES, LLC",
"recipient_amount": 30654,
},
{
"id": 19,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "68d7743b-8df8-2970-11bc-6c5d9f523508",
"recipient_name": "<NAME>, INC.",
"recipient_amount": 3465,
},
{
"id": 20,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "fc7c787c-0a87-88d5-b7ac-49ae545875ad",
"recipient_name": "<NAME>URIST TRANSPORT CO.LTD./JETT",
"recipient_amount": 1079.1,
},
{
"id": 21,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "8b08a942-b97d-b64a-bf68-25f436a29e27",
"recipient_name": "<NAME>",
"recipient_amount": 4860,
},
{
"id": 22,
"fiscal_year": 2020,
"toptier_code": "019",
"recipient_hash": "2ae6ba54-eefe-348c-e889-2c6ddc9554a8",
"recipient_name": "KENJYA-TRUSANT GROUP, LLC, THE",
"recipient_amount": 133961.23,
},
]
@pytest.fixture
def recipient_agency_data():
toptier_agency_1 = mommy.make("references.ToptierAgency", toptier_code="015", name="Agency 1", abbreviation="A1")
toptier_agency_2 = mommy.make("references.ToptierAgency", toptier_code="019", name="Agency 2", abbreviation="A2")
mommy.make("references.Agency", toptier_agency=toptier_agency_1, toptier_flag=True, user_selectable=True)
mommy.make("references.Agency", toptier_agency=toptier_agency_2, toptier_flag=True, user_selectable=True)
dabs1 = mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_fiscal_year=2020,
submission_fiscal_month=12,
submission_fiscal_quarter=4,
submission_reveal_date="1999-01-01",
)
dabs2 = mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_fiscal_year=2021,
submission_fiscal_month=12,
submission_fiscal_quarter=4,
submission_reveal_date="1999-01-01",
)
mommy.make("submissions.SubmissionAttributes", toptier_code=toptier_agency_1.toptier_code, submission_window=dabs1)
mommy.make("submissions.SubmissionAttributes", toptier_code=toptier_agency_1.toptier_code, submission_window=dabs2)
mommy.make("submissions.SubmissionAttributes", toptier_code=toptier_agency_2.toptier_code, submission_window=dabs1)
for recipient_lookup in recipient_agency_list:
mommy.make("recipient.RecipientAgency", **recipient_lookup)
@pytest.mark.django_db
def test_basic_success(client, monkeypatch, recipient_agency_data):
resp = client.get(url.format(toptier_code="015", filter="?fiscal_year=2021"))
assert resp.status_code == status.HTTP_200_OK
expected_results = {
"toptier_code": "015",
"fiscal_year": 2021,
"count": 4,
"total_federal_count": 4,
"max": 450000.0,
"min": 21532.0,
"25th_percentile": 21532.0,
"50th_percentile": 34029.61,
"75th_percentile": 168000.0,
"messages": [],
}
assert resp.json() == expected_results
resp = client.get(url.format(toptier_code="015", filter="?fiscal_year=2020"))
assert resp.status_code == status.HTTP_200_OK
expected_results = {
"toptier_code": "015",
"fiscal_year": 2020,
"count": 10,
"total_federal_count": 18,
"max": 933821.0,
"min": 1696.8,
"25th_percentile": 2300.6,
"50th_percentile": 9900.0,
"75th_percentile": 58937.96,
"messages": [],
}
assert resp.json() == expected_results
resp = client.get(url.format(toptier_code="019", filter="?fiscal_year=2020"))
assert resp.status_code == status.HTTP_200_OK
expected_results = {
"toptier_code": "019",
"fiscal_year": 2020,
"count": 8,
"total_federal_count": 18,
"max": 5113080.0,
"min": 1079.1,
"25th_percentile": 3465.0,
"50th_percentile": 11721.0,
"75th_percentile": 30654.0,
"messages": [],
}
assert resp.json() == expected_results
@pytest.mark.django_db
def test_invalid_fiscal_year(client, recipient_agency_data):
query_params = f"?fiscal_year={current_fiscal_year() + 1}"
resp = client.get(url.format(toptier_code="015", filter=query_params))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_invalid_agency(client, recipient_agency_data):
resp = client.get(url.format(toptier_code="999", filter=""))
assert resp.status_code == status.HTTP_404_NOT_FOUND
|
440724
|
from selftarget.util import runPerSubdir
if __name__ == '__main__':
runPerSubdir('compile_i1.py', 'out_i1', __file__, extra_args='. ')
|
440738
|
from schemer import Schema, Array
from schemer.validators import one_of
from .test_schema import test_schema
from .asset_schema import asset_schema
def image_build_isolation_validator():
def validator(document):
if 'image' in document and 'build' in document:
return 'Only one of image and build is allowed in app schema'
elif 'image' not in document and 'build' not in document:
return 'Need to have at least one of `image` or `build` in app schema'
return validator
def repo_mount_validator():
"""If either repo or mount are provided, they must both be provided."""
def validator(document):
if 'repo' in document and 'mount' in document:
return
elif 'repo' not in document and 'mount' not in document:
return
return 'If either `repo` or `mount` are provided, they must both be provided.'
return validator
app_depends_schema = Schema({
'services': {'type': Array(basestring), 'default': list},
'apps': {'type': Array(basestring), 'default': list},
'libs': {'type': Array(basestring), 'default': list}
})
conditional_links_schema = Schema({
'services': {'type': Array(basestring), 'default': list},
'apps': {'type': Array(basestring), 'default': list},
})
host_forwarding_schema = Schema({
'host_name': {'type': basestring},
'host_port': {'type': int},
'container_port': {'type': int},
'type': {'type': basestring, 'default': 'http', 'validates': one_of('http', 'stream')}
})
commands_schema = Schema({
'always': {'type': Array(basestring), 'required': True, 'default': list},
'once': {'type': Array(basestring), 'default': list}
})
script_schema = Schema({
'name': {'type': basestring, 'required': True},
'description': {'type': basestring},
'command': {'type': Array(basestring), 'required': True}
})
dusty_app_compose_schema = Schema({
'volumes': {'type': Array(basestring), 'default': list}
}, strict=False)
app_schema = Schema({
'repo': {'type': basestring, 'default': str},
'depends': {'type': app_depends_schema, 'default': dict},
'conditional_links': {'type': conditional_links_schema, 'default': dict},
'host_forwarding': {'type': Array(host_forwarding_schema), 'default': list},
'image': {'type': basestring},
'image_requires_login': {'type': bool, 'default': False},
'build': {'type': basestring},
'mount': {'type': basestring, 'default': str},
'commands': {'type': commands_schema, 'required': True},
'scripts': {'type': Array(script_schema), 'default': list},
'assets': {'type': Array(asset_schema), 'default': list},
'compose': {'type': dusty_app_compose_schema, 'default': dict},
'test': {'type': test_schema, 'default': dict}
}, validates=[
image_build_isolation_validator(),
repo_mount_validator(),
])
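
# Illustrative app spec sketch (an assumption added for clarity, not taken from
# the repo's docs): exactly one of `image` or `build` may appear, and `repo`
# must be paired with `mount`, per the validators above.
#
#   example_app = {
#       'repo': 'github.com/example/app',
#       'mount': '/app',
#       'image': 'python:2.7',
#       'commands': {'always': ['python run.py']},
#   }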
|
440749
|
import os
import sys
import uuid
import errno
import tempfile
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
if PY2:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iteritems = lambda x: x.iteritems()
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
else:
text_type = str
string_types = (str,)
integer_types = (int,)
iteritems = lambda x: iter(x.items())
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def _posixify(name):
return '-'.join(name.split()).lower()
def iter_from_file(f, encoding=None):
if encoding is None:
encoding = 'utf-8-sig'
return (x.decode(encoding, 'replace') for x in f)
def get_app_dir(app_name, roaming=True, force_posix=False):
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
To give you an idea, for an app called ``"Foo Bar"``, something like
the following folders could be returned:
Mac OS X:
``~/Library/Application Support/Foo Bar``
Mac OS X (POSIX):
``~/.foo-bar``
Unix:
``~/.config/foo-bar``
Unix (POSIX):
``~/.foo-bar``
Win XP (roaming):
``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
Win XP (not roaming):
``C:\Documents and Settings\<user>\Application Data\Foo Bar``
Win 7 (roaming):
``C:\Users\<user>\AppData\Roaming\Foo Bar``
Win 7 (not roaming):
``C:\Users\<user>\AppData\Local\Foo Bar``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
                    Has no effect otherwise.
:param force_posix: if this is set to `True` then on any POSIX system the
folder will be stored in the home folder with a leading
dot instead of the XDG config home or darwin's
application support folder.
"""
if WIN:
key = roaming and 'APPDATA' or 'LOCALAPPDATA'
folder = os.environ.get(key)
if folder is None:
folder = os.path.expanduser('~')
return os.path.join(folder, app_name)
if force_posix:
return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
if sys.platform == 'darwin':
return os.path.join(os.path.expanduser(
'~/Library/Application Support'), app_name)
return os.path.join(
os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
_posixify(app_name))
class Dialect(object):
"""This class allows customizing the dialect of the ini file. The
default configuration is a compromise between the general Windows
format and what's common on Unix systems.
Example dialect config::
unix_dialect = Dialect(
kv_sep=': ',
quotes=("'",),
comments=('#',),
)
:param ns_sep: the namespace separator. This character is used to
create hierarchical structures in sections and also
placed between section and field.
:param kv_sep: the separator to be placed between key and value. For
parsing whitespace is automatically removed.
:param quotes: a list of quote characters supported for strings. The
leftmost one is automatically used for serialization,
the others are supported for deserialization.
:param true: strings that should be considered boolean true.
:param false: strings that should be considered boolean false.
:param comments: comment start markers.
:param allow_escaping: enables or disables backslash escapes.
:param linesep: a specific line separator to use other than the
operating system's default.
"""
def __init__(self, ns_sep='.', kv_sep=' = ', quotes=('"', "'"),
true=('true', 'yes', '1'), false=('false', 'no', '0'),
comments=('#', ';'), allow_escaping=True, linesep=None):
self.ns_sep = ns_sep
self.kv_sep = kv_sep
self.plain_kv_sep = kv_sep.strip()
self.quotes = quotes
self.true = true
self.false = false
self.comments = comments
self.allow_escaping = allow_escaping
self.linesep = linesep
def get_actual_linesep(self):
if self.linesep is None:
return os.linesep
return self.linesep
def get_strippable_lineseps(self):
if self.linesep is None or self.linesep in '\r\n':
return '\r\n'
return self.linesep
def kv_serialize(self, key, val):
if val is None:
return None
if self.quotes and val.split() != [val]:
q = self.quotes[0]
if self.allow_escaping:
val = self.escape(val, q)
val = '%s%s%s' % (q, val, q)
return '%s%s%s' % (key, self.kv_sep, val)
def escape(self, value, quote=None):
value = value \
.replace('\\', '\\\\') \
.replace('\n', '\\n') \
.replace('\r', '\\r') \
.replace('\t', '\\t')
for q in self.quotes:
if q != quote:
value = value.replace(q, '\\' + q)
return value
def unescape(self, value):
value = value \
.replace('\\n', '\n') \
.replace('\\r', '\r') \
.replace('\\t', '\t') \
.replace('\\"', '"')
for q in self.quotes:
value = value.replace('\\' + q, q)
return value
def to_string(self, value):
if value is True:
return self.true[0]
if value is False:
return self.false[0]
if isinstance(value, integer_types) or isinstance(value, float):
return text_type(value)
if not isinstance(value, string_types):
raise TypeError('Cannot set value of this type')
return text_type(value)
def dict_from_iterable(self, iterable):
"""Builds a mapping of values out of an iterable of lines."""
mapping = OrderedDict()
for token, _, data in self.tokenize(iterable):
if token == 'KV':
section, key, value = data
mapping[self.ns_sep.join(section + (key,))] = value
return mapping
def tokenize(self, iterable):
"""Tokenizes an iterable of lines."""
section = ()
line_strip = self.get_strippable_lineseps()
for line in iterable:
line = line.rstrip(line_strip)
if not line.strip():
yield 'EMPTY', line, None
elif line.lstrip()[:1] in self.comments:
yield 'COMMENT', line, None
elif line[:1] == '[' and line[-1:] == ']':
section = tuple(line[1:-1].strip().split(self.ns_sep))
yield 'SECTION', line, section
elif self.plain_kv_sep in line:
key, value = line.split(self.plain_kv_sep, 1)
value = value.strip()
if value[:1] in self.quotes and value[:1] == value[-1:]:
value = value[1:-1]
if self.allow_escaping:
value = self.unescape(value)
yield 'KV', line, (section, key.strip(), value)
def update_tokens(self, old_tokens, changes):
"""Given the tokens returned from :meth:`tokenize` and a dictionary
of new values (or `None` for values to be deleted) returns a new
list of tokens that should be written back to a file.
"""
new_tokens = []
section_ends = {None: 0}
pending_changes = dict(changes)
for token, line, data in old_tokens:
if token == 'KV':
section, key, value = data
k = self.ns_sep.join(section + (key,))
if k in pending_changes:
value = pending_changes.pop(k)
line = self.kv_serialize(key, value)
data = (section, key, value)
section_ends[self.ns_sep.join(section)] = len(new_tokens)
elif token == 'SECTION':
section_ends[self.ns_sep.join(data)] = len(new_tokens)
new_tokens.append((token, line, data))
pending_by_sec = {}
for key, value in sorted(pending_changes.items()):
section, local_key = key.rsplit(self.ns_sep, 1)
pending_by_sec.setdefault(section, []).append((local_key, value))
if pending_by_sec:
section_ends_r = dict((v, k) for k, v in section_ends.items())
final_lines = []
for idx, (token, line, data) in enumerate(new_tokens):
final_lines.append((token, line, data))
section = section_ends_r.get(idx)
if section is not None and section in pending_by_sec:
for local_key, value in pending_by_sec.pop(section):
final_lines.append((
'KV',
self.kv_serialize(local_key, value),
(section, local_key, value),
))
for section, items in sorted(pending_by_sec.items()):
if final_lines:
final_lines.append(('EMPTY', u'', None))
final_lines.append(('SECTION', u'[%s]' % section, section))
for local_key, value in items:
final_lines.append((
'KV',
self.kv_serialize(local_key, value),
(section, local_key, value),
))
new_tokens = final_lines
return [x for x in new_tokens if x[1] is not None]
default_dialect = Dialect()
class IniData(MutableMapping):
"""This object behaves similar to a dictionary but it tracks
modifications properly so that it can later write them back to an INI
file with the help of the ini dialect, without destroying ordering or
comments.
This is rarely used directly, instead the :class:`IniFile` is normally
used.
This generally works similar to a dictionary and exposes the same
basic API.
"""
def __init__(self, mapping=None, dialect=None):
if dialect is None:
dialect = default_dialect
self.dialect = dialect
if mapping is None:
mapping = {}
self._primary = mapping
self._changes = {}
@property
def is_dirty(self):
"""This is true if the data was modified."""
return bool(self._changes)
def get_updated_lines(self, line_iter=None):
"""Reconciles the updates in the ini data with the iterator of
lines from the source file and returns a list of the new lines
as they should be written into the file.
"""
return self.dialect.update_tokens(line_iter or (), self._changes)
def discard(self):
"""Discards all local modifications in the ini data."""
self._changes.clear()
def rollover(self):
"""Rolls all local modifications to the primary data. After this
modifications are no longer tracked and `get_updated_lines` will
not return them.
"""
self._primary = OrderedDict(self.iteritems())
self.discard()
def to_dict(self):
"""Returns the current ini data as dictionary."""
return dict(self.iteritems())
def __len__(self):
rv = len(self._primary)
for key, value in iteritems(self._changes):
            if key not in self._primary:
                if value is not None:
                    rv += 1
            elif value is None:
                rv -= 1
return rv
def get(self, name, default=None):
"""Return a value for a key or return a default if the key does
not exist.
"""
try:
return self[name]
except KeyError:
return default
def get_ascii(self, name, default=None):
"""This returns a value for a key for as long as the value fits
into ASCII. Otherwise (or if the key does not exist) the default
is returned. This is especially useful on Python 2 when working
with some APIs that do not support unicode.
"""
try:
rv = self[name]
try:
rv.encode('ascii')
except UnicodeError:
raise KeyError()
if PY2:
rv = str(rv)
return rv
except KeyError:
return default
def get_bool(self, name, default=False):
"""Returns a value as boolean. What constitutes as a valid boolean
value depends on the dialect.
"""
try:
rv = self[name].lower()
if rv in self.dialect.true:
return True
if rv in self.dialect.false:
return False
raise KeyError()
except KeyError:
return default
def get_int(self, name, default=None):
"""Returns a value as integer."""
try:
return int(self[name])
except (ValueError, KeyError):
return default
def get_float(self, name, default=None):
"""Returns a value as float."""
try:
return float(self[name])
except (ValueError, KeyError):
return default
def get_uuid(self, name, default=None):
"""Returns a value as uuid."""
try:
return uuid.UUID(self[name])
except Exception:
return default
def itersections(self):
"""Iterates over the sections of the sections of the ini."""
seen = set()
sep = self.dialect.ns_sep
for key in self:
if sep in key:
section = key.rsplit(sep, 1)[0]
if section not in seen:
seen.add(section)
yield section
if PY2:
def sections(self):
"""Returns a list of the sections in the ini file."""
return list(self.itersections())
else:
sections = itersections
def iteritems(self):
for key in self._primary:
try:
yield key, self[key]
except LookupError:
pass
for key in self._changes:
if key not in self._primary:
try:
yield key, self[key]
except LookupError:
pass
def iterkeys(self):
for key, _ in self.iteritems():
yield key
def itervalues(self):
for _, value in self.iteritems():
yield value
__iter__ = iterkeys
if PY2:
def keys(self):
return list(self.iterkeys())
def values(self):
            return list(self.itervalues())
def items(self):
return list(self.iteritems())
else:
keys = iterkeys
values = itervalues
items = iteritems
def section_as_dict(self, section):
rv = {}
prefix = section + '.'
for key, value in self.iteritems():
if key.startswith(prefix):
rv[key[len(prefix):]] = value
return rv
def __getitem__(self, name):
if name in self._changes:
rv = self._changes[name]
if rv is None:
raise KeyError(name)
return rv
return self._primary[name]
def __setitem__(self, name, value):
self._changes[name] = self.dialect.to_string(value)
def __delitem__(self, name):
self._changes[name] = None
class IniFile(IniData):
"""This class implements simplified read and write access to INI files
    in a way that preserves the original files as well as possible. Unlike
a regular INI serializer it only overwrites the lines that were modified.
Example usage::
ifile = IniFile('myfile.ini')
ifile['ui.username'] = 'john_doe'
ifile.save()
The ini file exposes unicode strings but utility methods are provided
for common type conversion. The default namespace separator is a dot
(``.``).
The format of the file can be configured by providing a custom
:class:`Dialect` instance to the constructor.
"""
def __init__(self, filename, encoding=None, dialect=None):
if dialect is None:
dialect = default_dialect
self.filename = os.path.abspath(filename)
self.encoding = encoding
try:
with open(filename, 'rb') as f:
mapping = dialect.dict_from_iterable(
iter_from_file(f, self.encoding))
is_new = False
except IOError as e:
if e.errno != errno.ENOENT:
raise
is_new = True
mapping = OrderedDict()
IniData.__init__(self, mapping, dialect)
#: If this is `true` the file did not exist yet (it is new). This
#: can be used to fill it with some defaults.
self.is_new = is_new
def save(self, create_folder=False):
"""Saves all modifications back to the file. By default the folder
in which the file is placed needs to exist.
"""
# No modifications means no write.
if not self.is_dirty:
return
enc = self.encoding
if enc is None:
enc = 'utf-8'
linesep = self.dialect.get_actual_linesep()
if create_folder:
folder = os.path.dirname(self.filename)
try:
os.makedirs(folder)
except OSError:
pass
try:
with open(self.filename, 'rb') as f:
old_tokens = list(self.dialect.tokenize(
iter_from_file(f, self.encoding)))
except IOError:
old_tokens = []
fd, tmp_filename = tempfile.mkstemp(
dir=os.path.dirname(self.filename), prefix='.__atomic-write')
try:
with os.fdopen(fd, 'wb') as f:
new_tokens = self.get_updated_lines(old_tokens)
for _, line, _ in new_tokens:
f.write((line + linesep).encode(enc))
except:
exc_info = sys.exc_info()
try:
os.remove(tmp_filename)
except OSError:
pass
reraise(*exc_info)
if hasattr(os, 'replace'):
os.replace(tmp_filename, self.filename)
else:
try:
os.rename(tmp_filename, self.filename)
except OSError:
if os.name == 'nt':
os.remove(self.filename)
os.rename(tmp_filename, self.filename)
else:
raise
self.rollover()
self.is_new = False
class AppIniFile(IniFile):
"""This works exactly the same as :class:`IniFile` but the ini files
are placed by default in an application config directory. This uses
the function :func:`get_app_dir` internally to calculate the path
to it. Also by default the :meth:`~IniFile.save` method will create
the folder if it did not exist yet.
Example::
from inifile import AppIniFile
config = AppIniFile('My App', 'my_config.ini')
config['ui.user_colors'] = True
config['ui.colorscheme'] = 'tango'
config.save()
"""
def __init__(self, app_name, filename, roaming=True, force_posix=False,
encoding=None, dialect=None):
app_dir = get_app_dir(app_name, roaming=roaming,
force_posix=force_posix)
IniFile.__init__(self, os.path.join(app_dir, filename),
encoding=encoding, dialect=dialect)
def save(self, create_folder=True):
return IniFile.save(self, create_folder=create_folder)
|
440772
|
from abc import ABC, abstractmethod
class AbstractOutputAdapter(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def write(self, pixel_arrays, *args, **kwargs):
pass
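
# Illustrative only: a minimal concrete adapter (the class name and behaviour
# below are assumptions added for clarity, not part of the original module).
class ExamplePrintAdapter(AbstractOutputAdapter):
    def write(self, pixel_arrays, *args, **kwargs):
        # Report each pixel array instead of writing it to a real output device.
        for array in pixel_arrays:
            print(array)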
|
440783
|
from .pubsub import BaseEvent, Provider, Publisher, Subscriber
from .utils import retry_on_exception, chunked, sanitize_filename, fails_in_row
__all__ = ['BaseEvent', 'Provider', 'Publisher', 'Subscriber', 'retry_on_exception', 'chunked', 'sanitize_filename',
'fails_in_row']
|
440802
|
from django.contrib import admin
from django.urls import include, path
# import magic_link.urls
admin.autodiscover()
urlpatterns = [
path("admin/", admin.site.urls),
path("magic-link/", include("magic_link.urls")),
]
|
440826
|
import argparse
import os
def main(args):
init_wav_path = args.init_wav_path
init_wav_scp_file = args.init_wav_scp_file
with open(init_wav_scp_file, "w") as fw:
for item in os.listdir(init_wav_path):
print(item)
if not str(item).endswith(".wav"):
continue
fw.write(f"{item[:-4]} {os.path.join(init_wav_path, item)}\n")
print("Prepare init wav.scp finished!")
if __name__ == '__main__':
parser = argparse.ArgumentParser("Make aishell4 test")
parser.add_argument("--init_wav_path", required=True, help="The init wav path")
parser.add_argument("--init_wav_scp_file", required=True, help="The init wav.scp file")
args = parser.parse_args()
main(args)
|
440836
|
import dlutils
from packaging import version
if not hasattr(dlutils, "__version__") or version.parse(dlutils.__version__) < version.parse("0.0.11"):
raise RuntimeError('Please update dlutils: pip install dlutils --upgrade')
try:
dlutils.download.from_google_drive('170Qldnn28IwnVm9CQEq1AZhVsK7PJ0Xz', directory='training_artifacts/ffhq')
dlutils.download.from_google_drive('1QESywJW8N-g3n0Csy0clztuJV99g8pRm', directory='training_artifacts/ffhq')
dlutils.download.from_google_drive('18BzFYKS3icFd1DQKKTeje7CKbEKXPVug', directory='training_artifacts/ffhq')
except IOError:
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/ffhq/model_submitted.pth', directory='training_artifacts/ffhq')
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/ffhq/model_194.pth', directory='training_artifacts/ffhq')
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/ffhq/model_157.pth', directory='training_artifacts/ffhq')
try:
dlutils.download.from_google_drive('1T4gkE7-COHpX38qPwjMYO-xU-SrY_aT4', directory='training_artifacts/celeba')
except IOError:
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/celeba/model_final.pth', directory='training_artifacts/celeba')
try:
dlutils.download.from_google_drive('1gmYbc6Z8qJHJwICYDsB4aBMxXjnKeXA_', directory='training_artifacts/bedroom')
except IOError:
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/bedroom/model_final.pth', directory='training_artifacts/bedroom')
try:
dlutils.download.from_google_drive('1ihJvp8iJWcLxTIjkV5cyA7l9TrxlUPkG', directory='training_artifacts/celeba-hq256')
dlutils.download.from_google_drive('1gFQsGCNKo-frzKmA3aCvx07ShRymRIKZ', directory='training_artifacts/celeba-hq256')
except IOError:
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/celeba-hq256/model_262r.pth', directory='training_artifacts/celeba-hq256')
dlutils.download.from_url('https://alaeweights.s3.us-east-2.amazonaws.com/celeba-hq256/model_580r.pth', directory='training_artifacts/celeba-hq256')
|
440842
|
import unittest
from helpers.queuehelper import QueueName, QueueType, QueueEntry, QueueEntries
class TestQueuenames(unittest.TestCase):
def test_queue_valid_name(self):
self.assertTrue(QueueName.has_value(QueueName.Q_ALERT.value))
def test_queue_invalid_name(self):
self.assertFalse(QueueName.has_value('notaqueuename'))
def test_queue_alert(self):
self.assertTrue(str(QueueName.Q_ALERT) == 'q_alert')
def test_queue_value(self):
self.assertTrue(QueueName.value(QueueName.Q_ALERT) == 'alert')
def test_queue_type(self):
self.assertTrue(QueueType.broadcast == 'broadcast')
self.assertTrue(QueueType.publish == 'publish')
def test_queue_entry(self):
que = QueueEntry('', '', '')
self.assertTrue(que)
def test_queue_entries(self):
que = QueueEntries()
self.assertTrue(que)
que.add('test', 'test')
que.addbroadcast('qbroad', 'test')
que.addalert('msg')
self.assertTrue(que.hasentries())
if __name__ == '__main__':
unittest.main()
|
440844
|
import targets
# Like HiFive1, but put code in flash
class HiFive1FlashHart(targets.Hart):
xlen = 32
ram = 0x80000000
ram_size = 16 * 1024
instruction_hardware_breakpoint_count = 2
misa = 0x40001105
link_script_path = "HiFive1-flash.lds"
class HiFive1Flash(targets.Target):
harts = [HiFive1FlashHart()]
openocd_config_path = "HiFive1.cfg"
|
440921
|
import pytest
# ============================================================================
def pytest_addoption(parser):
parser.addoption('--append', default='', action='store')
parser.addoption('--auto', default='', action='store')
parser.addoption('--delete', default=False, action='store_true')
# ============================================================================
def pytest_collection_modifyitems(config, items):
append = config.getoption('--append')
delete = config.getoption('--delete')
skip_marker = pytest.mark.skip(reason='skipping others')
for item in items:
include = True
if delete:
include = ('delete' in item.keywords)
elif append:
include = ('append' in item.keywords)
else:
include = ('delete' not in item.keywords) and ('append' not in item.keywords)
if 'always' in item.keywords:
include = True
if not include:
item.add_marker(skip_marker)
@pytest.fixture
def append(request):
return request.config.getoption('--append')
@pytest.fixture
def auto_id(request):
return request.config.getoption('--auto')
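
# Illustrative usage (an assumption added for clarity, not part of the original
# conftest): with the collection hook above, tests opt in to the --append and
# --delete runs via markers, and unmarked tests run only when neither flag is set.
#
#   @pytest.mark.append
#   def test_append_records(append):
#       assert append == ''   # value passed via --append, empty by default
#
#   @pytest.mark.always
#   def test_runs_in_every_mode():
#       assert True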
|
440960
|
import argparse
import sys
def parse_arguments():
parser = argparse.ArgumentParser(
description='Compute knight\'s dialler sequence counts')
parser.add_argument('start_position', type=int, help='Starting position')
parser.add_argument('num_hops', type=int, help='Number of hops')
args = parser.parse_args()
if args.start_position < 0 or args.start_position > 9:
print('Starting position must be in [0, 9]')
sys.exit(1)
if args.num_hops < 0:
print('Number of hops must be nonnegative')
sys.exit(1)
return args
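
# Illustrative sketch (an assumption added for clarity, not the project's actual
# implementation): one way to compute the counts this CLI describes, using
# dynamic programming over the knight-move adjacency of a phone keypad.
KNIGHT_NEIGHBORS = {
    0: (4, 6), 1: (6, 8), 2: (7, 9), 3: (4, 8), 4: (0, 3, 9),
    5: (), 6: (0, 1, 7), 7: (2, 6), 8: (1, 3), 9: (2, 4),
}

def count_sequences(start_position, num_hops):
    # counts[k] = number of sequences of the remaining hops that start at key k
    counts = [1] * 10
    for _ in range(num_hops):
        counts = [sum(counts[n] for n in KNIGHT_NEIGHBORS[k]) for k in range(10)]
    return counts[start_position]

if __name__ == '__main__':
    args = parse_arguments()
    print(count_sequences(args.start_position, args.num_hops))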
|
440962
|
import argparse
import logging
import os
import json
import collections
from agents.bert_agent.methods.baseline.dataset.dialog import flatten_variables
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger()
def clean_dataset(args):
stats = collections.defaultdict(int)
for fn in os.listdir(args.json_dir):
with open(os.path.join(args.json_dir, fn)) as f, \
open(os.path.join(args.out_dir, fn), 'w') as f_out:
dialog = json.load(f)
# remove api calls whose variables are not used.
dialog = remove_unnecessary_api_calls(stats, dialog)
# merge lat long
# dialog = merge_latlong(stats, dialog)
# move_api_call_after_agent_utter
dialog = move_api_call_after_agent_utter(stats, dialog)
# remove_duplicated_events
dialog = remove_duplicated_adjacent_events(stats, dialog)
f_out.write(json.dumps(dialog, indent=2))
for k, v in stats.items():
print('{}: {}'.format(k, v))
def merge_latlong(stats, dialog):
# update source var
source_variables = dialog['initial_variables']['variables'] \
if 'variables' in dialog['initial_variables'] \
else dialog['initial_variables']
new_source_variables = source_variables[:1] + [
{
"name": "latlong",
"value": '{}/{}'.format(source_variables[1]['value'],
source_variables[2]['value']),
"full_name": "source_latlong"
}
]
dialog["initial_variables"] = {
"variable_group": "source",
"variables": new_source_variables
}
# update events
new_events = []
var_dict = {}
var_dict['source_latitude'] = source_variables[1]
var_dict['source_longitude'] = source_variables[2]
for event in dialog['events']:
if event['event_type'] == 'agent_utterance':
new_params = []
for p in event['params']:
if p.endswith('latitude'):
new_params.append(p.replace('latitude', 'latlong'))
elif p.endswith('longitude'):
pass
else:
new_params.append(p)
event['params'] = new_params
elif event['event_type'] == 'api_call':
# load variables
variables = flatten_variables(event['variables'])
for var in variables:
var_dict[var['full_name']] = var
# update query
new_params = []
for p in event['params']:
if p['param'] == 'query':
new_params.append(p)
elif p['param'].endswith('latitude'):
p['param'] = p['param'].replace('latitude', 'latlong')
p['value'] = '{}/{}'.format(
p['value'],
var_dict[p['variable_name'].replace(
'latitude', 'longitude')]['value']
)
p['variable_name'] = p['variable_name'].replace(
'latitude', 'latlong')
new_params.append(p)
elif p['param'].endswith('longitude'):
pass
else:
new_params.append(p)
event['params'] = new_params
# update variables
new_variables = []
for var in variables:
if var['full_name'].endswith('latitude'):
longitude = var_dict[var['full_name'].replace(
'latitude', 'longitude')]
var['full_name'] = var['full_name'].replace(
'latitude', 'latlong')
var['value'] = '{}/{}'.format(var['value'],
longitude['value'])
var['name'] = 'latlong'
new_variables.append(var)
elif var['full_name'].endswith('longitude'):
pass
else:
new_variables.append(var)
event['variables'] = new_variables
elif event['event_type'] == 'end_dialog':
event['destination'] = {
'latlong': '{}/{}'.format(
event['destination']['latitude'],
event['destination']['longitude']
)
}
new_events.append(event)
# update events
dialog['events'] = new_events
return dialog
def move_api_call_after_agent_utter(stats, dialog):
events = dialog['events']
old_e = [e['event_type'] for e in events]
new_events = []
is_modified = True
while is_modified is True:
is_modified = False
new_events.clear()
idx = 0
while idx < len(events):
if events[idx]['event_type'] == 'api_call':
                if idx + 1 < len(events) and events[idx + 1]['event_type'] == 'user_utterance':
new_events.append(events[idx + 1])
new_events.append(events[idx])
is_modified = True
idx = idx + 2
stats['move_api_call_after_agent_utter'] += 1
else:
new_events.append(events[idx])
idx += 1
else:
new_events.append(events[idx])
idx += 1
events = tuple(new_events)
new_e = [e['event_type'] for e in new_events]
# if old_e != new_e:
# print('old:', old_e)
# print('new:', new_e)
# update events
dialog['events'] = new_events
return dialog
def remove_duplicated_adjacent_events(stats, dialog):
new_events = []
prev_event = None
for event in dialog['events']:
if event != prev_event:
new_events.append(event)
else:
stats['remove_duplicated_adjacent_events'] += 1
prev_event = event
# update events
dialog['events'] = new_events
return dialog
def remove_unnecessary_api_calls(stats, dialog):
used_variables = set()
# get used variables
for event in dialog['events']:
stats['num_events'] += 1
if event['event_type'] == 'api_call':
stats['num_api_call_events'] += 1
for p in event['params']:
if p['variable_name'].startswith('u'):
continue
else:
used_variables.add(p['variable_name'])
elif event['event_type'] == 'agent_utterance':
for p in event['params']:
used_variables.add(p)
# remove unnecessary api_call events
new_events = []
for event in dialog['events']:
if event['event_type'] == 'api_call':
flattened_variables = flatten_variables(event['variables'])
is_necessary = False
for var in flattened_variables:
if var['full_name'] in used_variables:
is_necessary = True
break
if is_necessary:
new_events.append(event)
else:
stats['num_events_removed'] += 1
else:
new_events.append(event)
# update events
dialog['events'] = new_events
return dialog
def get_entity_from_full_name(full_name):
p = full_name.split('_')
if len(p[1]) == 1:
entity = '_'.join(p[:2])
else:
entity = '_'.join(p[:1])
return entity
def main():
parser = argparse.ArgumentParser()
parser.add_argument('json_dir')
parser.add_argument('out_dir')
args = parser.parse_args()
clean_dataset(args)
if __name__ == "__main__":
main()
|
440989
|
from records_mover.url.resolver import file_url_ctors, UrlResolver
from records_mover.url.base import BaseFileUrl
from mock import Mock
import unittest
class NeedyFileUrl(BaseFileUrl):
def __init__(self,
url,
boto3_session,
gcs_client,
gcp_credentials):
self.boto3_session = boto3_session
self.gcs_client = gcs_client
self.gcp_credentials = gcp_credentials
class TestUrlResolverNoCreds(unittest.TestCase):
def test_NeedyFileUrl_with_no_boto3(self):
mock_gcs_client = Mock(name='gcs_client')
mock_gcp_credentials = Mock(name='gcp_credentials')
resolver = UrlResolver(boto3_session_getter=lambda: None,
gcs_client_getter=lambda: mock_gcs_client,
gcp_credentials_getter=lambda: mock_gcp_credentials)
file_url_ctors['needy'] = NeedyFileUrl
needy_url = 'needy://foo/bar/baz?a=b&d=f'
with self.assertRaises(EnvironmentError):
resolver.file_url(needy_url)
def test_NeedyFileUrl_with_no_gcs_client(self):
mock_boto3_session = Mock(name='boto3_session')
mock_gcp_credentials = Mock(name='gcp_credentials')
resolver = UrlResolver(boto3_session_getter=lambda: mock_boto3_session,
gcs_client_getter=lambda: None,
gcp_credentials_getter=lambda: mock_gcp_credentials)
file_url_ctors['needy'] = NeedyFileUrl
needy_url = 'needy://foo/bar/baz?a=b&d=f'
with self.assertRaises(EnvironmentError):
resolver.file_url(needy_url)
def test_NeedyFileUrl_with_no_gcp_credentials(self):
mock_gcs_client = Mock(name='gcs_client')
mock_boto3_session = Mock(name='boto3_session')
resolver = UrlResolver(boto3_session_getter=lambda: mock_boto3_session,
gcs_client_getter=lambda: mock_gcs_client,
gcp_credentials_getter=lambda: None)
file_url_ctors['needy'] = NeedyFileUrl
needy_url = 'needy://foo/bar/baz?a=b&d=f'
with self.assertRaises(EnvironmentError):
resolver.file_url(needy_url)
|
440993
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import pywsjtx.extra.simple_server
import re
import random
TEST_MULTICAST = True
if TEST_MULTICAST:
IP_ADDRESS = '172.16.17.32'
PORT = 5007
else:
IP_ADDRESS = '127.0.0.1'
PORT = 2237
MY_MAX_SCHEMA = 3
s = pywsjtx.extra.simple_server.SimpleServer(IP_ADDRESS, PORT, timeout=2.0)
while True:
(pkt, addr_port) = s.rx_packet()
    if pkt is not None:
the_packet = pywsjtx.WSJTXPacketClassFactory.from_udp_packet(addr_port, pkt)
if type(the_packet) == pywsjtx.HeartBeatPacket:
max_schema = max(the_packet.max_schema, MY_MAX_SCHEMA)
reply_beat_packet = pywsjtx.HeartBeatPacket.Builder(the_packet.wsjtx_id,max_schema)
s.send_packet(addr_port, reply_beat_packet)
if type(the_packet) == pywsjtx.DecodePacket:
m = re.match(r"^CQ\s+(\S+)\s+", the_packet.message)
if m:
print("Callsign {}".format(m.group(1)))
callsign = m.group(1)
color_pkt = pywsjtx.HighlightCallsignPacket.Builder(the_packet.wsjtx_id, callsign,
pywsjtx.QCOLOR.White(),
pywsjtx.QCOLOR.Red(),
True)
normal_pkt = pywsjtx.HighlightCallsignPacket.Builder(the_packet.wsjtx_id, callsign,
pywsjtx.QCOLOR.Uncolor(),
pywsjtx.QCOLOR.Uncolor(),
True)
s.send_packet(addr_port, color_pkt)
#print(pywsjtx.PacketUtil.hexdump(color_pkt))
print(the_packet)
|
441019
|
import json
import logging
import os
from enum import Enum
from .utils import throw_if_false
class ConfigObjectEnum(Enum):
OCTAGON_OBJECT_DATASETS = "Datasets"
OCTAGON_OBJECT_DATASCHEMAS = "DataSchemas"
OCTAGON_OBJECT_PIPELINES = "Pipelines"
OCTAGON_OBJECT_PIPELINEHISTORY = "PipelineExecutionHistory"
OCTAGON_OBJECT_EVENTS = "Events"
OCTAGON_OBJECT_ARTIFACTS = "Artifacts"
OCTAGON_OBJECT_METRICS = "Metrics"
class ConfigTableInfo:
def __init__(self, dynamo_table_name, ttl_in_days=0, read_capacity=0, write_capacity=0):
self.dynamo_table_name = dynamo_table_name
self.ttl_in_days = ttl_in_days
self.read_capacity = read_capacity
self.write_capacity = write_capacity
def get_dynamo_table_name(self):
return self.dynamo_table_name
def get_ttl_days(self):
return self.ttl_in_days
def get_read_capacity(self):
return self.read_capacity
def get_write_capacity(self):
return self.write_capacity
def __str__(self):
return f"[ Table name: {self.dynamo_table_name}, TTL: {self.ttl_in_days}, RC: {self.read_capacity}, WC: {self.write_capacity}]"
class MetricInfo:
def __init__(self, metric, evaluation, threshold, notify, metric_type, sns_topic):
self.metric = metric
self.evaluation = evaluation
self.threshold = threshold
self.notify = notify
self.metric_type = metric_type
self.sns_topic = sns_topic
def __str__(self):
return f"[ Metric:{self.metric}, threshold: {self.threshold}]"
class ConfigParser:
def __init__(self, config_file, instance):
self.logger = logging.getLogger(__name__)
self.instance = instance
self.logger.debug(f"Reading configuration from file {config_file}")
if not os.path.isfile(config_file):
msg = f"Octagon configuration file is not found {config_file}"
self.logger.error(msg)
raise ValueError(msg)
with open(config_file, "r") as f:
config_dict = json.load(f)
self.table_info = {}
self.metric_info = []
for config_instance in config_dict["configuration_instances"]:
if config_instance["instance"] == instance:
for ti in config_instance["tables"]:
if "object" in ti.keys() and "table_name" in ti.keys():
table_info = ConfigTableInfo(
dynamo_table_name=ti["table_name"],
ttl_in_days=ti.get("ttl", 0),
read_capacity=ti.get("read_capacity", 0),
write_capacity=ti.get("write_capacity", 0),
)
object_name = ti["object"]
self.table_info[object_name] = table_info
self.logger.debug(f"Loaded config for table: {table_info}")
if "metrics" in config_instance.keys():
for mi in config_instance["metrics"]:
metric_info = MetricInfo(
metric=mi.get("metric"),
evaluation=mi.get("evaluation"),
threshold=mi.get("threshold"),
notify=mi.get("notify"),
metric_type=mi.get("metric_type"),
sns_topic=mi.get("sns_topic", ""),
)
self.metric_info.append(metric_info)
self.logger.debug(f"Loaded config for metric: {metric_info}")
throw_if_false(len(self.table_info) > 0, "Configuration instance is not found")
def get_table_info(self, config_obj: ConfigObjectEnum) -> ConfigTableInfo:
return self.table_info[config_obj.value]
def get_table_name(self, config_obj: ConfigObjectEnum) -> str:
return self.get_table_info(config_obj).get_dynamo_table_name()
def get_events_table(self) -> str:
return self.get_table_name(ConfigObjectEnum.OCTAGON_OBJECT_EVENTS)
def get_table_ttl(self, config_obj: ConfigObjectEnum) -> int:
return self.get_table_info(config_obj).get_ttl_days()
def get_events_ttl(self) -> int:
return self.get_table_ttl(ConfigObjectEnum.OCTAGON_OBJECT_EVENTS)
def get_pipelines_table(self) -> str:
return self.get_table_name(ConfigObjectEnum.OCTAGON_OBJECT_PIPELINES)
def get_peh_table(self) -> str:
return self.get_table_name(ConfigObjectEnum.OCTAGON_OBJECT_PIPELINEHISTORY)
    def get_peh_ttl(self) -> int:
return self.get_table_ttl(ConfigObjectEnum.OCTAGON_OBJECT_PIPELINEHISTORY)
def get_artifacts_table(self) -> str:
return self.get_table_name(ConfigObjectEnum.OCTAGON_OBJECT_ARTIFACTS)
def get_artifacts_ttl(self) -> int:
return self.get_table_ttl(ConfigObjectEnum.OCTAGON_OBJECT_ARTIFACTS)
def get_metrics_table(self) -> str:
return self.get_table_name(ConfigObjectEnum.OCTAGON_OBJECT_METRICS)
def get_metrics_ttl(self) -> int:
return self.get_table_ttl(ConfigObjectEnum.OCTAGON_OBJECT_METRICS)
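
# Illustrative configuration shape (inferred from the parser above; the instance
# and table names are assumptions, not an official schema):
#
#   {
#     "configuration_instances": [
#       {
#         "instance": "dev",
#         "tables": [
#           {"object": "Events", "table_name": "octagon-Events-dev", "ttl": 90},
#           {"object": "Pipelines", "table_name": "octagon-Pipelines-dev"}
#         ],
#         "metrics": [
#           {"metric": "errors", "evaluation": "gt", "threshold": 0,
#            "notify": true, "metric_type": "counter", "sns_topic": ""}
#         ]
#       }
#     ]
#   }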
|
441035
|
import logging
import tensorflow as tf
from .mask import get_mask, Unmask
from .transformer import PositionEmbedding, get_sequence_block
from .utils import make_dense_options, make_emb_options
logger = logging.getLogger(__name__)
class VariationalHead(tf.keras.layers.Layer):
"""Encoder head for VAE."""
def __init__(self, output_dim, kl=None, l2=None, **kwargs):
super().__init__(**kwargs)
self.kl = 1.0 if kl is None else kl
self.z_mean = tf.keras.layers.Dense(
output_dim,
name='z_mean',
**make_dense_options(l2),
)
self.z_log_sigma = tf.keras.layers.Dense(
output_dim,
name='z_log_sigma',
**make_dense_options(l2),
)
def call(self, inputs, training=False):
z_mean = self.z_mean(inputs)
z_log_sigma = self.z_log_sigma(inputs)
# Compute KL divergence to normal distribution.
kl_div = -0.5 * tf.reduce_mean(1 + z_log_sigma - tf.square(z_mean) -
tf.exp(z_log_sigma))
self.add_loss(self.kl * kl_div)
self.add_metric(kl_div, name='kl_divergence')
if training:
# Sample when training.
epsilon = tf.random.normal(shape=tf.shape(z_log_sigma))
z_mean += tf.exp(0.5 * z_log_sigma) * epsilon
return z_mean
class Encoder(tf.keras.layers.Layer):
'''
Encoder implementation.
'''
def __init__(
self,
input_columns,
latent_dim=128,
num_blocks=1,
block_type='deepsvg',
dropout=0.1,
kl=None,
l2=None,
**kwargs,
):
super().__init__(**kwargs)
self.input_columns = input_columns
self.kl = 1.0 if kl is None else kl
self.latent_dim = latent_dim
self.input_layer = {}
self.input_layer['const'] = PositionEmbedding(
latent_dim,
self.input_columns['length']['input_dim'],
dropout=dropout,
emb_options=make_emb_options(l2),
name='input_const',
)
for key, column in self.input_columns.items():
if column['type'] == 'categorical':
self.input_layer[key] = tf.keras.layers.Embedding(
input_dim=column['input_dim'],
output_dim=latent_dim,
name='input_%s' % key,
**make_emb_options(l2),
)
elif column['type'] == 'numerical':
self.input_layer[key] = tf.keras.layers.Dense(
units=latent_dim,
name='input_%s' % key,
**make_dense_options(l2),
)
else:
raise ValueError('Invalid column: %s' % column)
self.seq2seq = {}
layer_fn = get_sequence_block(block_type)
for i in range(num_blocks):
self.seq2seq['seq2seq_%d' % i] = layer_fn(
latent_dim,
dropout=dropout,
conditional=True,
pooling=(i == num_blocks - 1),
dense_options=make_dense_options(l2),
name='seq2seq_%d' % i,
)
self.norm = tf.keras.layers.BatchNormalization()
self.unmask = Unmask()
self.head = VariationalHead(latent_dim, kl=kl, l2=l2, name='z_head')
def call(self, inputs, training=False, sampling=False):
batch = tf.shape(inputs['length'])[0]
# Context inputs.
context = tf.zeros((batch, 1, self.latent_dim), dtype=tf.float32)
for key, column in self.input_columns.items():
if not column['is_sequence']:
x = self.input_layer[key](inputs[key])
context += x
context = context[:, 0, :] # Squeeze timesteps.
tf.debugging.assert_rank(context, 2)
# Sequence inputs.
mask = get_mask(inputs['length'])
sequence = self.input_layer['const'](mask, training=training)
for key, column in self.input_columns.items():
if column['is_sequence']:
x = self.input_layer[key](inputs[key])
if column['type'] == 'categorical':
x = tf.reduce_sum(x, axis=2) # Vector categorical
sequence += x
tf.debugging.assert_rank(sequence, 3)
# Sequence transform.
for layer in self.seq2seq.values():
sequence = layer((sequence, context), training=training, mask=mask)
# Last layer already applies temporal pooling, shape=(N, D).
pooled = self.norm(sequence, training=training)
pooled = self.unmask(pooled)
return self.head(pooled, training=training or sampling)
|
441038
|
import numpy as np
from openmdao.api import ExplicitComponent
class PassThrough(ExplicitComponent):
"""
Helper component that is needed when variables must be passed directly from
input to output of a cycle element with no other component in between
"""
def __init__(self, i_var, o_var, val, units=None):
super(PassThrough, self).__init__()
self.i_var = i_var
self.o_var = o_var
self.units = units
self.val = val
if isinstance(val, (float, int)) or np.isscalar(val):
size=1
else:
size = np.prod(val.shape)
self.size = size
def setup(self):
if self.units is None:
self.add_input(self.i_var, self.val)
self.add_output(self.o_var, self.val)
else:
self.add_input(self.i_var, self.val, units=self.units)
self.add_output(self.o_var, self.val, units=self.units)
#partial derivs setup
row_col = np.arange(self.size)
self.declare_partials(of=self.o_var, wrt=self.i_var,
val=np.ones(self.size), rows=row_col, cols=row_col)
def compute(self, inputs, outputs):
outputs[self.o_var] = inputs[self.i_var]
def compute_partials(self, inputs, J):
pass
if __name__ == "__main__":
from openmdao.api import Problem, IndepVarComp
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('foo', val=np.ones(4))
p.model.add_subsystem('pt', PassThrough("foo", "bar", val=np.ones(4)), promotes=['*'])
p.setup()
p.run_model()
p.check_partial_derivatives()
|
441059
|
from minik.status_codes import codes
class MinikError(Exception):
pass
class MinikViewError(MinikError):
STATUS_CODE = codes.server_error
def __init__(self, error_message, *args, **kwargs):
super().__init__(f'{self.__class__.__name__}: {error_message}')
self.status_code = kwargs.get('status_code', self.STATUS_CODE)
class ConfigurationError(MinikError):
def __init__(self, error_message, *args, **kwargs):
super().__init__(self.__class__.__name__ + ': %s' % error_message)
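
# Illustrative usage sketch (an assumption added for clarity, not part of the
# original module): view code can raise MinikViewError with an explicit status.
if __name__ == '__main__':
    try:
        raise MinikViewError('record not found', status_code=404)
    except MinikViewError as error:
        print(error, error.status_code)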
|
441086
|
from setuptools import setup, find_packages
exec(open("greenback/_version.py", encoding="utf-8").read())
LONG_DESC = open("README.rst", encoding="utf-8").read()
setup(
name="greenback",
version=__version__,
description="Reenter an async event loop from synchronous code",
url="https://github.com/oremanj/greenback",
long_description=LONG_DESC,
author="<NAME>",
author_email="<EMAIL>",
license="MIT -or- Apache License 2.0",
packages=find_packages(),
include_package_data=True,
install_requires=["greenlet != 0.4.17", "sniffio", "outcome"],
keywords=["async", "io", "trio", "asyncio"],
python_requires=">=3.6",
classifiers=[
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Framework :: Trio",
"Framework :: AsyncIO",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Development Status :: 4 - Beta",
],
)
|
441098
|
import time
import torch
import numpy as np
from iirnet.loss import LogMagTargetFrequencyLoss
class SGDFilterDesign(torch.nn.Module):
"""Design a filter by performing SGD."""
def __init__(
self,
n_iters=1000,
lr=5e-4,
schedule_lr=False,
pole_zero=True,
verbose=False,
order=16,
):
super(SGDFilterDesign, self).__init__()
self.n_iters = n_iters
self.lr = lr
self.schedule_lr = schedule_lr
self.pole_zero = pole_zero
self.verbose = verbose
self.order = order
self.magtarget = LogMagTargetFrequencyLoss()
def init_sos(self):
# create the biquad poles and zeros we will optimize
self.sos = torch.nn.Parameter(torch.ones(1, self.order, 6, requires_grad=True))
with torch.no_grad():
self.sos.data.uniform_(0.1, 0.9)
mask = torch.tensor(np.random.choice([1, -1], size=(1, self.order, 6)))
self.sos.data *= mask * torch.ones(1, self.order, 6)
# setup optimization
self.optimizer = torch.optim.SGD([self.sos], lr=self.lr)
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, self.n_iters
)
def __call__(self, target_dB):
with torch.enable_grad():
self.init_sos()
target_dB = target_dB.to(self.sos.device)
for n in range(self.n_iters):
if self.pole_zero:
g = 100 * torch.sigmoid(self.sos[:, :, 0]) # + 1.0
# all gains are held at 1 except first
g[:, 1:] = 1.0
poles_real = self.sos[:, :, 1]
poles_imag = self.sos[:, :, 2]
zeros_real = self.sos[:, :, 3]
zeros_imag = self.sos[:, :, 4]
# ensure stability
pole = torch.complex(poles_real, poles_imag)
pole = pole * torch.tanh(pole.abs()) / pole.abs()
# ensure zeros inside unit circle
zero = torch.complex(zeros_real, zeros_imag)
zero = zero * torch.tanh(zero.abs()) / zero.abs()
# Apply gain g to numerator by multiplying each coefficient by g
b0 = g
b1 = g * -2 * zero.real
b2 = g * ((zero.real ** 2) + (zero.imag ** 2))
a0 = torch.ones(g.shape, device=g.device)
a1 = -2 * pole.real
a2 = (pole.real ** 2) + (pole.imag ** 2)
# reconstruct SOS
out_sos = torch.stack([b0, b1, b2, a0, a1, a2], dim=-1)
else:
# extract coefficients
b0 = self.sos[:, :, 0]
b1 = self.sos[:, :, 1]
b2 = self.sos[:, :, 2]
a0 = torch.ones(b0.shape, device=b0.device)
a1 = self.sos[:, :, 4]
a2 = self.sos[:, :, 5]
# Eq. 4 from Nercessian et al. 2021
a1 = 2 * torch.tanh(a1)
# Eq. 5 from above
a2 = ((2 - torch.abs(a1)) * torch.tanh(a2) + torch.abs(a1)) / 2
# reconstruct SOS
out_sos = torch.stack([b0, b1, b2, a0, a1, a2], dim=-1)
loss = self.magtarget(out_sos, target_dB.view(1, -1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.schedule_lr:
self.scheduler.step()
if self.verbose:
print(f" {n+1} {loss.item():0.2f} dB")
return out_sos
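
# Illustrative usage sketch (an assumption added for clarity, not part of the
# original module). The 512-bin flat target is hypothetical; the length that
# LogMagTargetFrequencyLoss expects may differ.
if __name__ == "__main__":
    designer = SGDFilterDesign(n_iters=10, order=8, verbose=True)
    target_dB = torch.zeros(512)
    sos = designer(target_dB)
    print(sos.shape)  # expected: (1, 8, 6) second-order sections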
|
441138
|
from ..core.database import Base
class Widget(Base):
"""
We really don't need anything else in this, except an ID.
"""
pass
|
441155
|
from rest_framework import serializers
from care.facility.api.serializers import TIMESTAMP_FIELDS
from care.facility.api.serializers.facility import FacilityBasicInfoSerializer
from care.facility.models import PatientConsultation, PatientRegistration, Facility
from care.facility.models.prescription_supplier import PrescriptionSupplier
from care.utils.serializer.external_id_field import ExternalIdSerializerField
from config.serializers import ChoiceField
class MinimalPatientSerializer(serializers.ModelSerializer):
id = serializers.CharField(source="external_id")
class Meta:
model = PatientRegistration
fields = ("id", "name", "phone_number", "address")
class PrescriptionSupplierConsultationSerializer(serializers.ModelSerializer):
id = serializers.CharField(source="external_id", read_only=True)
patient = MinimalPatientSerializer(read_only=True)
class Meta:
model = PatientConsultation
fields = ("id", "prescriptions", "discharge_advice", "patient")
class PrescriptionSupplierSerializer(serializers.ModelSerializer):
id = serializers.CharField(source="external_id", read_only=True)
scheme = ChoiceField(choices=PrescriptionSupplier.SchemeChoices)
status = ChoiceField(choices=PrescriptionSupplier.StatusChoices)
consultation_object = PrescriptionSupplierConsultationSerializer(source="consultation", read_only=True)
facility_object = FacilityBasicInfoSerializer(source="facility", read_only=True)
consultation = ExternalIdSerializerField(required=True, queryset=PatientConsultation.objects.all())
facility = ExternalIdSerializerField(required=True, queryset=Facility.objects.all())
class Meta:
model = PrescriptionSupplier
exclude = ("deleted", "external_id")
read_only_fields = TIMESTAMP_FIELDS
def create(self, validated_data):
instance = super().create(validated_data)
instance.updated_user = self.context["request"].user
        instance.save()
        return instance
def update(self, instance, validated_data):
instance = super().update(instance, validated_data)
instance.updated_user = self.context["request"].user
instance.save()
return instance
|
441181
|
from __future__ import print_function
import sys
import numpy as np
import math
import mesh.patch as patch
from util import msg
def init_data(my_data, rp):
""" initialize the double Mach reflection problem """
msg.bold("initializing the double Mach reflection problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in ramp.py")
print(my_data.__class__)
sys.exit()
# get the density, momenta, and energy as separate variables
dens = my_data.get_var("density")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
ener = my_data.get_var("energy")
    # initialize the components; remember that ener here is
# rho*eint + 0.5*rho*v**2, where eint is the specific
# internal energy (erg/g)
r_l = rp.get_param("ramp.rhol")
u_l = rp.get_param("ramp.ul")
v_l = rp.get_param("ramp.vl")
p_l = rp.get_param("ramp.pl")
r_r = rp.get_param("ramp.rhor")
u_r = rp.get_param("ramp.ur")
v_r = rp.get_param("ramp.vr")
p_r = rp.get_param("ramp.pr")
gamma = rp.get_param("eos.gamma")
energy_l = p_l/(gamma - 1.0) + 0.5*r_l*(u_l*u_l + v_l*v_l)
energy_r = p_r/(gamma - 1.0) + 0.5*r_r*(u_r*u_r + v_r*v_r)
# there is probably an easier way to do this, but for now, we
# will just do an explicit loop. Also, we really want to set
    # the pressure and get the internal energy from that, and then
# compute the total energy (which is what we store). For now
# we will just fake this
myg = my_data.grid
dens[:, :] = 1.4
for j in range(myg.jlo, myg.jhi+1):
cy_up = myg.y[j] + 0.5*myg.dy*math.sqrt(3)
cy_down = myg.y[j] - 0.5*myg.dy*math.sqrt(3)
cy = np.array([cy_down, cy_up])
for i in range(myg.ilo, myg.ihi+1):
dens[i, j] = 0.0
xmom[i, j] = 0.0
ymom[i, j] = 0.0
ener[i, j] = 0.0
sf_up = math.tan(math.pi/3.0)*(myg.x[i] + 0.5*myg.dx*math.sqrt(3)-1.0/6.0)
sf_down = math.tan(math.pi/3.0)*(myg.x[i] - 0.5*myg.dx*math.sqrt(3)-1.0/6.0)
sf = np.array([sf_down, sf_up]) # initial shock front
for y in cy:
for shockfront in sf:
if y >= shockfront:
dens[i, j] = dens[i, j] + 0.25*r_l
xmom[i, j] = xmom[i, j] + 0.25*r_l*u_l
ymom[i, j] = ymom[i, j] + 0.25*r_l*v_l
ener[i, j] = ener[i, j] + 0.25*energy_l
else:
dens[i, j] = dens[i, j] + 0.25*r_r
xmom[i, j] = xmom[i, j] + 0.25*r_r*u_r
ymom[i, j] = ymom[i, j] + 0.25*r_r*v_r
ener[i, j] = ener[i, j] + 0.25*energy_r
def finalize():
""" print out any information to the user at the end of the run """
pass
|
441215
|
import unittest
from first import first
isbool = lambda x: isinstance(x, bool)
isint = lambda x: isinstance(x, int)
odd = lambda x: isint(x) and x % 2 != 0
even = lambda x: isint(x) and x % 2 == 0
is_meaning_of_life = lambda x: x == 42
class TestFirst(unittest.TestCase):
def test_empty_iterables(self):
s = set()
l = []
assert first(s) is None
assert first(l) is None
def test_default_value(self):
s = set()
l = []
assert first(s, default=42) == 42
assert first(l, default=3.14) == 3.14
l = [0, False, []]
assert first(l, default=3.14) == 3.14
def test_selection(self):
l = [(), 0, False, 3, []]
assert first(l, default=42) == 3
assert first(l, key=isint) == 0
assert first(l, key=isbool) is False
assert first(l, key=odd) == 3
assert first(l, key=even) == 0
assert first(l, key=is_meaning_of_life) is None
if __name__ == '__main__':
unittest.main()
|
441248
|
from relevanceai.dataset import Dataset
def test_reduce_dimensions(test_dataset: Dataset):
model = "pca"
alias = "pca"
n_components = 3
vector_field = "sample_1_vector_"
test_dataset.reduce_dims(
model=model,
n_components=n_components,
vector_fields=[vector_field],
alias=alias,
)
dr_vector_name = f"{alias}_vector_"
assert dr_vector_name in test_dataset.schema
|
441251
|
import re
import yaml
from jinja2 import Template as J2Template
from .config import config
from .prebid import PrebidBidder
logger = config.getLogger(__name__)
JINJA_PATTERN = re.compile(r'{{\W*(\w*)\W*}}')
def render_src(src: str, **kwargs) -> dict:
"""Get object from jinja rendered yaml removing non-word chars from variable references.
Args:
src: templated source string
kwargs: key-value pairs referenced in template
Returns:
A dict of the jinja rendered src
"""
clean_src = JINJA_PATTERN.sub(r'{{ \1 }}', src)
return yaml.safe_load(J2Template(clean_src).render(**kwargs))
def render_cfg(objname: str, bidder: PrebidBidder, media_type: str=None,
cpm: str=None, cpm_min: str=None, cpm_max: str=None) -> dict:
"""Get jinja rendered object of a top level user config object.
Args:
objname: top level object name in user provided config
bidder: a bidder instance
media_type: media type value
cpm: cpm value
cpm_min: cpm minimum value
cpm_max: cpm maximum value
Returns:
A dict of the jinja rendered src
"""
params = dict(
time=config.start_time.strftime("%m/%d/%Y-%H:%M:%S"),
run_mode='Test: ' if config.cli['test_run'] else '',
bidder_code=bidder.codestr,
bidder_name=bidder.name,
)
params.update(bidder.params)
if media_type:
params['media_type'] = media_type
if cpm:
params['cpm'] = cpm
if cpm_min:
params['cpm_min'] = cpm_min
if cpm_max:
params['cpm_max'] = cpm_max
return render_src(yaml.safe_dump(config.user[objname]), **params)
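# Illustrative usage of render_src (not part of the original module; the template string is hypothetical):
#   render_src("name: {{bidder_name}}", bidder_name="appnexus")
#   -> {'name': 'appnexus'}
# The regex substitution normalizes '{{bidder_name}}' to '{{ bidder_name }}' before rendering.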
|
441260
|
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pickle as pk
import yaml
import random
class LabeledCifar10(Dataset):
def __init__(self, file_name, data_count=None, transforms=None):
self.transforms = transforms
with open(file_name, 'rb') as file:
data = pk.load(file, encoding='latin1')
if data_count is not None:
self.images = []
self.labels = []
index = list(range(len(data['data'])))
random.shuffle(index)
choosen_index = index[: data_count]
for i in choosen_index:
self.images.append(data['data'][i])
self.labels.append(data['labels'][i])
else:
self.images = data['data']
self.labels = data['labels']
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
image = self.images[index]
label = self.labels[index]
if self.transforms is not None:
image = self.transforms(image)
image = np.array((image.transpose(2, 0, 1) - 127.5) /
127.5, dtype=np.float32)
return {'image': image, 'label': label}
class UnlabeledCifar10(Dataset):
def __init__(self, file_name, transforms=None):
        self.transforms = transforms
with open(file_name, 'rb') as file:
data = pk.load(file, encoding='latin1')
self.images = data['data']
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = self.images[index]
        if self.transforms is not None:
image = self.transforms(image)
image = np.array((image.transpose(2, 0, 1) - 127.5) /
127.5, dtype=np.float32)
return {'image': image}
class PseudoLabeledCifar10(LabeledCifar10):
def __init__(self, labeled_file_name, unlabeled_file_name, model, device, soft=True, transforms=None):
super(PseudoLabeledCifar10, self).__init__(
file_name=labeled_file_name, transforms=transforms)
# Add pseudo labeled data
with open(unlabeled_file_name, 'rb') as file:
unlabeled_data = pk.load(file, encoding='latin1')
if soft is True:
for i, label in enumerate(self.labels):
label_array = np.zeros(10, dtype=np.float32)
label_array[label] = 1.0
self.labels[i] = label_array
model.to(device)
model.eval()
for image in unlabeled_data['data']:
self.images.append(image)
image = np.array((image.transpose(2, 0, 1) - 127.5) /
127.5, dtype=np.float32)
with torch.no_grad():
output = model(torch.from_numpy(image).unsqueeze(0).to(device))
if soft is True:
logit = F.softmax(output, dim=1).squeeze(0).cpu().numpy()
else:
logit = torch.max(output, dim=1)[1].item()
self.labels.append(logit)
def cross_entropy_with_soft_target(pred, soft_targets):
    # soft-label cross entropy: H(q, p) = -sum_i q_i * log p_i, averaged over the batch
    return torch.mean(torch.sum(- soft_targets * F.log_softmax(pred, dim=1), 1))
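# Sanity check (illustrative, assumed shapes): with one-hot targets this matches F.cross_entropy
# up to floating point error.
#   pred = torch.randn(4, 10)
#   hard = torch.randint(0, 10, (4,))
#   soft = F.one_hot(hard, num_classes=10).float()
#   assert torch.allclose(cross_entropy_with_soft_target(pred, soft),
#                         F.cross_entropy(pred, hard), atol=1e-6)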
|
441275
|
import torchvision
import torch.utils.data
import resnet
from torch.autograd import Variable
from torch import nn
import early_stop_adv_train
import os, sys
from torchvision import transforms
import argparse
parser = argparse.ArgumentParser()
import warnings
warnings.filterwarnings("ignore")
parser.add_argument('--m', type=str, default='fgsm')
parser.add_argument('--g', type=str, default='0')
args = parser.parse_args()
warnings.filterwarnings('ignore')
os.environ["CUDA_VISIBLE_DEVICES"] = args.g
torch.backends.cudnn.benchmark = True
from adversarialbox.attacks import FGSMAttack, LinfPGDAttack
from adversarialbox.train import adv_train, FGSM_train_rnd
from adversarialbox.utils import to_var, pred_batch, test
train_globa_step=0
val_globa_step=0
from tqdm import tqdm
wd=1e-5
learning_rate=1e-3
epochs=100
batch_size=200
torch.backends.cudnn.benchmark = True
transform=transforms.Compose([
torchvision.transforms.RandomRotation(15, expand=True, center=None),
torchvision.transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
torchvision.transforms.Resize((64,64)),
torchvision.transforms.ToTensor(),
])
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=8)
transform_test=transforms.Compose([torchvision.transforms.Resize((64,64)),
transforms.ToTensor(),
])
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=8)
n = resnet.resnet101().cuda()
weight_p, bias_p = [], []
for name, p in n.named_parameters():
if 'bias' in name:
bias_p += [p]
else:
weight_p += [p]
trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))
base_params = filter(lambda p: id(p) not in trans_params,
n.parameters())
base_params = filter(lambda p: id(p) not in class_params,
base_params)
param = {
'delay': 10,
}
if args.m=='fgsm':
adversary = FGSMAttack()
elif args.m=='pgd':
adversary = LinfPGDAttack()
else:
print('wrong method')
exit(0)
loss1 = nn.MSELoss()
loss1.cuda()
loss2 = nn.CrossEntropyLoss()
loss2.cuda()
optimizer = torch.optim.Adamax([{'params': base_params},
{'params':n.trans_conv.parameters(),'lr':learning_rate},
{'params':n.group2.parameters(),'lr':learning_rate}],
lr=learning_rate,weight_decay=wd)
opt = torch.optim.Adamax([{'params': base_params},
{'params':n.trans_conv.parameters(),'lr':learning_rate}],
lr=learning_rate,weight_decay=wd)
if os.path.exists('bestmodel_params_adv_train_%s.pkl'%(args.m)):
checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%(args.m))
n.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['opt_state_dict'])
opt.load_state_dict(checkpoint['opt_state_dict2'])
sch = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=15)
es = early_stop_adv_train.EarlyStopping('max', patience=10)
for epoch in range(epochs):
loadertrain = tqdm(trainloader, desc='{} E{:03d}'.format('train', epoch), ncols=0)
loadertest = tqdm(testloader, desc='{} E{:03d}'.format('test', epoch), ncols=0)
epoch_loss = 0.0
correct = 0.0
total = 0.0
total2 = 0.0
correct2 = 0.0
for x_train, y_train in loadertrain:
n.train()
x_train, y_train = Variable(x_train.cuda()), Variable(y_train.cuda())
y_pre, c_pre = n(torch.cat((x_train, x_train, x_train), 1))
y_pre = y_pre.cuda()
        n.zero_grad()
        optimizer.zero_grad()
loss = loss1(torch.mul(y_pre, 255.0), torch.mul(torch.cat((x_train, x_train, x_train), 1), 1.0))
if loss.item() > 3:
loss.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm(n.parameters(), 10.0)
opt.step()
epoch_loss += loss.data.item()
_, predicted = torch.max(c_pre.data, 1)
total += y_train.size(0)
correct += predicted.eq(y_train.data).cuda().sum()
torch.cuda.empty_cache()
else:
loss_cl = loss2(c_pre, y_train)
loss_sum = torch.mul(loss, 1 / 1) + loss_cl
if epoch + 1 > param['delay']:
# use predicted label to prevent label leaking
y_pred = pred_batch(torch.cat((x_train, x_train, x_train), 1), n)
x_adv = adv_train(torch.cat((x_train, x_train, x_train), 1), y_pred, n, loss2, adversary)
n.zero_grad()
optimizer.zero_grad()
x_adv_var = to_var(x_adv)
y_pre, c_pre = n(x_adv_var)
loss_adv = loss2(c_pre, y_train)+loss1(torch.mul(y_pre, 1.0), torch.mul(torch.cat((x_train, x_train, x_train), 1), 1.0))/ 1
loss_sum = (loss_sum + loss_adv) / 2
loss_sum.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm(n.parameters(), 10.0)
optimizer.step()
epoch_loss += loss_sum.data.item()
_, predicted = torch.max(c_pre.data, 1)
total += y_train.size(0)
correct += predicted.eq(y_train.data).cuda().sum()
train_globa_step += 1
torch.cuda.empty_cache()
if loss.item() < 3 and epoch + 1 > param['delay']:
y_pre2, c_pre2 = n(y_pre)
y_pre2 = y_pre2.cuda()
n.zero_grad()
optimizer.zero_grad()
lossreg2 = loss1(torch.mul(y_pre2, 1.0), torch.mul(torch.cat((x_train, x_train, x_train), 1), 1.0))
loss_cl2 = loss2(c_pre2, y_train)
_, predicted2 = torch.max(c_pre2.data, 1)
total2 += y_train.size(0)
correct2 += predicted2.eq(y_train.data).cuda().sum()
loss_sum2 = torch.mul(lossreg2, 1 / 1) + loss_cl2
loss_sum2.backward()
torch.nn.utils.clip_grad_norm(n.parameters(), 10.0)
optimizer.step()
torch.cuda.empty_cache()
if train_globa_step % 100 == 0:
n.eval()
checkpoint = {
'state_dict': n.state_dict(),
'opt_state_dict': optimizer.state_dict(),
'opt_state_dict2': opt.state_dict(),
'epoch': epoch
}
torch.save(checkpoint, 'model_params_adv_train%s.pkl'%(args.m))
fmt = '{:.4f}'.format
loadertrain.set_postfix(loss=fmt(loss.data.item()),
acc=fmt(correct.item() / total * 100))
if (epoch) % 1 == 0:
test_loss = 0.0
correct = 0.0
total = 0.0
n.eval()
with torch.no_grad():
for x_test, y_test in loadertest:
x_test, y_test = Variable(x_test.cuda()), Variable(y_test.cuda())
y_pre, c_pre = n(torch.cat((x_test, x_test, x_test), 1))
y_pre = y_pre.cuda()
loss_cl = loss2(c_pre, y_test)
loss = loss1(torch.mul(y_pre, 1.0), torch.mul(torch.cat((x_test, x_test, x_test), 1), 1.0))
loss_sum = torch.mul(loss, 1 / 1) + loss_cl
test_loss += loss_sum.data.item()
_, predicted = torch.max(c_pre.data, 1)
total += y_test.size(0)
correct += predicted.eq(y_test.data).cuda().sum()
val_globa_step += 1
fmt = '{:.4f}'.format
loadertest.set_postfix(loss=fmt(loss_sum.data.item()),
acc=fmt(correct.item() / total * 100))
sch.step(test_loss)
fl = es.step(correct.item() / total * 100, n, optimizer, opt, epoch,args.m)
if fl:
torch.cuda.empty_cache()
sys.exit(0)
torch.cuda.empty_cache()
|
441296
|
from abc import get_cache_token
from dataclasses import dataclass
from functools import _find_impl, update_wrapper
from typing import Callable, Optional
@dataclass
class RegistryFunc:
# The function saved in the registry
func: Callable
# Whether the function should be registered for subclasses as well
include_subclasses: bool
def withregistry(base_func):
import types, weakref
registry = {}
dispatch_cache = weakref.WeakKeyDictionary()
cache_token = None
def dispatch(cls) -> Optional[RegistryFunc]:
nonlocal cache_token
if cache_token is not None:
current_token = get_cache_token()
if cache_token != current_token:
dispatch_cache.clear()
cache_token = current_token
if cls in dispatch_cache:
impl = dispatch_cache[cls]
else:
if cls in registry:
impl = registry[cls]
else:
try:
impl: Optional[RegistryFunc] = _find_impl(cls, registry)
if not impl.include_subclasses:
# Do not allow implicit inherited implementation without type
impl = None
except Exception:
impl = None
dispatch_cache[cls] = impl
return impl
def register(cls, func=None, include_subclasses=False):
nonlocal cache_token
if func is None:
if isinstance(cls, type):
return lambda f: register(cls, func=f, include_subclasses=include_subclasses)
ann = getattr(cls, '__annotations__', {})
if not ann:
raise TypeError(
f"Invalid first argument to `register()`: {cls!r}. "
f"Use either `@register(some_class)` or plain `@register` "
f"on an annotated function."
)
func = cls
# only import typing if annotation parsing is necessary
from typing import get_type_hints
argname, cls = next(iter(get_type_hints(func).items()))
assert isinstance(cls, type), (
f"Invalid annotation for {argname!r}. {cls!r} is not a class."
)
registry[cls] = RegistryFunc(func, include_subclasses)
if cache_token is None and hasattr(cls, '__abstractmethods__'):
cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
# Unlike singledispatch we do not directly override the base call
return base_func(*args, **kw)
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = types.MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, base_func)
return wrapper
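# Illustrative usage (not part of the original module): the wrapper keeps the base behaviour,
# while register/dispatch manage per-type overrides.
#
#   @withregistry
#   def render(obj):
#       return str(obj)
#
#   @render.register(int, include_subclasses=True)
#   def render_int(obj: int):
#       return f"int:{obj}"
#
#   impl = render.dispatch(bool)              # falls back to the int implementation via the MRO
#   result = impl.func(True) if impl else render(True)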
|
441344
|
import unittest
from beefore import diff
class TestDiff(unittest.TestCase):
def test_add_lines(self):
diff_content = [
"diff --git a/tests/path/to/testfile b/tests/path/to/testfile",
"@@ -1,4 +1,6 @@",
" 1",
"+2",
"+3",
" 4",
" 5",
" 6"
]
self.assertEqual(
diff.positions('tests', diff_content),
{
"path/to/testfile": {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
}
)
def test_subtract_lines(self):
diff_content = [
"diff --git a/tests/path/to/testfile b/tests/path/to/testfile",
"@@ -1,6 +1,2 @@",
" 1",
"-2",
"-3",
" 4"
]
self.assertEqual(
diff.positions('tests', diff_content),
{
"path/to/testfile": {1: 1, 2: 4}
}
)
def test_add_subtract(self):
diff_content = [
"diff --git a/tests/path/to/testfile b/tests/path/to/testfile",
"index 5f4d692..5b05678 100644",
"--- a/tests/path/to/testfile",
"+++ b/tests/path/to/testfile",
"@@ -2,0 +2,1 @@",
" 1",
"+2",
" 3",
"@@ -13,7 +14,4 @@",
" 4",
"-5",
"-6",
"+7",
" 8",
"-9",
"+10"
]
self.assertEqual(
diff.positions('tests', diff_content),
{
"path/to/testfile": {2: 1, 3: 2, 4: 3, 14: 5, 15: 8, 16: 9, 17: 11}
}
)
def test_no_diff(self):
diff_content = [
"1",
"2",
"3",
"4"
]
self.assertEqual(
diff.positions('tests', diff_content),
{}
)
def test_multi_file(self):
diff_content = [
"diff --git a/tests/path/to/testfile b/tests/path/to/testfile",
"index 5f4d692..5b05678 100644",
"--- a/tests/path/to/testfile",
"+++ b/tests/path/to/testfile",
"@@ -2,0 +2,1 @@",
" 1",
"+2",
" 3",
"@@ -13,7 +14,4 @@",
" 4",
"-5",
"-6",
"+7",
" 8",
"-9",
"+10",
"diff --git a/tests/path/to/secondfile b/tests/path/to/secondfile",
"index 5f4d692..5b05678 100644",
"--- a/tests/path/to/secondfile",
"+++ b/tests/path/to/secondfile",
"@@ -2,0 +2,1 @@",
" 1",
"+2",
" 3",
"@@ -13,7 +14,4 @@",
" 4",
"-5",
"-6",
"+7",
" 8",
"-9",
"+10"
]
self.assertEqual(
diff.positions('tests', diff_content),
{
"path/to/testfile": {2: 1, 3: 2, 4: 3, 14: 5, 15: 8, 16: 9, 17: 11},
"path/to/secondfile": {2: 1, 3: 2, 4: 3, 14: 5, 15: 8, 16: 9, 17: 11},
}
)
|
441345
|
import abc
import importlib
from typing import TypeVar, Generic, Union, Any, cast, Iterator, Type, Callable
from types import ModuleType
from pathlib import Path
import amino
from amino import boolean
from amino.func import I
from amino.tc.base import F
from amino.util.mod import unsafe_import_name
from amino.tc.monoid import Monoid
from amino.tc.monad import Monad
from amino.util.string import ToStr
from amino.do import do, Do
from amino.util.exception import format_exception
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
D = TypeVar('D')
E = TypeVar('E', bound=Exception)
class ImportFailure(ToStr):
@abc.abstractproperty
def expand(self) -> 'amino.list.List[str]':
...
class ImportException(ImportFailure):
def __init__(self, desc: str, exc: Exception) -> None:
self.desc = desc
self.exc = exc
def _arg_desc(self) -> 'amino.list.List':
return amino.List(self.desc, str(self.exc))
@property
def expand(self) -> 'amino.list.List':
return format_exception(self.exc).cons(self.desc)
class InvalidLocator(ImportFailure):
def __init__(self, msg: str) -> None:
self.msg = msg
def _arg_desc(self) -> 'amino.list.List':
return amino.List(self.msg)
@property
def expand(self) -> 'amino.list.List':
return amino.List(self.msg)
class Either(Generic[A, B], F[B], implicits=True):
@staticmethod
def import_name(mod: str, name: str) -> 'Either[ImportFailure, B]':
try:
value = (
__builtins__[name]
if mod == '__builtins__' else
unsafe_import_name(mod, name)
)
except Exception as e:
return Left(ImportException(f'{mod}.{name}', e))
else:
return Left(InvalidLocator(f'{mod} has no attribute {name}')) if value is None else Right(value)
@staticmethod
def import_path(path: str) -> 'Either[ImportFailure, B]':
from amino.list import List
return (
List.wrap(path.rsplit('.', 1))
.lift_all(0, 1)
.to_either(InvalidLocator(f'invalid module path: {path}'))
.flat_map2(lambda a, b: Either.import_name(a, b).lmap(lambda c: ImportException(path, c)))
)
@staticmethod
def import_module(modname: str) -> 'Either[ImportFailure, ModuleType]':
try:
mod = importlib.import_module(modname)
except Exception as e:
return Left(ImportException(modname, e))
else:
return Right(mod)
@staticmethod
def import_file(path: Path) -> 'Either[ImportFailure, ModuleType]':
from amino.maybe import Maybe
def step2(spec: importlib._bootstrap.ModuleSpec) -> ModuleType:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
try:
module = Maybe.check(importlib.util.spec_from_file_location('temp', str(path))) / step2
except Exception as e:
return Left(ImportException(str(path), e))
else:
return module.to_either(InvalidLocator(f'failed to import `{path}`'))
@staticmethod
def import_from_file(path: Path, name: str) -> 'Either[ImportFailure, B]':
@do(Either[ImportFailure, B])
def run() -> Do:
module = yield Either.import_file(path)
attr = getattr(module, name, None)
yield (
Left(InvalidLocator(f'{path} has no attribute {name}'))
if attr is None else
Right(attr)
)
return run()
@staticmethod
def exports(modpath: str) -> 'Either[ImportFailure, amino.List[Any]]':
@do(Either[ImportFailure, amino.List[Any]])
def run() -> Do:
from amino.list import Lists
exports = yield Either.import_name(modpath, '__all__')
yield Lists.wrap(exports).traverse(lambda a: Either.import_name(modpath, a), Either)
return run()
@staticmethod
def getattr(obj: Any, attr: str) -> 'Either[str, A]':
return Right(getattr(obj, attr)) if hasattr(obj, attr) else Left(f'`{obj}` has no attribute `{attr}`')
@property
def is_right(self) -> 'amino.boolean.Boolean':
return boolean.Boolean(isinstance(self, Right))
@property
def is_left(self) -> 'amino.boolean.Boolean':
return boolean.Boolean(isinstance(self, Left))
@property
def __left_value(self) -> A:
return cast(A, self.value)
@property
def __right_value(self) -> B:
return cast(B, self.value)
def leffect(self, f: Callable[[A], Any]) -> 'Either[A, B]':
if self.is_left:
f(self.__left_value)
return self
def bieffect(self, l: Callable[[A], Any], r: Callable[[B], Any]) -> 'Either[A, B]':
self.cata(l, r)
return self
def cata(self, fl: Callable[[A], C], fr: Callable[[B], C]) -> C:
return fl(self.__left_value) if self.is_left else fr(self.__right_value)
def bimap(self, fl: Callable[[A], C], fr: Callable[[B], D]) -> 'Either[C, D]':
return Left(fl(self.__left_value)) if self.is_left else Right(fr(self.__right_value))
def recover_with(self, f: Callable[[A], 'Either[C, B]']) -> 'Either[C, B]':
return self.cata(f, Right)
def right_or_map(self, f: Callable[[A], C]) -> C:
return self.cata(f, I)
def value_or(self, f: Callable[[A], B]) -> B:
return self.cata(f, I)
def left_or(self, f: Callable[[B], A]) -> A:
return self.cata(I, f)
def left_or_map(self, f: Callable[[B], A]) -> A:
return self.cata(I, f)
@property
def ljoin(self) -> 'Either[A, C]':
return self.right_or_map(Left)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.value)
def __repr__(self) -> str:
return '{}({!r})'.format(self.__class__.__name__, self.value)
@property
def to_list(self) -> 'amino.list.List[B]':
return self.to_maybe.to_list
def lmap(self, f: Callable[[A], C]) -> 'Either[C, B]':
return cast(Either, Left(f(self.__left_value))) if self.is_left else cast(Either, Right(self.__right_value))
def get_or_raise(self) -> B:
def fail(err: A) -> B:
raise err if isinstance(err, Exception) else Exception(err)
return self.cata(fail, I)
@property
def fatal(self) -> B:
return self.get_or_raise()
def __iter__(self) -> Iterator[B]:
return iter(self.to_list)
@property
def swap(self) -> 'Either[B, A]':
return self.cata(Right, Left)
@property
def json_repr(self) -> B:
return self.to_maybe.json_repr
def accum_error(self, b: 'Either[A, C]') -> 'Either[A, C]':
return self.accum_error_f(lambda: b)
def accum_error_f(self, f: Callable[[], 'Either[A, C]'], *a, **kw) -> 'Either[A, C]':
def acc(v: A) -> 'Either[A, C]':
monoid = Monoid.fatal_for(self.__left_value)
return monoid.combine(self.__left_value, v)
return f(*a, **kw).lmap(acc) if self.is_left else self
def accum_error_lift(self, f: Callable[[], 'Either[A, C]'], *a, **kw) -> 'Either[A, C]':
def acc(v: A) -> 'Either[A, C]':
monoid = Monoid.fatal_for(self.__left_value)
monad = Monad.fatal_for(self.__left_value)
return monoid.combine(self.__left_value, monad.pure(v))
return f(*a, **kw).lmap(acc) if self.is_left else self
def filter_with(self, f: Callable[[B], bool], g: Callable[[B], C]) -> 'Either[C, B]':
return self // (lambda a: Right(a) if f(a) else Left(g(a)))
def left_contains(self, a: A) -> 'amino.boolean.Boolean':
return boolean.Boolean(self.is_left and self.__left_value == a)
class Right(Either[A, B]):
def __init__(self, value: B) -> None:
self.value = value
def __eq__(self, other: Any) -> bool:
return isinstance(other, Right) and self._Either__right_value == other._Either__right_value
class Left(Either[A, B]):
def __init__(self, value: A) -> None:
self.value = value
def __eq__(self, other: Any) -> bool:
return isinstance(other, Left) and self._Either__left_value == other._Either__left_value
def Try(f: Callable[..., A], *a: Any, **kw: Any) -> Either[Exception, A]:
try:
return Right(f(*a, **kw))
except Exception as e:
return Left(e)
__all__ = ('Either', 'Left', 'Right', 'ImportFailure', 'ImportException', 'InvalidLocator', 'Try')
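# Illustrative usage of Try (not part of the original module):
#   Try(int, "42")            # -> Right(42)
#   Try(int, "not a number")  # -> Left(ValueError(...))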
|
441351
|
import errno
import omcache
import random
import select
from pytest import raises # pylint: disable=E0611
from time import sleep
from . import OMcacheCase
class TestOmcache(OMcacheCase):
def test_internal_utils(self):
err = select.error(errno.EINTR, "interrupted")
assert omcache._select_errno(err) == errno.EINTR
def test_set_servers(self):
servers = [self.get_memcached(), self.get_memcached()]
oc = omcache.OMcache(servers, self.log)
oc.set_servers(servers + ["127.0.0.1:2", "127.0.0.1:113333", "127.0.0.1:113000"])
oc.set_servers(servers * 8)
oc.set_servers(servers)
def test_stat(self):
servers = [self.get_memcached(), self.get_memcached()]
oc = omcache.OMcache(servers, self.log)
s1 = oc.stat("settings", 0)
s2 = oc.stat("settings", 1)
assert s1 != s2
s1 = oc.stat("", 0)
s2 = oc.stat("", 1)
assert s1 != s2
assert len(s1) > 30
assert len(s2) > 30
oc.noop(0)
oc.noop(1)
def test_incr_decr(self):
oc = omcache.OMcache([self.get_memcached()], self.log)
with raises(omcache.NotFoundError):
oc.increment("test_incr_decr", 2, initial=None)
assert oc.increment("test_incr_decr", 2, initial=0) == 0
assert oc.increment("test_incr_decr", 2) == 2
assert oc.increment("test_incr_decr", 2) == 4
assert oc.increment("test_incr_decr", 2, initial=42) == 6
assert oc.decrement("test_incr_decr", 5) == 1
assert oc.decrement("test_incr_decr", 5) == 0
oc.set("test_incr_decr", "567")
assert oc.decrement("test_incr_decr", 5) == 562
oc.set("test_incr_decr", "x567")
with raises(omcache.DeltaBadValueError):
oc.decrement("test_incr_decr", 5)
oc.set("test_incr_decr", "100")
assert oc.decrement("test_incr_decr", -5) == 105
assert oc.increment("test_incr_decr", -5) == 100
with raises(omcache.Error):
oc.increment("test_incr_decr_e", 2, expiration=42, initial=None)
def test_add_replace_set_delete(self):
oc = omcache.OMcache([self.get_memcached(), self.get_memcached()], self.log)
with raises(omcache.NotFoundError):
oc.replace("test_arsd", "replaced")
with raises(omcache.NotFoundError):
oc.get("test_arsd")
oc.add("test_arsd", "added")
assert oc.get("test_arsd") == b"added"
oc.set("test_arsd", "set")
assert oc.get("test_arsd") == b"set"
oc.replace("test_arsd", "replaced")
assert oc.get("test_arsd") == b"replaced"
with raises(omcache.KeyExistsError):
oc.add("test_arsd", "foobar")
assert oc.get("test_arsd") == b"replaced"
oc.delete("test_arsd")
with raises(omcache.NotFoundError):
oc.delete("test_arsd")
oc.set("test_arsd", "arsd", flags=531)
res, flags, cas = oc.get("test_arsd", flags=True, cas=True) # pylint: disable=W0632
assert res == b"arsd"
assert flags == 531
assert cas > 0
res, flags = oc.get("test_arsd", flags=True) # pylint: disable=W0632
assert flags == 531
def test_cas(self):
oc = omcache.OMcache([self.get_memcached()], self.log)
with raises(omcache.NotFoundError):
oc.set("test_cas", "xxx", cas=42424242)
oc.set("test_cas", "xxx")
with raises(omcache.KeyExistsError):
oc.set("test_cas", "xxx", cas=42424242)
res, cas1 = oc.get("test_cas", cas=True) # pylint: disable=W0632
assert res == b"xxx"
assert cas1 > 0
oc.set("test_cas", "42", cas=cas1)
with raises(omcache.KeyExistsError):
oc.set("test_cas", "zzz", cas=cas1)
res, cas2 = oc.get("test_cas", cas=True) # pylint: disable=W0632
assert res == b"42"
assert cas2 > 0
assert cas2 != cas1
oc.increment("test_cas", 8)
with raises(omcache.KeyExistsError):
oc.set("test_cas", "zzz", cas=cas2)
res, cas3 = oc.get("test_cas", cas=True) # pylint: disable=W0632
assert res == b"50"
assert cas3 > 0
assert cas3 != cas2
def test_touch(self):
oc = omcache.OMcache([self.get_memcached()], self.log)
oc.set("test_touch", "qwerty", expiration=2)
assert oc.get("test_touch") == b"qwerty"
sleep(2)
with raises(omcache.NotFoundError):
oc.get("test_touch")
oc.set("test_touch", "qwerty", expiration=1)
assert oc.get("test_touch") == b"qwerty"
oc.touch("test_touch", expiration=3)
sleep(2)
assert oc.get("test_touch") == b"qwerty"
def test_append_prepend(self):
oc = omcache.OMcache([self.get_memcached()], self.log)
oc.set("test_ap", "asdf")
assert oc.get("test_ap") == b"asdf"
oc.append("test_ap", "zxcvb")
assert oc.get("test_ap") == b"asdfzxcvb"
oc.prepend("test_ap", "qwerty")
assert oc.get("test_ap") == b"qwertyasdfzxcvb"
def test_multi(self):
oc = omcache.OMcache([self.get_memcached(), self.get_memcached()], self.log)
item_count = 123
val = str(random.random()).encode("utf-8")
for i in range(item_count):
oc.set("test_multi_{0}".format(i * 2), val, flags=i)
keys = ["test_multi_{0}".format(i) for i in range(item_count * 2)]
random.shuffle(keys)
results = oc.get_multi(keys)
assert len(results) == item_count
for i in range(item_count):
assert results["test_multi_{0}".format(i * 2).encode("utf-8")] == val
# test with flags and cas
results = oc.get_multi(keys, flags=True)
assert len(results) == item_count
for i in range(item_count):
assert results["test_multi_{0}".format(i * 2).encode("utf-8")] == (val, i)
results = oc.get_multi(keys, cas=True)
assert len(results) == item_count
# count the number of distinct cas values, we can't just compare
# them to the previous entry as we're using two memcache servers
# which may use the same cas values
casses = set()
for i in range(item_count):
res, cas = results["test_multi_{0}".format(i * 2).encode("utf-8")]
assert res == val
casses.add(cas)
assert len(casses) > item_count / 3
results = oc.get_multi(keys, cas=True, flags=True)
assert len(results) == item_count
casses = set()
for i in range(item_count):
res, flags, cas = results["test_multi_{0}".format(i * 2).encode("utf-8")]
assert res == val
assert flags == i
casses.add(cas)
assert len(casses) > item_count / 3
def test_dist_methods(self):
# just make sure the different distribution methods distribute keys, well, differently
mc1 = self.get_memcached()
mc2 = self.get_memcached()
oc = omcache.OMcache([mc1, mc2], self.log)
item_count = 123
for i in range(item_count):
oc.set("test_dist_{0}".format(i), "orig_dist")
oc.flush()
oc.set_distribution_method("libmemcached_ketama")
for i in range(item_count):
oc.set("test_dist_{0}".format(i), "ketama")
oc.flush()
oc.set_distribution_method("libmemcached_ketama_weighted")
for i in range(item_count):
oc.set("test_dist_{0}".format(i), "ketama_weighted")
oc.flush()
oc.set_distribution_method("libmemcached_ketama_pre1010")
for i in range(item_count):
oc.set("test_dist_{0}".format(i), "ketama_pre1010")
oc.flush()
with raises(omcache.Error):
oc.set_distribution_method("xxx")
keys = ["test_dist_{0}".format(i) for i in range(item_count)]
oc.set_servers([mc1])
results = list(oc.get_multi(keys).values())
oc.set_servers([mc2])
results.extend(oc.get_multi(keys).values())
counts = {}
for value in results:
if value not in counts:
counts[value] = 0
counts[value] += 1
assert set([b"ketama", b"ketama_weighted", b"ketama_pre1010"]).issuperset(counts)
assert counts[b"ketama"] >= item_count / 10
assert counts[b"ketama_weighted"] >= item_count / 10
assert counts[b"ketama_pre1010"] >= item_count / 10
|
441373
|
import os
import pytest
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
from conans.util.files import load
@pytest.fixture(scope="module")
def setup():
client = TestClient(default_server_user=True)
conanfile = GenConanfile().with_settings("os", "arch").with_package_file("helloHello0.h", "x")
client.save({"conanfile.py": conanfile})
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
client.run("export . {}".format(ref))
client.run("install {} -s os=Windows --build missing".format(ref))
client.run("install {} -s os=Linux --build missing".format(ref))
client.run("install {} -s os=Linux -s arch=x86 --build missing".format(ref))
client.run("upload {} --all".format(ref))
package_ids = os.listdir(client.cache.package_layout(ref).packages())
return client, ref, package_ids, str(conanfile)
def test_download_all(setup):
client, ref, package_ids, _ = setup
new_client = TestClient(servers=client.servers, users=client.users)
# Should retrieve the three packages
new_client.run("download Hello0/0.1@lasote/stable")
packages = os.listdir(os.path.join(new_client.cache.package_layout(ref).packages()))
assert set(packages) == set(package_ids)
def test_download_some_reference(setup):
client, ref, package_ids, _ = setup
new_client = TestClient(servers=client.servers, users=client.users)
# Should retrieve the specified packages
new_client.run("download Hello0/0.1@lasote/stable -p %s" % package_ids[0])
packages = os.listdir(new_client.cache.package_layout(ref).packages())
assert len(packages) == 1
assert packages[0] in package_ids
new_client.run("download Hello0/0.1@lasote/stable -p %s -p %s" % (package_ids[0],
package_ids[1]))
packages = os.listdir(new_client.cache.package_layout(ref).packages())
assert len(packages) == 2
assert packages[0] in package_ids
assert packages[1] in package_ids
def test_download_recipe_twice(setup):
client, ref, package_ids, conanfile = setup
new_client = TestClient(servers=client.servers, users=client.users)
new_client.run("download Hello0/0.1@lasote/stable")
conanfile_path = new_client.cache.package_layout(ref).conanfile()
assert conanfile == load(conanfile_path)
new_client.run("download Hello0/0.1@lasote/stable")
assert conanfile == load(conanfile_path)
new_client.run("download Hello0/0.1@lasote/stable")
assert conanfile == load(conanfile_path)
def test_download_packages_twice(setup):
client, ref, package_ids, _ = setup
new_client = TestClient(servers=client.servers, users=client.users)
expected_header_contents = "x"
pref = PackageReference(ref, package_ids[0])
package_folder = new_client.cache.package_layout(ref).package(pref)
new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "helloHello0.h"))
assert expected_header_contents == got_header
new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "helloHello0.h"))
assert expected_header_contents == got_header
new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "helloHello0.h"))
assert expected_header_contents == got_header
def test_download_all_but_no_packages():
# Remove all from remote
new_client = TestClient(default_server_user=True)
# Try to install all
new_client.run("download Hello0/0.1@lasote/stable", assert_error=True)
assert "Recipe not found: 'Hello0/0.1@lasote/stable'" in new_client.out
# Upload only the recipe
new_client.save({"conanfile.py": GenConanfile()})
new_client.run("export . Hello0/0.1@lasote/stable ")
new_client.run("upload Hello0/0.1@lasote/stable --all")
# And try to download all
new_client.run("download Hello0/0.1@lasote/stable")
assert "No remote binary packages found in remote" in new_client.out
|
441377
|
import sys
n, m = map(int, sys.stdin.readline().split(' '))
nums = sorted(list(map(int, sys.stdin.readline().split(' '))))
selected = [0 for _ in range(m)]
used = [0 for _ in range(n + 1)]
def rec_func(k):
if k == m:
for x in selected:
sys.stdout.write(str(x) + ' ')
sys.stdout.write('\n')
else:
last_cand = 0
for cand in range(n):
if used[cand] == 1 or nums[cand] == last_cand:
continue
last_cand = nums[cand]
selected[k] = nums[cand]
used[cand] = 1
rec_func(k + 1)
selected[k] = 0
used[cand] = 0
rec_func(0)
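# Example (illustrative): with input lines "3 2" and "1 2 2" the recursion prints the
# length-2 permutations, skipping duplicates via the last_cand check:
#   1 2
#   2 1
#   2 2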
|
441401
|
class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
        listvalue = []
        total = 0
        for op in ops:
            if op == '+':
                score = listvalue[-1] + listvalue[-2]
            elif op == 'C':
                total -= listvalue.pop()
                continue
            elif op == 'D':
                score = 2 * listvalue[-1]
            else:
                score = int(op)
            total += score
            listvalue.append(score)
        return total
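# Example (illustrative): Solution().calPoints(["5", "2", "C", "D", "+"]) == 30
#   "5" -> [5]; "2" -> [5, 2]; "C" removes 2 -> [5]; "D" doubles -> [5, 10]; "+" -> [5, 10, 15]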
|
441416
|
import sys, json
import subprocess
from docker_dt import ExpRunner
from os.path import expanduser
from pssh.clients.native.single import SSHClient
class SingleNodeExp(ExpRunner):
def __init__(self, config):
""""""
self.config = config
self._parse_config(config)
def _parse_config(self, config):
self.host_user_dir = config["host_user_dir"]
self.docker_user_dir = config["docker_user_dir"]
self.docker_user = config["docker_user"]
self.docker_ssh_port = config["docker_ssh_port"]
self.script_path = self._trans_docker_path(config["script_path"])
self.script_args = config["script_args"]
self.log_folder = config['log_folder']
self.docker_key = config["docker_ssh_key"]
self.bw_limit = "ST"
self.default_bw = "ST"
def _start_containers(self):
""" start local container only"""
stop_cmd = "docker kill $(docker ps -q)"
pull_cmd = "docker pull zarzen/horovod-mod:1.0"
start_cmd = "sudo docker run --gpus 1 --network=host --detach --ipc=host "\
"-v {}/autorun/distributed-training:{}/distributed-training "\
"-v {}/autorun/horovod_logs:{}/horovod_logs "\
"-v {}/data:{}/data "\
"zarzen/horovod-mod:1.0".format(self.host_user_dir, self.docker_user_dir,
self.host_user_dir, self.docker_user_dir,
self.host_user_dir, self.docker_user_dir)
subprocess.run(stop_cmd, shell=True)
subprocess.run(pull_cmd, shell=True)
subprocess.run(start_cmd, shell=True)
def _init_host_env(self):
check_cmd = "mkdir ~/autorun; mkdir ~/autorun/horovod_logs; " \
"mkdir ~/autorun/horovod_logs/hooks; "\
"mkdir ~/autorun/horovod_logs/model_log; "\
"mkdir ~/autorun/horovod_logs/mpi_events; "\
"mkdir ~/autorun/logs/; "\
"mkdir ~/autorun/logs/net; mkdir ~/autorun/logs/cpu; mkdir ~/data "
subprocess.run(check_cmd, shell=True)
check_cmd = "cd ~/autorun; ls|grep distributed-training"
output = subprocess.check_output(check_cmd, shell=True)
if output != b"":
git_pull = "cd ~/autorun/distributed-training; git pull"
subprocess.run(git_pull, shell=True)
else:
cmd = "cd ~/autorun;"\
"git clone https://github.com/zarzen/distributed-training.git"
subprocess.run(cmd, shell=True)
def run(self):
""""""
self._init_host_env()
self._start_containers()
self._init_docker_ssh()
self.exist_logs = self._get_logs()
        # self._exe_cmd(self.container, "ls")
cmd = self.build_train_cmd()
print('running command:', cmd)
        self._exe_cmd(self.container, cmd)
print('End experiment')
self.move_log()
def build_train_cmd(self):
""""""
exp_cmd = "python3 {} {}".format(self.script_path, self.script_args)
return exp_cmd
def _init_docker_ssh(self):
        self.container = SSHClient("localhost", user=self.docker_user, port=2022,
                                   pkey=self.docker_key)
def _kill_containers(self):
stop_cmd = "docker kill $(docker ps -q)"
subprocess.run(stop_cmd, shell=True)
def _exe_cmd(self, client, cmd):
_channel, _host, stdout, stderr, stdin = client.run_command(cmd)
for line in stdout:
print(_host, ":", line)
for line in stderr:
print(_host, ":", line)
def main():
if len(sys.argv) < 2:
print("Please specific config file")
sys.exit()
return
with open(sys.argv[1]) as config_file:
config = json.load(config_file)
exp = SingleNodeExp(config)
exp.run()
if __name__ == "__main__":
main()
|
441439
|
from .attributes import AttributeItem
from .dom import ItemCollection
class InputCollection(ItemCollection):
def __init__(self):
super(InputCollection, self).__init__()
self.item = InputItem
class InputItem(AttributeItem):
def __init__(self, name, value=None, **kwargs):
super(InputItem, self).__init__(name, value, **kwargs)
self._current_value = value
self.placeholder = kwargs.get("placeholder", None)
self.default_value = kwargs.get("default_value", None)
self.options = OptionCollection()
    def changed(self):
        return self.value != self._current_value
class OptionCollection(ItemCollection):
def __init__(self):
super(OptionCollection, self).__init__()
self.item = OptionItem
class OptionItem(object):
def __init__(self, name, value, **kwargs):
self.name = name
self.value = value
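# Illustrative behaviour (assumes AttributeItem stores the constructor value on `self.value`):
#   item = InputItem("username", value="alice")
#   item.changed()        # False while the value is untouched
#   item.value = "bob"
#   item.changed()        # True after the edit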
|
441492
|
import os
import json
import numpy as np
from .detection import DETECTION
from ..paths import get_file_path
import xml.etree.ElementTree as ET
import json
class myData(DETECTION):
def __init__(self, db_config, split=None, sys_config=None):
assert split is None or sys_config is not None
super(myData, self).__init__(db_config)
        # base path where the training data is stored
self.base_data_path = '/home/myuser/xujing/CornerNet_lite_traffic/data/'
        # ImageNet mean and std
self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
self._std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
        # class ids (replace with your own)
self._myData_cls_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,12, 13,
14, 15, 16, 17, 18, 19, 20, 21
]
        # class names (replace with your own)
self._myData_cls_names = [
'class_1', 'class_2', 'class_3', 'class_4', 'class_5', 'class_6', 'class_7',
'class_8', 'class_9', 'class_10','class_11', 'class_12',
'class_13', 'class_14', 'class_15','class_16', 'class_17', 'class_18',
'class_19', 'class_20','class_0'
]
        self._cls2myData = {ind + 1: myData_id for ind, myData_id in enumerate(self._myData_cls_ids)}  # e.g. {1: 1, 2: 2, ...}
        self._myData2cls = {myData_id: cls_id for cls_id, myData_id in self._cls2myData.items()}  # e.g. {1: 1, 2: 2, ...}
        self._myData2name = {cls_id: cls_name for cls_id, cls_name in zip(self._myData_cls_ids, self._myData_cls_names)}  # e.g. {1: "class_1"}
        self._name2myData = {cls_name: cls_id for cls_id, cls_name in self._myData2name.items()}  # e.g. {"class_1": 1}
if split is not None:
            # root directory of myData
            myData_dir = os.path.join(self.base_data_path, "myData")  # replace with your own path
            # dataset split
self._split = {
"train": "train",
"valid": "valid",
"test": "test"
}[split]
            # folder containing the image files
self._data_dir = os.path.join(myData_dir, 'JPEGImages',self._split)
            # folder containing the annotation files
self.xml_path = os.path.join(myData_dir, "Annotations",self._split)
self._detections, self._eval_ids = self._load_myData_annos()
self._image_ids = list(self._detections.keys())
self._db_inds = np.arange(len(self._image_ids))
def _load_myData_annos(self):
eval_ids = {}
detections = {}
i = 0
        xml_path = self.xml_path  # replace with your own path
for f in os.listdir(xml_path):
res = []
if not f.endswith('.xml'):
continue
            name = os.path.splitext(f)[0] + '.jpg'
eval_ids[name] = i
i = i + 1
bndbox = dict()
size = dict()
current_image_id = None
current_category_id = None
file_name = None
size['width'] = None
size['height'] = None
size['depth'] = None
xml_file = os.path.join(xml_path, f)
# print(xml_file)
tree = ET.parse(xml_file)
root = tree.getroot()
if root.tag != 'annotation':
raise Exception('pascal voc xml root element should be annotation, rather than {}'.format(root.tag))
# elem is <folder>, <filename>, <size>, <object>
for elem in root:
current_parent = elem.tag
current_sub = None
object_name = None
if elem.tag == 'folder':
continue
if elem.tag == 'filename':
file_name = elem.text
for subelem in elem:
bndbox['xmin'] = None
bndbox['xmax'] = None
bndbox['ymin'] = None
bndbox['ymax'] = None
current_sub = subelem.tag
if current_parent == 'object' and subelem.tag == 'name':
object_name = subelem.text
elif current_parent == 'size':
if size[subelem.tag] is not None:
raise Exception('xml structure broken at size tag.')
size[subelem.tag] = int(subelem.text)
# option is <xmin>, <ymin>, <xmax>, <ymax>, when subelem is <bndbox>
for option in subelem:
if current_sub == 'bndbox':
if bndbox[option.tag] is not None:
raise Exception('xml structure corrupted at bndbox tag.')
bndbox[option.tag] = int(float(option.text))
# only after parse the <object> tag
if bndbox['xmin'] is not None:
bbox = []
# x
bbox.append(bndbox['xmin'])
# y
bbox.append(bndbox['ymin'])
# w
bbox.append(bndbox['xmax'])
# h
bbox.append(bndbox['ymax'])
category = self._name2myData[object_name]
bbox.append(category)
res.append(bbox)
# print(res)
if len(res) == 0:
detections[name] = np.zeros((0, 5), dtype=np.float32)
else:
detections[name] = np.array(res, dtype=np.float32)
return detections, eval_ids
def image_path(self, ind):
file_name = self._image_ids[ind]
return os.path.join(self._data_dir, file_name)
def detections(self, ind):
file_name = self._image_ids[ind]
return self._detections[file_name].copy()
def cls2name(self, cls):
myData = self._cls2myData[cls]
return self._myData2name[myData]
if __name__ == '__main__':
detections, eval_ids = _load_myData_annos(xml_path)
print(detections, eval_ids)
|
441507
|
from future.backports.urllib.parse import urlencode
from future.backports.urllib.parse import urlparse
import inspect
import os
import sys
from otest.result import get_issuer
def set_webfinger_resource(oper, args):
"""
Context: WebFinger Query
Action: Specifies the webfinger resource. If the OP supports
webfinger queries then the resource is set to the value of 'webfinger_url'
or 'webfinger_email' from the test instance configuration.
    :param oper: A WebFinger instance
:param args: None or a dictionary with the key 'pattern'
"""
try:
oper.resource = oper.op_args["resource"]
except KeyError:
if oper.dynamic:
if args:
_p = urlparse(get_issuer(oper.conv))
oper.op_args["resource"] = args["pattern"].format(
test_id=oper.conv.test_id, host=_p.netloc,
oper_id=oper.conv.operator_id)
else:
_base = oper.sh.tool_conf['issuer']
if oper.conv.operator_id is None:
oper.resource = _base
else:
oper.resource = os.path.join(_base, oper.conv.operator_id,
oper.conv.test_id)
def set_configuration(oper, arg):
oper.conv.entity.capabilities.update(arg)
def set_start_page(oper, args):
_conf = oper.sh['test_conf']
_url = _conf['start_page']
_iss = oper.conv.entity.baseurl
_params = _conf['params'].replace('<issuer>', _iss)
_args = dict([p.split('=') for p in _params.split('&')])
oper.start_page = _url + '?' + urlencode(_args)
def set_op(oper, args):
_op = oper.conv.entity
for key, val in args.items():
_attr = getattr(_op, key)
if isinstance(_attr, dict):
_attr.update(val)
        else:
            setattr(_op, key, val)
def set_request_base(oper, args):
oper.op_args['base_path'] = '{}{}/'.format(oper.conv.entity.base_url, args)
oper.op_args['local_dir'] = args
def set_discovery_issuer(oper, args):
"""
Context: Authorization Query
Action: Pick up issuer ID either from static configuration or dynamic
discovery.
:param oper: An AsyncAuthn instance
:param args: None
"""
if oper.dynamic:
oper.op_args["issuer"] = get_issuer(oper.conv)
def resource(oper, args):
"""
Context:
Action:
Example:
:param oper:
:param args:
:return:
"""
_p = urlparse(get_issuer(oper.conv))
oper.op_args["resource"] = args["pattern"].format(
test_id=oper.conv.test_id, host=_p.netloc,
oper_id=oper.conv.operator_id)
def set_jwks_uri(oper, args):
"""
Context: AsyncAuthn
Action:
Example:
:param oper:
:param args:
:return:
"""
oper.req_args["jwks_uri"] = oper.conv.entity.jwks_uri
def remove_grant(oper, arg):
"""
Context:
Action:
Example:
:param oper:
:param args:
:return:
"""
oper.conv.entity.grant = {}
def conditional_execution(oper, arg):
"""
Context: AccessToken/UserInfo
Action: If the condition is not fulfilled the operation will not be
executed.
Example:
"conditional_execution":{
"return_type": ["CIT","CI","C","CT"]
}
"""
for key, val in arg.items():
if key == 'profile':
try:
if oper.profile[0] not in val.split(','):
oper.skip = True
return
except AttributeError:
if oper.profile[0] not in val:
oper.skip = True
return
elif key == 'return_type':
if oper.profile[0] not in val:
oper.skip = True
return
def factory(name):
for fname, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isfunction(obj):
if fname == name:
return obj
from oidctest.testfunc import factory as ot_factory
return ot_factory(name)
|
441509
|
import json
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
URL_TEMPLATE = "http://www2.assemblee-nationale.fr/scrutins/liste/(offset)/{offset}/(legislature)/15/(type)/TOUS/(idDossier)/TOUS"
def parse_tableau_scrutins():
num = None
nums = set()
offset = 0
while True:
url = URL_TEMPLATE.format(offset=offset)
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'lxml')
should_break = False
for line in soup.select('#listeScrutins tbody tr'):
cells = list(line.select("td"))
links = list(cells[2].select('a'))
link_dos = None
if len(links) == 2:
link_dos = urljoin(url, links[0]["href"])
link_scrutin = urljoin(url, links[1]["href"])
else:
link_scrutin = urljoin(url, links[0]["href"])
num = int(cells[0].text.strip().replace('*', ''))
if num in nums:
should_break = True
break
nums.add(num)
data = {
"numero": num,
"date": cells[1].text.strip(),
"objet": cells[2].text.replace('[dossier] [analyse du scrutin]', '').strip(),
"pour": int(cells[3].text.strip()),
"contre": int(cells[4].text.strip()),
"abstention": int(cells[5].text.strip()),
"url_dossier": link_dos,
"url_scrutin": link_scrutin,
}
print(json.dumps(data, ensure_ascii=False))
if should_break:
break
offset += 100
|
441521
|
from mock import patch
from bot.messenger import Messenger
class TestMessenger():
class MockClient():
def bot_user_id(self):
return 'mr. mockbot'
@patch('bot.messenger.Messenger')
def test_write_help_message(self, mockMessenger):
messenger = Messenger(self.MockClient())
with patch.object(messenger, 'send_message') as mock_messenger:
mock_ch_id = 1000
messenger.write_help_message(1000)
expected_message = str(
"I'm your friendly Slack bot written in Python. I'll *_respond_* to the following commands:" +
"\n> `hi <@mr. mockbot>` - I'll respond with a randomized greeting mentioning your user. :wave:" +
"\n> `<@mr. mockbot> joke` - I'll tell you one of my finest jokes, with a typing pause for effect. :laughing:" +
"\n> `<@mr. mockbot> attachment` - I'll demo a post with an attachment using the Web API. :paperclip:"
)
mock_messenger.assert_called_with(1000, expected_message)
@patch('bot.messenger.Messenger')
def test_write_error(self, mockMessenger):
messenger = Messenger(self.MockClient())
with patch.object(messenger, 'send_message') as mock_messenger:
mock_ch_id = 1000
messenger.write_error(mock_ch_id, "error message")
expected_message = ":face_with_head_bandage: my maker didn't handle this error very well:\n>```error message```"
mock_messenger.assert_called_with(1000, expected_message)
|
441559
|
import pytest
import mdtraj as md
from janus import partition
import numpy as np
import os
water = 'tests/files/test_openmm/water.pdb'
traj = md.load(water)
dis = partition.DistancePartition(traj, traj.topology, 3.8, 4.5)
dis_0 = partition.DistancePartition(traj, traj.topology, 3.8, 4.5)
dis_1 = partition.DistancePartition(traj, traj.topology, 3.8, 4.5)
dis_2 = partition.DistancePartition(traj, traj.topology, 3.8, 4.5)
def test_set_Rmin():
dis_0.set_Rmin(2.6)
dis_1.set_Rmin(2.6)
dis_2.set_Rmin(2.6)
assert dis.get_Rmin() == 3.8
assert dis_0.get_Rmin() == 2.6
assert dis_1.get_Rmin() == 2.6
assert dis_2.get_Rmin() == 2.6
def test_set_Rmax():
dis_0.set_Rmax(2.8)
dis_1.set_Rmax(3.2)
dis_2.set_Rmax(3.4)
assert dis.get_Rmax() == 4.5
assert dis_0.get_Rmax() == 2.8
assert dis_1.get_Rmax() == 3.2
assert dis_2.get_Rmax() == 3.4
def test_edit_atoms():
atom1 = dis.edit_atoms(atoms=[0,1,2,3,4], res_idx=1, remove=True)
atom2 = dis.edit_atoms(atoms=[0,1,2,3,4], res_idx=1, add=True)
assert np.allclose(np.array([0,1,2]), np.array(atom1))
assert np.allclose(np.array([0,1,2,3,4,5]), np.array(atom2))
def test_define_buffer_zone():
dis.define_buffer_zone([0], [0])
dis_0.define_buffer_zone([0], [0])
dis_1.define_buffer_zone([0], [0])
dis_2.define_buffer_zone([0], [0])
assert (dis.buffer_atoms == [8] and not dis.buffer_groups)
assert (not dis_0.buffer_atoms and not dis_0.buffer_groups)
assert (dis_1.buffer_atoms == [3] and dis_1.buffer_groups[1].atoms == [3, 4, 5])
assert np.allclose(dis_2.buffer_atoms, np.array([3, 5, 6]))
assert (dis_2.buffer_groups[1].atoms == [3, 4, 5] and dis_2.buffer_groups[2].atoms == [6, 7, 8])
def test_get_residue_info():
res = dis.get_residue_info(0)
res1 = dis.get_residue_info(1)
assert np.allclose(np.array([0,1,2]), np.array(res.atoms))
assert np.allclose(0.0655606189723, res.r_i)
assert np.allclose(np.array([3,4,5]), np.array(res1.atoms))
assert np.allclose(3.10273031189, res1.r_i)
def test_compute_COM():
xyz, weight, ratio = dis_1.compute_COM(atoms=[0,1,2])
com = np.array([0.011130575, .354230624, .588089475])
assert weight == {0: 15.999, 1: 1.008, 2: 1.008}
assert ratio == {0: 0.8880932556203164, 1: 0.055953372189841796, 2: 0.055953372189841796}
assert np.allclose(xyz, com)
|
441561
|
import shortuuid
from hattori.base import BaseAnonymizer, faker
from diplomas.models import Diploma
class EmailLogEntryAnonymizer(BaseAnonymizer):
model = Diploma
attributes = [
('created', faker.date),
('modified', faker.date),
('slug', shortuuid.uuid),
('image', lambda: f'{faker.uri_path()}.jpg'),
]
|
441566
|
import datetime
#~ from flask import current_app as app
from peewee import *
from playhouse.db_url import connect
from pantry.config import read_config_file
def get_database(fresh_config=False):
config = read_config_file()
DATABASE = config.get('database', 'file')
if DATABASE.startswith('/'):
DATABASE = 'sqlite:///{}'.format(DATABASE)
return connect(DATABASE)
class BaseModel(Model):
class Meta:
database = get_database()
class Users(BaseModel):
name = CharField(null=True)
    password = CharField()
su = CharField(null=True)
username = CharField()
email = CharField(null=True)
active = BooleanField(default=True)
def __str__(self):
return '%s %s' % (self.username, self.name)
class Meta:
table_name = 'users'
class Projects(BaseModel):
title = CharField()
description = TextField(null=True)
admin = ForeignKeyField(Users)
created_date = DateTimeField(default=datetime.datetime.now)
active = BooleanField(default=True)
def __str__(self):
return self.title
class Meta:
table_name = 'projects'
def get_containers(self):
result = Containers.select().where(Containers.project==self.id)
return result
class Hosts(BaseModel):
hostname = CharField()
api_user = CharField()
api_token = CharField()
admin = ForeignKeyField(Users)
active = BooleanField(default=True)
is_available = BooleanField(default=True)
def __str__(self):
return self.hostname
class Meta:
table_name = 'hosts'
class Containers(BaseModel):
name = CharField()
host = ForeignKeyField(Hosts)
admin = ForeignKeyField(Users)
locked = BooleanField(default=False)
project = ForeignKeyField(Projects, null=True)
created_date = DateTimeField(default=datetime.datetime.now)
def __str__(self):
return self.name
class Meta:
table_name = 'containers'
indexes = (
            # Specify a unique multi-column index on (name, host).
(('name', 'host'), True),
)
class Tags(BaseModel):
name = CharField()
color = CharField(null=True)
def __str__(self):
return self.name
class Meta:
table_name = 'tags'
class ContainerTag(BaseModel):
container = ForeignKeyField(Containers)
tag = ForeignKeyField(Tags)
class ApiTokens(BaseModel):
description = CharField(null=True)
token = CharField()
username = CharField(null=True)
def __str__(self):
return '%s: %s' % (self.description, self.token)
class Meta:
table_name = 'api_tokens'
primary_key = False
|
441580
|
from pyracing.helpers import parse_encode
class SubSessionData:
def __init__(self, data):
self.cat_id = data['catid']
self.caution_laps = data['ncautionlaps']
self.caution_type = data['cautiontype']
self.cautions = data['ncautions']
self.corners_total = data['cornersperlap']
self.driver_change_param_1 = data['driver_change_param1']
self.driver_change_param_2 = data['driver_change_param2']
self.driver_change_rule = data['driver_change_rule']
self.driver_changes = data['driver_changes']
self.event_type = data['evttype']
self.fog_density = data['weather_fog_density']
self.humidity = data['weather_rh']
self.lap_avg = data['eventavglap']
self.laps_completed = data['eventlapscomplete']
self.laps_for_qual_avg = data['nlapsforqualavg']
self.laps_for_solo_avg = data['nlapsforsoloavg']
self.lead_changes = data['nleadchanges']
self.league_id = data['leagueid']
self.league_season_id = data['league_season_id']
self.leave_marbles = data['leavemarbles']
self.max_weeks = data['maxweeks']
self.points_type = data['pointstype']
self.private_session_id = data['privatesessionid']
self.race_week = data['race_week_num']
self.reserve_status = data['rserv_status']
self.rubber_practice = data['rubberlevel_practice']
self.rubber_qualify = data['rubberlevel_qualify']
self.rubber_race = data['rubberlevel_race']
self.rubber_warmup = data['rubberlevel_warmup']
self.season_id = data['seasonid']
self.season_name = parse_encode(data['season_name'])
self.season_name_short = parse_encode(data['season_shortname'])
self.season_quarter = data['season_quarter']
self.season_year = data['season_year']
self.series_id = data['seriesid']
self.series_name = parse_encode(data['series_name'])
self.series_name_short = parse_encode(data['series_shortname'])
self.session_id = data['sessionid']
self.session_name = parse_encode(data['sessionname'])
self.sim_ses_type = data['simsestype']
self.skies = data['weather_skies']
self.special_event_type = data['specialeventtype']
self.special_event_type_text = data['specialeventtypetext']
self.strength_of_field = data['eventstrengthoffield']
self.subsession_id = data['subsessionid']
self.team_drivers_max = data['max_team_drivers']
self.team_drivers_min = data['min_team_drivers']
self.temp_unit = data['weather_temp_units']
self.temp_value = data['weather_temp_value']
self.time_of_day = data['timeofday']
self.time_start = parse_encode(data['start_time'])
self.time_start_sim = parse_encode(data['simulatedstarttime'])
self.track = parse_encode(data['track_name'])
self.track_config = parse_encode(data['track_config_name'])
self.track_id = data['trackid']
self.weather_initial = data['weather_var_initial']
self.weather_ongoing = data['weather_var_ongoing']
self.weather_type = data['weather_type']
self.wind_direction = data['weather_wind_dir']
self.wind_speed_unit = data['weather_wind_speed_units']
self.wind_speed_value = data['weather_wind_speed_value']
self.drivers = [self.Driver(x) for x in data['rows']]
class Driver:
def __init__(self, data):
self.car_class_id = data['carclassid']
self.car_class_name = parse_encode(data['ccName'])
self.car_class_name_short = parse_encode(data['ccNameShort'])
self.car_color_1 = data['car_color1']
self.car_color_2 = data['car_color2']
self.car_color_3 = data['car_color3']
self.car_id = data['carid']
self.car_num = data['carnum']
self.car_num_font = data['carnumberfont']
self.car_num_slant = data['carnumberslant']
self.car_number_color_1 = data['car_number_color1']
self.car_number_color_2 = data['car_number_color2']
self.car_number_color_3 = data['car_number_color3']
self.car_pattern = data['car_pattern']
self.car_sponser_1 = data['carsponsor1']
self.car_sponser_2 = data['carsponsor2']
self.club_id = data['clubid']
self.club_name = parse_encode(data['clubname'])
self.club_name_short = parse_encode(data['clubshortname'])
self.club_points = data['clubpoints']
self.cpi_new = data['newcpi']
self.cpi_old = data['oldcpi']
self.cust_id = data['custid']
self.damage_model = data['damage_model']
self.display_name = parse_encode(data['displayname'])
self.division = data['division']
self.division_name = parse_encode(data['divisionname'])
self.drop_race = data['dropracepoints']
self.event_type_name = parse_encode(data['evttypename'])
self.group_id = data['groupid']
self.heat_info_id = data['heatinfoid']
self.helm_color_1 = data['helm_color1']
self.helm_color_2 = data['helm_color2']
self.helm_color_3 = data['helm_color3']
self.helm_pattern = data['helm_pattern']
self.host_id = data['hostid']
self.incidents = data['incidents']
self.interval = data['interval']
self.interval_class = data['classinterval']
self.irating_new = data['newirating']
self.irating_old = data['oldirating']
self.lap_avg = data['avglap']
self.lap_best = data['bestlaptime']
self.lap_best_n = data['bestlapnum']
self.lap_qual_best = data['bestquallaptime']
self.lap_qual_best_at = data['bestquallapat']
self.lap_qual_best_n = data['bestquallapnum']
self.lap_qual_best_time = data['quallaptime']
self.laps_best_n_num = data['bestnlapsnum']
self.laps_best_n_time = data['bestnlapstime']
self.laps_comp = data['lapscomplete']
self.laps_led = data['lapslead']
self.laps_opt_comp = data['optlapscomplete']
self.league_points = data['league_points']
self.license_category = parse_encode(data['licensecategory'])
self.license_change_oval = data['license_change_oval']
self.license_change_road = data['license_change_road']
self.license_class = data['licensegroup']
self.license_level_new = data['newlicenselevel']
self.license_level_old = data['oldlicenselevel']
self.multiplier = data['multiplier']
self.official = data['officialsession']
self.pct_fuel_fill_max = data['max_pct_fuel_fill']
self.points_champ = data['champpoints']
self.points_champ_agg = data['aggchamppoints']
self.pos = data['pos']
self.pos_finish = data['finishpos']
self.pos_finish_class = data['finishposinclass']
self.pos_start = data['startpos']
self.reason_out = parse_encode(data['reasonout'])
self.reason_out_id = data['reasonoutid']
self.restrict_results = parse_encode(data['restrictresults'])
self.sim_ses_name = parse_encode(data['simsesname'])
self.sim_ses_num = data['simsesnum']
self.sim_ses_type_name = parse_encode(data['simsestypename'])
self.sub_level_new = data['newsublevel']
self.sub_level_old = data['oldsublevel']
self.suit_color_1 = data['suit_color1']
self.suit_color_2 = data['suit_color2']
self.suit_color_3 = data['suit_color3']
self.suit_pattern = data['suit_pattern']
self.time_session_start = data['sessionstarttime']
self.track_cat_id = data['track_catid']
self.track_category = parse_encode(data['track_category'])
self.ttrating_new = data['newttrating']
self.ttrating_old = data['oldttrating']
self.vehicle_key_id = data['vehiclekeyid']
self.weight_penalty_kg = data['weight_penalty_kg']
self.wheel_chrome = data['wheel_chrome']
self.wheel_color = parse_encode(data['wheel_color'])
# Race laps for all drivers of a session
class RaceLapsAll:
def __init__(self, data):
self.details = self.Details(data['details'])
self.drivers = [self.Driver(x) for x in data['startgrid']]
self.laps = [self.Lap(x) for x in data['lapdata']]
class Details:
def __init__(self, data):
self.date = data['eventDate']
self.date_unix_utc_ms = data['eventDateUTCMilliSecs']
self.driver_changes = data['driverChanges']
self.event_type = data['eventType']
self.event_type_name = parse_encode(data['eventTypeName'])
self.laps_for_qual_avg = data['nLapsForQualAvg']
self.laps_for_solo_avg = data['nLapsForSoloAvg']
self.official = data['officialSession']
self.private_session_id = data['privateSessionID']
self.private_session_name = parse_encode(
data['privateSessionName'])
self.race_panel_img = parse_encode(data['race_panel_img'])
self.race_week = data['raceWeek']
self.season_id = data['seasonID']
self.season_name = parse_encode(data['seasonName'])
self.season_name_short = parse_encode(data['seasonShortName'])
self.series_name = parse_encode(data['seriesName'])
self.series_name_short = parse_encode(data['seriesShortName'])
self.session_id = data['sessionId']
self.subsession_id = data['subSessionId']
self.track = parse_encode(data['trackName'])
self.track_config = parse_encode(data['trackConfig'])
self.track_id = data['trackid']
class Driver:
def __init__(self, data):
self.car_num = data['carnum']
self.cust_id = data['custid']
self.display_name = parse_encode(data['displayName'])
self.friend = data['friend']
self.group_id = data['groupid']
self.helmet_color_1 = data['helmetColor1']
self.helmet_color_2 = data['helmetColor2']
self.helmet_color_3 = data['helmetColor3']
self.helmet_pattern = data['helmetPattern']
self.incidents = data['numIncidents']
self.lap_avg = data['avgLapTime']
self.lap_best_num = data['fastestLapNum']
self.lap_best_time = data['fastestLapTime']
self.license_color = data['licenseColor']
self.points_champ = data['points']
self.pos_finish = data['finishPos']
self.pos_start = data['startPos']
self.watch = data['watch']
class Lap:
def __init__(self, data):
self.car_num = data['carnum']
self.cust_id = data['custid']
self.flags = data['flags']
self.lap_num = data['lapnum']
self.time_ses = data['sesTime']
# Race laps for single driver of a session
class RaceLapsDriver:
def __init__(self, data):
self.drivers = [self.Driver(x) for x in data['drivers']]
self.header = self.Header(data['header'])
self.laps = [self.Lap(x) for x in data['lapData']]
class Lap:
def __init__(self, data):
self.cust_id = data['custid']
self.flags = data['flags']
self.lap_num = data['lap_num']
self.time_ses = data['ses_time']
class Header:
def __init__(self, data):
self.car_color_1 = data['carColor1']
self.car_color_2 = data['carColor2']
self.car_color_3 = data['carColor3']
self.car_id = data['carid']
self.car_num = data['carNum']
self.car_pattern = data['carPattern']
self.date_unix_utc_ms = data['eventDateUTCMilliSecs']
self.event_date = data['eventDate']
self.event_type = data['eventtype']
self.event_type_name = parse_encode(data['eventTypeName'])
self.laps_for_qual = data['nlapsforqual']
self.laps_for_solo = data['nlapsforsolo']
self.season_name = parse_encode(data['seasonName'])
self.season_name_short = parse_encode(data['seasonShortName'])
self.series_name = parse_encode(data['seriesName'])
self.series_name_short = parse_encode(data['seriesShortName'])
self.session_id = data['sessionId']
self.subsession_id = data['subSessionId']
self.suit_color_1 = data['suitColor1']
self.suit_color_2 = data['suitColor2']
self.suit_color_3 = data['suitColor3']
self.suit_pattern = data['suitPattern']
self.team_name = parse_encode(data['teamName'])
self.track_config = parse_encode(data['trackConfig'])
self.track_id = data['trackID']
self.track_name = parse_encode(data['trackName'])
class Driver:
def __init__(self, data):
self.cust_id = data['custid']
self.display_name = parse_encode(data['displayname'])
self.helm_color_1 = data['helm_color1']
self.helm_color_2 = data['helm_color2']
self.helm_color_3 = data['helm_color3']
self.helm_pattern = data['helm_pattern']
self.lap_best = data['bestlaptime']
self.lap_best_n = data['bestlapnum']
self.lap_qual_best = data['bestquallaptime']
self.lap_qual_best_at = data['bestquallapat']
self.lap_qual_best_n = data['bestquallapnum']
self.laps_n_best_num = data['bestnlapsnum']
self.laps_n_best_time = data['bestnlapstime']
self.license_level = data['licenselevel']
|
441757
|
from colour import Color
class ColorRangeModule(object):
"""
Class to dynamically generate and select colors.
Requires the PyPI package `colour`
"""
start_color = "#00FF00"
end_color = 'red'
@staticmethod
def get_hex_color_range(start_color, end_color, quantity):
"""
Generates a list of quantity Hex colors from start_color to end_color.
:param start_color: Hex or plain English color for start of range
:param end_color: Hex or plain English color for end of range
:param quantity: Number of colours to return
:return: A list of Hex color values
"""
raw_colors = [c.hex for c in list(Color(start_color).range_to(Color(end_color), quantity))]
colors = []
for color in raw_colors:
# i3bar expects the full Hex value but for some colors the colour
# module only returns partial values. So we need to convert these colors to the full
# Hex value.
if len(color) == 4:
fixed_color = "#"
for c in color[1:]:
fixed_color += c * 2
colors.append(fixed_color)
else:
colors.append(color)
return colors
def get_gradient(self, value, colors, upper_limit=100):
"""
        Map a value in the range [0, upper_limit] onto one of the given colors.
        :param value: The value to map
        :param colors: A list of Hex color values, e.g. from get_hex_color_range
        :param upper_limit: Value corresponding to the end of the color range (default 100)
        :return: A Hex color code
"""
index = int(self.percentage(value, upper_limit))
if index >= len(colors):
return colors[-1]
elif index < 0:
return colors[0]
else:
return colors[index]
@staticmethod
def percentage(part, whole):
"""
Calculate percentage
"""
if whole == 0:
return 0
return 100 * float(part) / float(whole)
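# Illustrative usage sketch (added for clarity; not part of the original module,
# and it assumes the `colour` package is installed). It builds a 101-step
# green-to-red gradient and maps values onto it, e.g. for colouring a percentage
# in an i3bar status line.
if __name__ == '__main__':
    module = ColorRangeModule()
    colors = module.get_hex_color_range(module.start_color, module.end_color, 101)
    print(module.get_gradient(0, colors))    # first colour in the range (green)
    print(module.get_gradient(42, colors))   # a mid-range colour
    print(module.get_gradient(150, colors))  # out-of-range values clamp to the last colour (red)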
|
441804
|
import os
import time
import uuid
import requests
from conans.errors import RecipeNotFoundException, PackageNotFoundException
from conans.server.revision_list import _RevisionEntry
ARTIFACTORY_DEFAULT_USER = os.getenv("ARTIFACTORY_DEFAULT_USER", "admin")
ARTIFACTORY_DEFAULT_PASSWORD = os.getenv("ARTIFACTORY_DEFAULT_PASSWORD", "password")
ARTIFACTORY_DEFAULT_URL = os.getenv("ARTIFACTORY_DEFAULT_URL", "http://localhost:8090/artifactory")
class _ArtifactoryServerStore(object):
def __init__(self, repo_url, user, password):
self._user = user or ARTIFACTORY_DEFAULT_USER
self._password = password or ARTIFACTORY_DEFAULT_PASSWORD
self._repo_url = repo_url
@property
def _auth(self):
return self._user, self._password
@staticmethod
def _root_recipe(ref):
return "{}/{}/{}/{}".format(ref.user, ref.name, ref.version, ref.channel)
@staticmethod
def _ref_index(ref):
return "{}/index.json".format(_ArtifactoryServerStore._root_recipe(ref))
@staticmethod
def _pref_index(pref):
tmp = _ArtifactoryServerStore._root_recipe(pref.ref)
return "{}/{}/package/{}/index.json".format(tmp, pref.ref.revision, pref.id)
def get_recipe_revisions(self, ref):
        time.sleep(0.1)  # Index appears not to be updated immediately after a remove
url = "{}/{}".format(self._repo_url, self._ref_index(ref))
response = requests.get(url, auth=self._auth)
response.raise_for_status()
the_json = response.json()
if not the_json["revisions"]:
raise RecipeNotFoundException(ref)
tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
return tmp
def get_package_revisions(self, pref):
        time.sleep(0.1)  # Index appears not to be updated immediately
url = "{}/{}".format(self._repo_url, self._pref_index(pref))
response = requests.get(url, auth=self._auth)
response.raise_for_status()
the_json = response.json()
if not the_json["revisions"]:
raise PackageNotFoundException(pref)
tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
return tmp
def get_last_revision(self, ref):
revisions = self.get_recipe_revisions(ref)
return revisions[0]
def get_last_package_revision(self, ref):
revisions = self.get_package_revisions(ref)
return revisions[0]
class ArtifactoryServer(object):
def __init__(self, *args, **kwargs):
self._user = ARTIFACTORY_DEFAULT_USER
        self._password = ARTIFACTORY_DEFAULT_PASSWORD
self._url = ARTIFACTORY_DEFAULT_URL
self._repo_name = "conan_{}".format(str(uuid.uuid4()).replace("-", ""))
self.create_repository()
self.server_store = _ArtifactoryServerStore(self.repo_url, self._user, self._password)
@property
def _auth(self):
return self._user, self._password
@property
def repo_url(self):
return "{}/{}".format(self._url, self._repo_name)
@property
def repo_api_url(self):
return "{}/api/conan/{}".format(self._url, self._repo_name)
def recipe_revision_time(self, ref):
revs = self.server_store.get_recipe_revisions(ref)
for r in revs:
if r.revision == ref.revision:
return r.time
return None
def package_revision_time(self, pref):
revs = self.server_store.get_package_revisions(pref)
for r in revs:
if r.revision == pref.revision:
return r.time
return None
def create_repository(self):
url = "{}/api/repositories/{}".format(self._url, self._repo_name)
config = {"key": self._repo_name, "rclass": "local", "packageType": "conan"}
ret = requests.put(url, auth=self._auth, json=config)
ret.raise_for_status()
def package_exists(self, pref):
try:
revisions = self.server_store.get_package_revisions(pref)
if pref.revision:
for r in revisions:
if pref.revision == r.revision:
return True
return False
return True
except Exception: # When resolves the latest and there is no package
return False
def recipe_exists(self, ref):
try:
revisions = self.server_store.get_recipe_revisions(ref)
if ref.revision:
for r in revisions:
if ref.revision == r.revision:
return True
return False
return True
except Exception: # When resolves the latest and there is no package
return False
|
441819
|
import re
import sys
from hulks.base import BaseHook
class CheckLoggerHook(BaseHook):
def _show_error_message(self, filename, line_number):
        msg = "{}, line={}: logger should preferably be created with __name__"
print(msg.format(filename, line_number))
def validate(self, filename, **options):
retval = True
pattern = re.compile(r"\((.+)\)")
for lino, line in self.lines_iterator(filename):
if "getLogger(" not in line:
continue
if line.startswith((" ", "\t")):
continue
matcher = re.search(pattern, line)
if not matcher:
self._show_error_message(filename, lino)
retval = False
continue
matches = matcher.groups()
for mt in matches:
if mt.startswith("'") or mt.startswith('"'):
continue
if mt == "__name__":
continue
self._show_error_message(filename, lino)
retval = False
continue
return retval
def main(args=None):
"""Checks 'getLogger' usage"""
hook = CheckLoggerHook()
sys.exit(hook.handle(args))
if __name__ == "__main__":
main(sys.argv[1:])
|
441840
|
import pygame
import pymunk
from .base_shape import BaseShape
from .util import to_pygame
class Motor(BaseShape):
def __init__(self, space, shape1, speed):
# Associate the motor with the location of one of the bodies so
# it is removed when that body is out of the simulation
self.body = shape1.body
self.shape = pymunk.SimpleMotor(shape1.body, space.static_body, speed)
super().__init__()
space.add(self.shape)
def has_own_body(self):
return False
def _draw(self, screen):
p = to_pygame(self.body.position)
radius = 10
rect = pygame.Rect(p[0] - radius/2, p[1] - radius/2, radius, radius)
pygame.draw.arc(screen, self.color, rect, 1, 6)
if self.shape.rate > 0:
pygame.draw.circle(screen, self.color, rect.topright, 2, 0)
else:
pygame.draw.circle(screen, self.color, rect.bottomright, 2, 0)
def _pin_points(self):
raise Exception('Do not use paste_on for motors')
def __repr__(self):
return 'motor: p(' + str(self.body.position.x) + ',' + str(self.body.position.y) + '), speed: ' + \
            str(self.shape.rate)
|
441849
|
from dppy.beta_ensembles import CircularEnsemble
circular = CircularEnsemble(beta=2) # beta must be >=0 integer, default beta=2
# See the crystallization of the configuration as beta increases
for b in [0, 1, 5, 10]:
circular.beta = b
circular.sample_banded_model(size_N=30)
circular.plot()
circular.beta = 2
circular.sample_banded_model(size_N=1000)
circular.hist()
|
441863
|
import os
from flask import Flask
from flask_pymongo import PyMongo
mongo_uri = os.getenv('MONGO_URI', 'mongodb://localhost:27017/ct-eligible')
app = Flask(__name__)
app.config["MONGO_URI"] = mongo_uri
mongo = PyMongo(app)
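# Illustrative route sketch (added for clarity; not part of the original module).
# It assumes Flask >= 1.1 (dict responses) and uses a hypothetical "trials"
# collection name purely to demonstrate the `mongo.db` handle, which points at
# the default database from MONGO_URI.
@app.route('/ping')
def ping():
    # Count documents in the hypothetical collection to verify the connection.
    return {'trials': mongo.db.trials.count_documents({})}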
|
441867
|
import json
from http import HTTPStatus
from mockupdb import MockupDB, go, Command
from json import dumps
import datetime
from bridges.tests.api.basic_test import BasicTest
import bridges.api.logic
SURVEYS_ENDPOINT = 'surveys/'
class PostSurveysTest(BasicTest):
def test_normal(self):
future = self.make_future_post_request(SURVEYS_ENDPOINT, dict(title="title", hideVotes=True))
# get new survey number
request = self.server.receives()
request.ok(cursor={'id': 0, 'firstBatch': []})
# insert new survey
request = self.server.receives()
request.ok()
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.CREATED)
json_result = json.loads(http_response.data.decode())
self.assertIn('results_secret', json_result)
self.assertIn('admin_secret', json_result)
self.assertEqual('title-1', json_result['key'])
def test_without_hide_votes(self):
future = self.make_future_post_request(SURVEYS_ENDPOINT, dict(title="title"))
# get new survey number
request = self.server.receives()
request.ok(cursor={'id': 0, 'firstBatch': []})
# insert new survey
request = self.server.receives()
request.ok()
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.CREATED)
def test_bad_request(self):
future = self.make_future_post_request(SURVEYS_ENDPOINT, dict(this_is_bad_title_key="title"))
http_response = future()
self.assertEqual(http_response.status_code, HTTPStatus.BAD_REQUEST)
def test_serverIssue(self):
def broken_create_survey(title, hide_votes, is_anonymous, description, author):
raise ArithmeticError
temp = bridges.api.logic.create_survey
bridges.api.logic.create_survey = broken_create_survey
future = self.make_future_post_request(SURVEYS_ENDPOINT, dict(title="title"))
http_response = future()
bridges.api.logic.create_survey = temp
self.assertEqual(
http_response.status_code,
HTTPStatus.INTERNAL_SERVER_ERROR)
|
441921
|
import pytest
import numpy as np
from qutip.qip.device import DispersiveCavityQED, CircularSpinChain
from qutip.qip.compiler import (
SpinChainCompiler, CavityQEDCompiler, Instruction, GateCompiler
)
from qutip.qip.circuit import QubitCircuit
from qutip import basis, fidelity
def test_compiling_with_scheduler():
"""
    Here we test whether compiling with the scheduler works properly.
    The non-scheduled pulses should take twice as long as the scheduled ones.
The numerical results are tested in test_device.py
"""
circuit = QubitCircuit(2)
circuit.add_gate("X", 0)
circuit.add_gate("X", 1)
processor = DispersiveCavityQED(2)
processor.load_circuit(circuit, schedule_mode=None)
tlist = processor.get_full_tlist()
time_not_scheduled = tlist[-1]-tlist[0]
coeffs, tlist = processor.load_circuit(circuit, schedule_mode="ASAP")
tlist = processor.get_full_tlist()
time_scheduled1 = tlist[-1]-tlist[0]
coeffs, tlist = processor.load_circuit(circuit, schedule_mode="ALAP")
tlist = processor.get_full_tlist()
time_scheduled2 = tlist[-1]-tlist[0]
assert(abs(time_scheduled1 * 2 - time_not_scheduled) < 1.0e-10)
assert(abs(time_scheduled2 * 2 - time_not_scheduled) < 1.0e-10)
def gauss_dist(t, sigma, amplitude, duration):
return amplitude/np.sqrt(2*np.pi) /sigma*np.exp(-0.5*((t-duration/2)/sigma)**2)
def gauss_rx_compiler(gate, args):
"""
Compiler for the RX gate
"""
targets = gate.targets # target qubit
parameters = args["params"]
h_x2pi = parameters["sx"][targets[0]] # find the coupling strength for the target qubit
    amplitude = gate.arg_value / 2. / 0.9973  # 0.9973 compensates for the finite pulse duration so that the total pulse area stays fixed
gate_sigma = h_x2pi / np.sqrt(2*np.pi)
duration = 6 * gate_sigma
tlist = np.linspace(0, duration, 100)
coeff = gauss_dist(tlist, gate_sigma, amplitude, duration)
pulse_info = [("sx" + str(targets[0]), coeff)] # save the information in a tuple (pulse_name, coeff)
return [Instruction(gate, tlist, pulse_info)]
class MyCompiler(GateCompiler): # compiler class
def __init__(self, num_qubits, params, pulse_dict):
super(MyCompiler, self).__init__(
num_qubits, params=params, pulse_dict=pulse_dict)
# pass our compiler function as a compiler for RX (rotation around X) gate.
self.gate_compiler["RX"] = gauss_rx_compiler
self.args.update({"params": params})
spline_kind = [
pytest.param("step_func", id="discrete"),
pytest.param("cubic", id="continuous"),
]
schedule_mode = [
pytest.param("ASAP", id="ASAP"),
pytest.param("ALAP", id="ALAP"),
pytest.param(False, id="No schedule"),
]
@pytest.mark.parametrize("spline_kind", spline_kind)
@pytest.mark.parametrize("schedule_mode", schedule_mode)
def test_compiler_with_continous_pulse(spline_kind, schedule_mode):
num_qubits = 2
circuit = QubitCircuit(num_qubits)
circuit.add_gate("X", targets=0)
circuit.add_gate("X", targets=1)
circuit.add_gate("X", targets=0)
processor = CircularSpinChain(num_qubits)
gauss_compiler = MyCompiler(
processor.N, processor.params, processor.pulse_dict)
    processor.load_circuit(
        circuit, schedule_mode=schedule_mode, compiler=gauss_compiler)
    result = processor.run_state(init_state=basis([2, 2], [0, 0]))
assert(abs(fidelity(result.states[-1],basis([2,2],[0,1])) - 1) < 1.e-6)
def rx_compiler_without_pulse_dict(gate, args):
"""
Define a gate compiler that does not use pulse_dict but directly
give the index of control pulses in the Processor.
"""
targets = gate.targets
g = args["params"]["sx"][targets[0]]
coeff = np.sign(gate.arg_value) * g
tlist = abs(gate.arg_value) / (2 * g)
pulse_info = [(targets[0], coeff)]
return [Instruction(gate, tlist, pulse_info)]
def test_compiler_without_pulse_dict():
"""
Test for a compiler function without pulse_dict and using args.
"""
num_qubits = 2
circuit = QubitCircuit(num_qubits)
circuit.add_gate("X", targets=[0])
circuit.add_gate("X", targets=[1])
processor = CircularSpinChain(num_qubits)
compiler = SpinChainCompiler(
num_qubits, params=processor.params, pulse_dict=None, setup="circular")
compiler.gate_compiler["RX"] = rx_compiler_without_pulse_dict
compiler.args = {"params": processor.params}
processor.load_circuit(circuit, compiler=compiler)
result = processor.run_state(basis([2,2], [0,0]))
assert(abs(fidelity(result.states[-1], basis([2,2], [1,1])) - 1.) < 1.e-6 )
|
441980
|
import itertools
import os
from os import path
import re
from typing import Iterable, List, NamedTuple, Optional, Tuple, TypedDict, cast
import numpy as np
from tqdm import tqdm
import PIL
from PIL import Image, ImageFile
from rclip import db, model, utils
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ImageMeta(TypedDict):
modified_at: float
size: int
def get_image_meta(filepath: str) -> ImageMeta:
return ImageMeta(
modified_at=os.path.getmtime(filepath),
size=os.path.getsize(filepath)
)
def is_image_meta_equal(image: db.Image, meta: ImageMeta) -> bool:
for key in meta:
if meta[key] != image[key]:
return False
return True
class RClip:
EXCLUDE_DIRS_DEFAULT = ['@eaDir', 'node_modules', '.git']
IMAGE_REGEX = re.compile(r'^.+\.(jpe?g|png)$', re.I)
BATCH_SIZE = 8
DB_IMAGES_BEFORE_COMMIT = 50_000
class SearchResult(NamedTuple):
filepath: str
score: float
def __init__(self, model_instance: model.Model, database: db.DB, exclude_dirs: Optional[List[str]]):
self._model = model_instance
self._db = database
excluded_dirs = '|'.join(re.escape(dir) for dir in exclude_dirs or self.EXCLUDE_DIRS_DEFAULT)
self._exclude_dir_regex = re.compile(f'^.+\\/({excluded_dirs})(\\/.+)?$')
def _index_files(self, filepaths: List[str], metas: List[ImageMeta]):
images: List[Image.Image] = []
filtered_paths: List[str] = []
for path in filepaths:
try:
image = Image.open(path)
images.append(image)
filtered_paths.append(path)
except PIL.UnidentifiedImageError as ex:
pass
except Exception as ex:
print(f'error loading image {path}:', ex)
try:
features = self._model.compute_image_features(images)
except Exception as ex:
print('error computing features:', ex)
return
for path, meta, vector in cast(Iterable[Tuple[str, ImageMeta, np.ndarray]], zip(filtered_paths, metas, features)):
self._db.upsert_image(db.NewImage(
filepath=path,
modified_at=meta['modified_at'],
size=meta['size'],
vector=vector.tobytes()
), commit=False)
def ensure_index(self, directory: str):
# We will mark existing images as existing later
self._db.flag_images_in_a_dir_as_deleted(directory)
images_processed = 0
batch: List[str] = []
metas: List[ImageMeta] = []
for root, _, files in os.walk(directory):
if self._exclude_dir_regex.match(root):
continue
filtered_files = list(f for f in files if self.IMAGE_REGEX.match(f))
if not filtered_files:
continue
for file in cast(Iterable[str], tqdm(filtered_files, desc=root)):
filepath = path.join(root, file)
image = self._db.get_image(filepath=filepath)
try:
meta = get_image_meta(filepath)
except Exception as ex:
print(f'error getting fs metadata for {filepath}:', ex)
continue
if not images_processed % self.DB_IMAGES_BEFORE_COMMIT:
self._db.commit()
images_processed += 1
if image and is_image_meta_equal(image, meta):
self._db.remove_deleted_flag(filepath, commit=False)
continue
batch.append(filepath)
metas.append(meta)
if len(batch) >= self.BATCH_SIZE:
self._index_files(batch, metas)
batch = []
metas = []
if len(batch) != 0:
self._index_files(batch, metas)
self._db.commit()
def search(
self, query: str, directory: str, top_k: int = 10,
positive_queries: List[str] = [], negative_queries: List[str] = []) -> List[SearchResult]:
filepaths, features = self._get_features(directory)
positive_queries = [query] + positive_queries
sorted_similarities = self._model.compute_similarities_to_text(features, positive_queries, negative_queries)
filtered_similarities = filter(
lambda similarity: not self._exclude_dir_regex.match(filepaths[similarity[1]]),
sorted_similarities
)
top_k_similarities = itertools.islice(filtered_similarities, top_k)
return [RClip.SearchResult(filepath=filepaths[th[1]], score=th[0]) for th in top_k_similarities]
def _get_features(self, directory: str) -> Tuple[List[str], np.ndarray]:
filepaths: List[str] = []
features: List[np.ndarray] = []
for image in self._db.get_image_vectors_by_dir_path(directory):
filepaths.append(image['filepath'])
features.append(np.frombuffer(image['vector'], np.float32))
if not filepaths:
return [], np.ndarray(shape=(0, model.Model.VECTOR_SIZE))
return filepaths, np.stack(features)
def main():
arg_parser = utils.init_arg_parser()
args = arg_parser.parse_args()
current_directory = os.getcwd()
model_instance = model.Model()
datadir = utils.get_app_datadir()
database = db.DB(datadir / 'db.sqlite3')
rclip = RClip(model_instance, database, args.exclude_dir)
if not args.skip_index:
rclip.ensure_index(current_directory)
result = rclip.search(args.query, current_directory, args.top, args.add, args.subtract)
if args.filepath_only:
for r in result:
print(r.filepath)
else:
print('score\tfilepath')
for r in result:
print(f'{r.score:.3f}\t"{r.filepath}"')
if __name__ == '__main__':
main()
|
441991
|
import os
import sys
root = os.path.dirname(os.path.dirname(__file__))
sys.path.append(root)
import jinja2
from yapf.yapflib.yapf_api import FormatCode
from isort import SortImports
with open(os.path.join(root, 'etcd3/swaggerdefs/v3_3_x/__init__.py')) as f:
g = {}
exec(f.read(), g)
spec = g.get('spec_v3_3_x')
with open(os.path.join(root, 'scripts/model-template.jinja2')) as f:
model_tpl = jinja2.Template(f.read(), trim_blocks=True, lstrip_blocks=True)
with open(os.path.join(os.path.dirname(__file__), '../etcd3/models.py'), 'w') as f:
s = model_tpl.render(spec=spec)
print(s)
s = SortImports(file_contents=FormatCode(s, style_config='pep8')[0], force_single_line=True).output
f.write(s)
print(s)
|
442008
|
class I2C():
def __init__(self, interfaceNo):
self.interfaceNo = interfaceNo
self._read_result = {}
self._read_arguments = {}
self._write_arguments = {}
def set_readfrom_mem_result(self, register, result_bytes):
self._read_result[register] = result_bytes
def readfrom_mem(self, address, register, bytes):
self._read_arguments[register] = [address, register, bytes]
return self._read_result[register]
def get_readfrom_mem_arguments(self, register):
return self._read_arguments[register]
def writeto_mem(self, address, register, register_bytes):
self._write_arguments[register] = [address, register_bytes]
def get_writeto_mem_arguments(self, register):
return self._write_arguments[register]
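# Illustrative usage sketch (added for clarity; not part of the original mock).
# It stubs a register read, exercises the driver-facing API and verifies the
# recorded call arguments, the way a unit test for a sensor driver might.
# The address and register values below are arbitrary examples.
if __name__ == '__main__':
    i2c = I2C(interfaceNo=1)
    i2c.set_readfrom_mem_result(0x0F, b'\x12\x34')
    assert i2c.readfrom_mem(0x76, 0x0F, 2) == b'\x12\x34'
    assert i2c.get_readfrom_mem_arguments(0x0F) == [0x76, 0x0F, 2]
    i2c.writeto_mem(0x76, 0x10, b'\x01')
    assert i2c.get_writeto_mem_arguments(0x10) == [0x76, b'\x01']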
|
442044
|
from bench import bench
print(bench(10, '''
def fib(n):
if n < 2: return n
return fib(n-1) + fib(n-2)
''', '''
fib(20)
'''))
|
442084
|
from sudachipy import tokenizer
from sudachipy import dictionary
# Initialize sudachipy
tokenizer_obj = dictionary.Dictionary().create()
mode = tokenizer.Tokenizer.SplitMode.C
def tokenize(text):
"""
A method for word segmentation.
Parameters
----------
text : str
An input text
Returns
-------
words : list
A list of words
"""
words = [m.surface() for m in tokenizer_obj.tokenize(text, mode)]
return words
def original_usage(text):
"""
Return the analysis results by SudachiPy.
Parameters
----------
text : str
An input text
Returns
-------
tokens : sudachipy.morphemelist.MorphemeList
The analysis results by SudachiPy
"""
tokens = tokenizer_obj.tokenize(text, mode)
return tokens
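# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes SudachiPy and a Sudachi dictionary (e.g. sudachidict_core) are installed.
if __name__ == '__main__':
    sample = 'すもももももももものうち'
    print(tokenize(sample))  # e.g. ['すもも', 'も', 'もも', 'も', 'もも', 'の', 'うち']
    for m in original_usage(sample):
        print(m.surface(), m.part_of_speech())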
|
442173
|
import os
import random
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageDraw
from east import cfg
from east.utils import reorder_vertexes, shrink, point_inside_of_quad, point_inside_of_nth_quad
data_dir = cfg.data_dir
origin_img_dir = os.path.join(data_dir, cfg.origin_img_dir_name)
origin_txt_dir = os.path.join(data_dir, cfg.origin_txt_dir_name)
train_imgs_dir = os.path.join(data_dir, cfg.train_imgs_dir_name)
train_txts_dir = os.path.join(data_dir, cfg.train_txts_dir_name)
show_gt_img_dir = os.path.join(data_dir, cfg.show_gt_img_dir_name)
show_act_img_dir = os.path.join(data_dir, cfg.show_act_img_dir_name)
def preprocess():
    # Tasks:
    # 1. Create all the directories that may be needed.
    # 2. Generate the training set.
    # 3. Generate the gt (ground truth) images.
if not os.path.exists(train_imgs_dir):
os.mkdir(train_imgs_dir)
if not os.path.exists(train_txts_dir):
os.mkdir(train_txts_dir)
if not os.path.exists(show_gt_img_dir):
os.mkdir(show_gt_img_dir)
if not os.path.exists(show_act_img_dir):
os.mkdir(show_act_img_dir)
or_img_list = os.listdir(origin_img_dir)
print("[*] Found %d origin images." % len(or_img_list))
train_val_set = []
for or_img_fnm, _ in zip(or_img_list, tqdm(range(len(or_img_list)))):
or_img_nm, ext = os.path.splitext(or_img_fnm)
img_path = os.path.join(origin_img_dir, or_img_fnm)
with Image.open(img_path) as img:
            # Resize the image and calculate the scale ratios.
d_width, d_height = cfg.max_train_img_size, cfg.max_train_img_size
scale_ratio_w = d_width / img.width
scale_ratio_h = d_height / img.height
img = img.resize((d_width, d_height), Image.NEAREST).convert('RGB')
show_gt_img = img.copy()
# draw on the img.
draw = ImageDraw.Draw(show_gt_img)
txt_path = os.path.join(origin_txt_dir, or_img_nm + ".txt")
with open(txt_path, 'r') as f:
tag_list = f.readlines()
# this array can save multi-tag labels.
xy_list_array = np.zeros((len(tag_list), 4, 2))
# paint gt img from the annotations.
for anno, i in zip(tag_list, range(len(tag_list))):
anno_col = anno.strip().split(',')
anno_array = np.array(anno_col)
# reshape it to 4*2 and scale it.
xy_list = np.reshape(anno_array[:8].astype(float), (4, 2))
xy_list[:, 0] = xy_list[:, 0] * scale_ratio_w
xy_list[:, 1] = xy_list[:, 1] * scale_ratio_h
# make sure the xy_list is in right order.
# if not, reorder it then store it.
xy_list = reorder_vertexes(xy_list)
xy_list_array[i] = xy_list
_, shrink_xy_list, _ = shrink(xy_list)
shrink_1, _, long_edge = shrink(xy_list, cfg.shrink_side_ratio)
# draw gt image.
# green line is label edge.
# blue line is shrinking edge.
draw.line([tuple(xy_list[0]), tuple(xy_list[1]),
tuple(xy_list[2]), tuple(xy_list[3]),
tuple(xy_list[0])],
width=2, fill="green")
draw.line([tuple(shrink_xy_list[0]),
tuple(shrink_xy_list[1]),
tuple(shrink_xy_list[2]),
tuple(shrink_xy_list[3]),
tuple(shrink_xy_list[0])],
width=2, fill="blue")
# yellow is head and foot line.
vs = [[[0, 0, 3, 3, 0], [1, 1, 2, 2, 1]],
[[0, 0, 1, 1, 0], [2, 2, 3, 3, 2]]]
for q_th in range(2):
draw.line([tuple(xy_list[vs[long_edge][q_th][0]]),
tuple(shrink_1[vs[long_edge][q_th][1]]),
tuple(shrink_1[vs[long_edge][q_th][2]]),
tuple(xy_list[vs[long_edge][q_th][3]]),
tuple(xy_list[vs[long_edge][q_th][4]])],
width=3, fill='yellow')
# save train img and labels.
img.save(os.path.join(train_imgs_dir, or_img_fnm))
np.save(os.path.join(train_txts_dir, or_img_nm + '.npy'), xy_list_array)
# save gt img.
show_gt_img.save(os.path.join(show_gt_img_dir, or_img_fnm))
train_val_set.append('{},{},{}\n'.format(or_img_fnm, d_width, d_height))
train_img_list = os.listdir(train_imgs_dir)
print('\nfound %d train images.' % len(train_img_list))
train_label_list = os.listdir(train_txts_dir)
print('found %d train labels.' % len(train_label_list))
# split train and val set.
random.shuffle(train_val_set)
val_count = int(cfg.validation_split_ratio * len(train_val_set))
with open(os.path.join(data_dir, cfg.val_fname), 'w') as f_val:
f_val.writelines(train_val_set[:val_count])
with open(os.path.join(data_dir, cfg.train_fname), 'w') as f_train:
f_train.writelines(train_val_set[val_count:])
def process_label():
    # Tasks:
    # 1. Generate the act images.
    # 2. Generate the gt labels for the training set.
    # Load the files generated by preprocess().
with open(os.path.join(data_dir, cfg.val_fname), 'r') as f_val:
f_list = f_val.readlines()
with open(os.path.join(data_dir, cfg.train_fname), 'r') as f_train:
f_list.extend(f_train.readlines())
for line, _ in zip(f_list, tqdm(range(len(f_list)))):
line_cols = str(line).strip().split(',')
img_name_ext, width, height = line_cols[0].strip(), int(line_cols[1].strip()), int(line_cols[2].strip())
img_name, _ = os.path.splitext(img_name_ext)
gt = np.zeros((height // cfg.pixel_size, width // cfg.pixel_size, 7))
xy_list_array = np.load(os.path.join(train_txts_dir, img_name + '.npy'))
with Image.open(os.path.join(train_imgs_dir, img_name_ext)) as im:
draw = ImageDraw.Draw(im)
for xy_list in xy_list_array:
_, shrink_xy_list, _ = shrink(xy_list, cfg.shrink_ratio)
shrink_1, _, long_edge = shrink(xy_list, cfg.shrink_side_ratio)
p_min = np.amin(shrink_xy_list, axis=0)
p_max = np.amax(shrink_xy_list, axis=0)
# floor of the float
ji_min = (p_min / cfg.pixel_size - 0.5).astype(int) - 1
# +1 for ceil of the float and +1 for include the end
ji_max = (p_max / cfg.pixel_size - 0.5).astype(int) + 3
i_min = np.maximum(0, ji_min[1])
i_max = np.minimum(height // cfg.pixel_size, ji_max[1])
j_min = np.maximum(0, ji_min[0])
j_max = np.minimum(width // cfg.pixel_size, ji_max[0])
for i in range(i_min, i_max):
for j in range(j_min, j_max):
px = (j + 0.5) * cfg.pixel_size
py = (i + 0.5) * cfg.pixel_size
if point_inside_of_quad(px, py, shrink_xy_list, p_min, p_max):
gt[i, j, 0] = 1
line_width, line_color = 1, 'red'
ith = point_inside_of_nth_quad(px, py, xy_list, shrink_1, long_edge)
vs = [[[3, 0], [1, 2]], [[0, 1], [2, 3]]]
if ith in range(2):
gt[i, j, 1] = 1
if ith == 0:
line_width, line_color = 2, 'yellow'
else:
line_width, line_color = 2, 'green'
gt[i, j, 2:3] = ith
gt[i, j, 3:5] = xy_list[vs[long_edge][ith][0]] - [px, py]
gt[i, j, 5:] = xy_list[vs[long_edge][ith][1]] - [px, py]
draw.line([(px - 0.5 * cfg.pixel_size,
py - 0.5 * cfg.pixel_size),
(px + 0.5 * cfg.pixel_size,
py - 0.5 * cfg.pixel_size),
(px + 0.5 * cfg.pixel_size,
py + 0.5 * cfg.pixel_size),
(px - 0.5 * cfg.pixel_size,
py + 0.5 * cfg.pixel_size),
(px - 0.5 * cfg.pixel_size,
py - 0.5 * cfg.pixel_size)],
width=line_width, fill=line_color)
# save act img.
im.save(os.path.join(show_act_img_dir, img_name_ext))
# save gt label in train label.
np.save(os.path.join(train_txts_dir, img_name + '_gt.npy'), gt)
if __name__ == '__main__':
print("[*] Start preprocess...")
preprocess()
process_label()
print("\n[*] Done.")
|
442190
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from rnn_encoder import RNNEncoder
'''
batch_first = False
LSTM input: input, (h_0, c_0)
- input (seq_len, batch, input_size): tensor containing the features of the input sequence.
  It can also be a packed variable-length sequence; see torch.nn.utils.rnn.pack_padded_sequence.
- h_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial hidden state for each element in the batch.
- c_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial cell state for each element in the batch.
LSTM output: output, (h_n, c_n)
- output (seq_len, batch, hidden_size * num_directions): tensor holding the outputs of the last LSTM layer.
  If the input is a torch.nn.utils.rnn.PackedSequence, the output is also a PackedSequence.
- h_n (num_layers * num_directions, batch, hidden_size): tensor holding the hidden state of the last time step.
- c_n (num_layers * num_directions, batch, hidden_size): tensor holding the cell state of the last time step.

batch_first = True
LSTM input: input, (h_0, c_0)
- input (batch, seq_len, input_size): tensor containing the features of the input sequence.
  It can also be a packed variable-length sequence; see torch.nn.utils.rnn.pack_padded_sequence.
- h_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial hidden state for each element in the batch.
- c_0 (num_layers * num_directions, batch, hidden_size): tensor holding the initial cell state for each element in the batch.
LSTM output: output, (h_n, c_n)
- output (batch, seq_len, hidden_size * num_directions): tensor holding the outputs of the last LSTM layer.
  If the input is a torch.nn.utils.rnn.PackedSequence, the output is also a PackedSequence.
- h_n (num_layers * num_directions, batch, hidden_size): tensor holding the hidden state of the last time step.
- c_n (num_layers * num_directions, batch, hidden_size): tensor holding the cell state of the last time step.

(A runnable shape check for these conventions is appended at the end of this module.)
'''
# att = W*tanh(V*X)
class SelfAttention(nn.Module):
def __init__(self, hidden_size):
super(SelfAttention, self).__init__()
self.correlation = nn.Sequential(
nn.Linear(hidden_size, hidden_size//2),
nn.Tanh(), # nn.ReLU()
nn.Linear(hidden_size//2, 1)
)
def forward(self, encoder_output): # [batch_size, seq_len, hidden_size]
a = self.correlation(encoder_output)
# print(a.shape) # [batch_size, seq_len, 1]
        weights = F.softmax(a.squeeze(-1), dim=1)  # squeeze(-1): drop the trailing dimension of size 1
# print(weights.shape) # [batch_size, seq_len]
# [batch_size, seq_len, hidden_size] * [batch_size, seq_len, 1] -> [batch_size, hidden_size]
out = (encoder_output * weights.unsqueeze(-1)).sum(dim=1)
# print(out.shape) # [batch_size, hidden_size]
# batch, seq_len
out = torch.tanh(out)
return out, weights
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
# att(query, keys, values) = softmax(sim(query, keys)) * values
def forward(self, query, keys, values):
        # query: the final hidden state; keys / values: the LSTM outputs (keys == values)
        # query:  B × Q
        # keys:   B × T × K
        # values: B × T × V
        scale = np.sqrt(query.shape[1])  # scaling factor
        # # Assuming Q == K (== hidden_size): [B, 1, Q] * [B, K, T] -> [B, 1, T]
# att_weights = torch.bmm(query.unsqueeze(1), keys.transpose(1, 2))
# soft_att_weights = F.softmax(att_weights.mul_(scale), dim=2)
# # [B, 1, T] * [B, T, V] -> [B, 1, V] -> [B, V]
# att_out = torch.bmm(att_weights, values).squeeze(1)
# [B, T, K] * [B, Q, 1] -> [B, T, 1]
att_weights = torch.bmm(keys, query.unsqueeze(2)).squeeze(2)
soft_att_weights = F.softmax(att_weights.mul_(scale), dim=1) # [B, T]
# [B, V, T] * [B, T, 1] -> [B, V, 1]
att_out = torch.bmm(values.transpose(1, 2), soft_att_weights).squeeze(2)
return att_out, soft_att_weights
class SentimentModel(nn.Module):
def __init__(self, vocab, config, embedding_weights):
super(SentimentModel, self).__init__()
self.config = config
embedding_dim = embedding_weights.shape[1] # vocab_size * embedding_dim
embed_init = torch.zeros((vocab.corpus_vocab_size, embedding_dim), dtype=torch.float32)
self.corpus_embeddings = nn.Embedding(num_embeddings=vocab.corpus_vocab_size,
embedding_dim=embedding_dim)
self.corpus_embeddings.weight.data.copy_(embed_init)
# self.corpus_embeddings.weight = nn.Parameter(embed_init)
        self.corpus_embeddings.weight.requires_grad = True  # True is the default
self.wd2vec_embeddings = nn.Embedding.from_pretrained(torch.from_numpy(embedding_weights))
self.wd2vec_embeddings.weight.requires_grad = False
self.bidirectional = True
self.nb_directions = 2 if self.bidirectional else 1
        self.rnn_encoder = RNNEncoder(input_size=embedding_dim,  # dimensionality of the input features
                                      hidden_size=self.config.hidden_size,  # dimensionality of the hidden state
                                      num_layers=self.config.nb_layers,  # number of stacked LSTM layers (default 1; with 2, the second LSTM consumes the first one's outputs)
                                      dropout=self.config.drop_rate,  # dropout applied to the outputs of every layer except the last
                                      bidirectional=self.bidirectional,  # whether the LSTM is bidirectional
                                      batch_first=True)  # [batch_size, seq, feature]
# self.self_attention = SelfAttention(self.nb_directions * self.config.hidden_size)
self.self_attention = SelfAttention(self.config.hidden_size)
self.dropout_embed = nn.Dropout(self.config.drop_embed_rate)
self.dropout = nn.Dropout(self.config.drop_rate)
# self.out = nn.Linear(self.nb_directions * self.config.hidden_size, vocab.tag_size)
self.out = nn.Linear(self.config.hidden_size, vocab.tag_size)
# Att = ht * h
def _attention(self, encoder_output, hidden_n):
# encoder_output: [batch_size, seq_len, hidden_size]
# hidden_n: [batch_size, hidden_size]
# [batch_size, seq_len, hidden_size] * [batch_size, hidden_size, 1]
# -> [batch_size, seq_len, 1] -> [batch_size, seq_len]
att_weights = torch.bmm(encoder_output, hidden_n.unsqueeze(2)).squeeze(2)
soft_att_weights = F.softmax(att_weights, dim=1) # [batch_size, seq_len]
# [batch_size, hidden_size, seq_len] * [batch_size, seq_len, 1]
# -> [batch_size, hidden_size, 1] -> [batch_size, hidden_size]
out = torch.bmm(encoder_output.transpose(1, 2), soft_att_weights.unsqueeze(2)).squeeze(2)
return out, soft_att_weights
def forward(self, inputs, wd2vec_inputs, mask): # (h0_state, c0_state)
corpus_embed = self.corpus_embeddings(inputs)
wd2vec_embed = self.wd2vec_embeddings(wd2vec_inputs)
embed = corpus_embed + wd2vec_embed
        if self.training:  # apply dropout only during training (self.training is False at prediction time)
            embed = self.dropout_embed(embed)
        # r_out: (batch, max_seq_len, hidden_size * num_directions)
        r_out, hidden = self.rnn_encoder(embed, mask)  # passing no initial state (None) means zero initialisation
        if self.bidirectional:  # sum the forward and backward outputs
r_out = r_out[:, :, :self.config.hidden_size] + r_out[:, :, self.config.hidden_size:]
# fw, bw = r_out.split(2, dim=2)
# r_out = fw + bw
# r_out = torch.cat((r_out[-1], r_out[-2]), dim=1)
out, weights = self.self_attention(r_out)
        if self.training:  # apply dropout only during training (self.training is False at prediction time)
out = self.dropout(out)
out = self.out(out)
# out = F.log_softmax(self.out(x), dim=1)
return out, weights
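# Illustrative shape check for the LSTM I/O conventions summarised in the module
# docstring and for SelfAttention (added for clarity; it is not part of the
# original training code and only runs when this file is executed directly).
if __name__ == '__main__':
    batch, seq_len, input_size, hidden = 4, 10, 8, 16
    lstm = nn.LSTM(input_size, hidden, num_layers=1, bidirectional=True, batch_first=True)
    x = torch.randn(batch, seq_len, input_size)
    out, (h_n, c_n) = lstm(x)
    assert out.shape == (batch, seq_len, 2 * hidden)  # hidden_size * num_directions
    assert h_n.shape == (2, batch, hidden)            # num_layers * num_directions
    assert c_n.shape == (2, batch, hidden)
    # Sum the forward/backward halves as SentimentModel.forward does, then pool with attention.
    summed = out[:, :, :hidden] + out[:, :, hidden:]
    pooled, weights = SelfAttention(hidden)(summed)
    assert pooled.shape == (batch, hidden) and weights.shape == (batch, seq_len)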
|
442196
|
from __future__ import print_function
import psycopg2
import psycopg2.extras
import time
import datadb
from psycopg2.extensions import adapt
def getIndexesDataForTable(host, full_name, date_from, date_to):
conn = datadb.getDataConnection()
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute(getSingleTableSql(host, full_name, date_from, date_to))
data = cur.fetchall()
cur.close()
datadb.closeDataConnection(conn)
all_data = []
d = { 'size' : [], 'scan' : [], 'tup_read': [] } #, 'tup_fetch' : [] }
last_scan = None
last_tup_read = None
# last_tup_fetch = None
last_name = None
last_index_size = 0
last_total_end_size = 0
last_pct_of_total_end_size = 0
for r in data:
        if last_name is not None:
if last_name != r['name'] and len(d['size']) > 0:
all_data.append({'index_name':last_name, 'data':d, 'last_index_size': round(last_index_size / 1024**2), 'total_end_size': round(last_total_end_size / 1024**2), 'pct_of_total_end_size':last_pct_of_total_end_size})
d = { 'size' : [], 'scan' : [], 'tup_read': [] } # , 'tup_fetch' : [] }
d['size'].append( ( r['timestamp'] , r['size'] ) )
d['scan'].append( ( r['timestamp'] , 0 if last_scan > r['scan'] else r['scan'] - last_scan ) )
d['tup_read'].append( ( r['timestamp'] , 0 if last_tup_read > r['tup_read'] else r['tup_read'] - last_tup_read ) )
# d['tup_fetch'].append( ( r['timestamp'] , 0 if last_tup_fetch > r['tup_fetch'] else r['tup_fetch'] - last_tup_fetch ) )
last_scan = r['scan']
last_tup_read = r['tup_read']
# last_tup_fetch = r['tup_fetch']
last_name = r['name']
last_index_size = r['size']
last_total_end_size = r['total_end_size']
last_pct_of_total_end_size = r['pct_of_total_end_size']
if len(data) > 0:
all_data.append({'index_name':last_name, 'data':d, 'last_index_size': round(last_index_size / 1024**2), 'total_end_size': round(last_total_end_size / 1024**2), 'pct_of_total_end_size':last_pct_of_total_end_size})
return all_data
def getSingleTableSql(host, full_table_name, date_from, date_to=None):
    interval = "AND iud_timestamp BETWEEN %s::timestamp and %s::timestamp" % (adapt(date_from), adapt(date_to))
    if date_to is None:
        interval = " AND iud_timestamp > %s::timestamp" % (adapt(date_from),)
schema = full_table_name.split('.')[0]
table = full_table_name.split('.')[1]
sql = """
WITH q_end_size AS (
SELECT iud_timestamp as end_timestamp,
sum(iud_size) as total_end_size
FROM monitor_data.index_usage_data,
monitor_data.indexes
WHERE i_host_id = """ + str(adapt(host)) + """
AND i_schema = """ + str(adapt(schema)) + """
AND iud_host_id = """ + str(adapt(host)) + """
AND i_table_name = """ + str(adapt(table)) + """
AND iud_index_id = i_id
"""+interval+"""
GROUP
BY iud_timestamp
ORDER
BY iud_timestamp DESC
LIMIT 1
)
SELECT iud_timestamp as timestamp,
i_name as name,
iud_scan as scan,
iud_tup_read as tup_read,
--iud_tup_fetch as tup_fetch,
iud_size as size,
total_end_size,
round (iud_size / total_end_size*100::numeric, 1) as pct_of_total_end_size
FROM monitor_data.index_usage_data,
monitor_data.indexes,
q_end_size
WHERE i_host_id = """ + str(adapt(host)) + """
AND i_schema = """ + str(adapt(schema)) + """
AND iud_host_id = """ + str(adapt(host)) + """
AND i_table_name = """ + str(adapt(table)) + """
AND iud_index_id = i_id
"""+interval+"""
ORDER BY i_name, iud_timestamp ASC
"""
return sql
|
442198
|
import os
import pickle
import numpy as np
import lenskit.util.test as lktu
from lenskit import sharing as lks
from lenskit.algorithms.als import BiasedMF
from pytest import mark
def test_sharing_mode():
"Ensure sharing mode decorator turns on sharing"
assert not lks.in_share_context()
with lks.sharing_mode():
assert lks.in_share_context()
assert not lks.in_share_context()
def test_persist_bpk():
matrix = np.random.randn(1000, 100)
share = lks.persist_binpickle(matrix)
try:
assert share.path.exists()
m2 = share.get()
assert m2 is not matrix
assert np.all(m2 == matrix)
del m2
finally:
share.close()
@mark.skipif(not lks.SHM_AVAILABLE, reason='shared_memory not available')
def test_persist_shm():
matrix = np.random.randn(1000, 100)
share = lks.persist_shm(matrix)
try:
m2 = share.get()
assert m2 is not matrix
assert np.all(m2 == matrix)
del m2
finally:
share.close()
def test_persist():
"Test default persistence"
matrix = np.random.randn(1000, 100)
share = lks.persist(matrix)
try:
m2 = share.get()
assert m2 is not matrix
assert np.all(m2 == matrix)
del m2
finally:
share.close()
def test_persist_dir(tmp_path):
"Test persistence with a configured directory"
matrix = np.random.randn(1000, 100)
with lktu.set_env_var('LK_TEMP_DIR', os.fspath(tmp_path)):
share = lks.persist(matrix)
assert isinstance(share, lks.BPKPersisted)
try:
m2 = share.get()
assert m2 is not matrix
assert np.all(m2 == matrix)
del m2
finally:
share.close()
def test_persist_method():
"Test persistence with a specified method"
matrix = np.random.randn(1000, 100)
share = lks.persist(matrix, method='binpickle')
assert isinstance(share, lks.BPKPersisted)
try:
m2 = share.get()
assert m2 is not matrix
assert np.all(m2 == matrix)
del m2
finally:
share.close()
def test_store_als():
algo = BiasedMF(10)
algo.fit(lktu.ml_test.ratings)
shared = lks.persist(algo)
k2 = pickle.loads(pickle.dumps(shared))
try:
a2 = k2.get()
assert a2 is not algo
assert a2.item_features_ is not algo.item_features_
assert np.all(a2.item_features_ == algo.item_features_)
assert a2.user_features_ is not algo.user_features_
assert np.all(a2.user_features_ == algo.user_features_)
del a2
k2.close()
del k2
finally:
shared.close()
|
442318
|
from __future__ import absolute_import
from datetime import datetime
from typing import NamedTuple, Optional
from changes.config import db
from changes.constants import Result, Status
from changes.models.jobstep import JobStep
from changes.utils.agg import aggregate_result
LXCConfig = NamedTuple('LXCConfig', [('compression', Optional[str]),
('release', Optional[str]),
('prelaunch', Optional[str]),
('postlaunch', Optional[str]),
('s3_bucket', Optional[str]),
('template', Optional[str]),
('mirror', Optional[str]),
('security_mirror', Optional[str])])
class BuildStep(object):
def can_snapshot(self):
return False
def get_label(self):
raise NotImplementedError
def get_resource_limits(self):
"""Return the resource limits that should be applied to individual executions.
The return value is expected to be a dict like:
{'cpus': 4, 'memory': 8000}
with 'cpus' and 'memory' indicating the number of CPUs and megabytes of memory needed
respectively. Both fields are optional.
        Specifying these values doesn't guarantee you'll get them (or that you won't get more),
but it will be taken into account when scheduling jobs, and steps with lower limits
may be scheduled sooner.
"""
return {}
def get_lxc_config(self, jobstep):
"""
Get the LXC configuration, if the LXC adapter should be used.
Args:
jobstep (JobStep): The JobStep to get the LXC config for.
Returns:
LXCConfig: The config to use for this jobstep, or None.
"""
return None
def get_test_stats_from(self):
"""
Returns the project slug that test statistics should be retrieved from,
or None to use the current project.
"""
return None
def execute(self, job):
"""
Given a new job, execute it (either sync or async), and report the
results or yield to an update step.
"""
raise NotImplementedError
def create_replacement_jobstep(self, step):
"""Attempt to create a replacement of the given jobstep.
Returns new jobstep if successful, None otherwise."""
return None
def update(self, job):
raise NotImplementedError
def update_step(self, step):
raise NotImplementedError
def validate(self, job):
"""Called when a job is ready to be finished.
This is responsible for setting the job's final result. The base
implementation simply aggregates the phase results.
Args:
job (Job): The job being finished.
Returns:
None
"""
# TODO(josiah): ideally, we could record a FailureReason.
# Currently FailureReason must be per-step.
job.result = aggregate_result((p.result for p in job.phases))
def validate_phase(self, phase):
"""Called when a job phase is ready to be finished.
This is responsible for setting the phases's final result. The base
implementation simply aggregates the jobstep results.
Args:
phase (JobPhase): The phase being finished.
Returns:
None
"""
# TODO(josiah): ideally, we could record a FailureReason.
# Currently FailureReason must be per-step.
phase.result = aggregate_result((s.result for s in phase.current_steps))
# There's no finish_step because steps are only marked as finished/passed
# by update().
def cancel(self, job):
# XXX: this makes the assumption that sync_job will take care of
# propagating the remainder of the metadata
active_steps = JobStep.query.filter(
JobStep.job == job,
JobStep.status != Status.finished,
)
for step in active_steps:
self.cancel_step(step)
step.status = Status.finished
step.result = Result.aborted
step.date_finished = datetime.utcnow()
db.session.add(step)
db.session.flush()
def cancel_step(self, step):
raise NotImplementedError
def fetch_artifact(self, artifact):
raise NotImplementedError
def create_expanded_jobstep(self, jobstep, new_jobphase, future_jobstep):
raise NotImplementedError
def get_allocation_command(self, jobstep):
raise NotImplementedError
def get_artifact_manager(self, jobstep):
"""
Return an artifacts.manager.Manager object for the given jobstep.
This manager should be created with all artifact handlers that apply.
For instance, in a collection JobStep, you might wish to have only a
handler for a collection artifact, whereas in JobSteps that run tests,
you may wish to have handlers for test result files, coverage, etc.
Args:
jobstep: The JobStep in question.
"""
raise NotImplementedError
def prefer_artifactstore(self):
"""
Return true if we should prefer the artifact store artifacts over
those collected by Mesos/Jenkins.
"""
return False
def verify_final_artifacts(self, jobstep, artifacts):
"""
Called when a jobstep is finished but we haven't yet synced its artifacts.
Used to do any verification we might want, for instance checking for
required artifacts.
"""
@staticmethod
def handle_debug_infra_failures(jobstep, debug_config, phase_type):
"""
Uses the infra_failures debug_config to determine whether a JobStep
should simulate an infra failure, and sets the JobStep's data field
accordingly. (changes-client will then report an infra failure.)
Args:
jobstep: The JobStep in question.
debug_config: The debug_config for this BuildStep.
phase_type: The phase this JobStep is in. Either 'primary' or 'expanded'
"""
infra_failures = debug_config.get('infra_failures', {})
if phase_type in infra_failures:
percent = jobstep.id.int % 100
jobstep.data['debugForceInfraFailure'] = percent < infra_failures[phase_type] * 100
db.session.add(jobstep)
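# Illustrative sketch of a concrete step (added for clarity; "ExampleBuildStep"
# is hypothetical and not part of this module). A real implementation must also
# provide execute(), update(), update_step(), cancel_step(), and the other
# abstract hooks above.
class ExampleBuildStep(BuildStep):
    def get_label(self):
        return 'run example build'

    def get_resource_limits(self):
        # Request 2 CPUs and 4000 MB of memory per execution, in the format
        # described in the BuildStep.get_resource_limits docstring above.
        return {'cpus': 2, 'memory': 4000}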
|
442319
|
import mobula.layers as L
import numpy as np
def go_convt(stride, pad):
print ("test ConvT: ", stride, pad)
X = np.random.random((2, 4, 4, 4)) * 100
N, D, NH, NW = X.shape
K = 3
C = 1
FW = np.random.random((D, C, K * K)) * 10
F = FW.reshape((D, C, K, K))
data = L.Data(X)
convT = L.ConvT(data, kernel = K, pad = pad, stride = stride, dim_out = C)
pad_h = pad_w = pad
kernel_h = kernel_w = K
OH = (NH - 1) * stride + kernel_h - pad_h * 2
OW = (NW - 1) * stride + kernel_w - pad_w * 2
data.reshape()
convT.reshape()
convT.W = FW
convT.b = np.random.random(convT.b.shape) * 10
# Conv: (OH, OW) -> (NH, NW)
    # ConvT: (NH, NW) -> (OH, OW)
influence = [[[None for _ in range(kernel_h * kernel_w)] for _ in range(OW)] for _ in range(OH)]
for h in range(NH):
for w in range(NW):
for fh in range(kernel_h):
for fw in range(kernel_w):
ph = h * stride + fh
pw = w * stride + fw
oh = ph - pad_h
ow = pw - pad_w
if oh >= 0 and ow >= 0 and oh < OH and ow < OW:
influence[oh][ow][fh * kernel_w + fw] = (h, w)
ty = np.zeros((N, C, OH, OW))
dW = np.zeros(convT.W.shape)
dX = np.zeros(convT.X.shape)
dY = np.random.random(convT.Y.shape) * 100
# F = FW.reshape((D, C, K, K))
# N, D, NH, NW = X.shape
for i in range(N):
for c in range(C):
for oh in range(OH):
for ow in range(OW):
il = influence[oh][ow]
for t, pos in enumerate(il):
if pos is not None:
h,w = pos
for d in range(D):
ty[i, c, oh, ow] += X[i, d, h, w] * FW[d, c].ravel()[t]
dW[d, c].ravel()[t] += dY[i, c, oh, ow] * X[i, d, h, w]
dX[i, d, h, w] += dY[i, c, oh, ow] * FW[d, c].ravel()[t]
ty += convT.b.reshape((1, -1, 1, 1))
db = np.sum(dY, (0, 2, 3)).reshape(convT.b.shape)
convT.forward()
assert np.allclose(convT.Y, ty)
# test backward
# db, dw, dx
convT.dY = dY
convT.backward()
assert np.allclose(convT.db, db)
assert np.allclose(convT.dW, dW)
assert np.allclose(convT.dX, dX)
def test_conv():
go_convt(1, 0)
go_convt(2, 0)
go_convt(3, 0)
go_convt(1, 1)
go_convt(2, 1)
go_convt(3, 1)
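# --- Illustrative note (not part of the original test) ---
# The expected output size used above follows the transposed-convolution
# relation OH = (NH - 1) * stride + kernel - 2 * pad. For example, with
# NH = 4, stride = 2, kernel = 3, pad = 1: OH = (4 - 1) * 2 + 3 - 2 = 7.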
|
442335
|
import pytest
import mock
import pandas as pd
from nesta.packages.decorators.schema_transform import schema_transform
from nesta.packages.decorators.schema_transform import schema_transformer
class TestSchemaTransform():
@staticmethod
@pytest.fixture
def test_data():
return [{"bad_col": 1, "another_bad_col": 2, "one_more_bad_col": -1}]
@staticmethod
@pytest.fixture
def test_transformer():
return {"bad_col": "good_col",
"another_bad_col": "another_good_col"}
@mock.patch('nesta.packages.decorators.schema_transform.load_transformer')
def test_dataframe_transform(self, mocked_loader, test_transformer, test_data):
mocked_loader.return_value = test_transformer
dummy_func = lambda : pd.DataFrame(test_data)
wrapper = schema_transform("dummy")
wrapped = wrapper(dummy_func)
transformed = wrapped()
assert len(transformed.columns) == len(test_transformer)
assert all(c in test_transformer.values()
for c in transformed.columns)
@mock.patch('nesta.packages.decorators.schema_transform.load_transformer')
def test_list_of_dict_transform(self, mocked_loader, test_transformer, test_data):
mocked_loader.return_value = test_transformer
dummy_func = lambda : test_data
wrapper = schema_transform("dummy")
wrapped = wrapper(dummy_func)
transformed = wrapped()
transformed = pd.DataFrame(transformed)
assert len(transformed.columns) == len(test_transformer)
assert all(c in test_transformer.values()
for c in transformed.columns)
@mock.patch('nesta.packages.decorators.schema_transform.load_transformer')
def test_invalid_type_transform(self, mocked_loader, test_transformer):
mocked_loader.return_value = test_transformer
dummy_func = lambda : None
wrapper = schema_transform("dummy")
wrapped = wrapper(dummy_func)
with pytest.raises(ValueError) as e:
wrapped()
assert "Schema transform expects" in str(e.value)
@mock.patch('nesta.packages.decorators.schema_transform.load_transformer')
def test_single_dict(self, mocked_loader, test_transformer):
mocked_loader.return_value = test_transformer
test_data = {'bad_col': 111, 'another_bad_col': 222, 'stuff': 333}
transformed = schema_transformer(test_data, filename='dummy')
assert transformed == {'good_col': 111, 'another_good_col': 222}
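# --- Illustrative usage sketch (not part of the original tests) ---
# How the decorator is used in practice; "my_schema" is a hypothetical
# transformer name. Columns absent from the transformer are dropped.
# @schema_transform("my_schema")
# def collect():
#     return [{"bad_col": 1, "another_bad_col": 2, "ignored": 3}]
# collect()  # -> rows with columns good_col, another_good_col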
|
442336
|
from .korpus_aihub_translation import (
AIHubTranslationKorpus,
AIHubSpokenTranslationKorpus,
AIHubConversationTranslationKorpus,
AIHubNewsTranslationKorpus,
AIHubKoreanCultureTranslationKorpus,
AIHubDecreeTranslationKorpus,
AIHubGovernmentWebsiteTranslationKorpus,
fetch_aihub
)
from .korpus_aihub_kspon_speech import AIHubKsponSpeechKorpus
from .korpus_chatbot_data import KoreanChatbotKorpus, fetch_chatbot
from .korpus_kcbert import KcBERTKorpus, fetch_kcbert
from .korpus_korean_hate_speech import KoreanHateSpeechKorpus, fetch_korean_hate_speech
from .korpus_korean_parallel import KoreanParallelKOENNewsKorpus, fetch_korean_parallel_koen_news
from .korpus_korean_petitions import KoreanPetitionsKorpus, fetch_korean_petitions
from .korpus_kornli import KorNLIKorpus, fetch_kornli
from .korpus_korsts import KorSTSKorpus, fetch_korsts
from .korpus_kowiki import KowikiTextKorpus, fetch_kowikitext
from .korpus_namuwiki import NamuwikiTextKorpus, fetch_namuwikitext
from .korpus_naverchangwon_ner import NaverChangwonNERKorpus, fetch_naverchangwon_ner
from .korpus_nsmc import NSMCKorpus, fetch_nsmc
from .korpus_question_pair import QuestionPairKorpus, fetch_questionpair
from .korpus_open_subtitles import OpenSubtitleKorpus, fetch_open_subtitles
from .korpus_modu_news import ModuNewsKorpus, fetch_modu
from .korpus_modu_messenger import ModuMessengerKorpus
from .korpus_modu_morpheme import ModuMorphemeKorpus
from .korpus_modu_ne import ModuNEKorpus
from .korpus_modu_spoken import ModuSpokenKorpus
from .korpus_modu_web import ModuWebKorpus
from .korpus_modu_written import ModuWrittenKorpus
from .utils import default_korpora_path
class Korpora:
"""
Examples::
>>> from Korpora import Korpora
>>> nsmc = Korpora.load('nsmc')
>>> len(nsmc.train.texts) # 150000
        >>> len(nsmc.train.labels) # 150000
"""
@classmethod
def load(cls, corpus_names, root_dir=None, force_download=False):
return_single = isinstance(corpus_names, str)
if return_single:
corpus_names = [corpus_names]
corpora = [KORPUS[corpus_name](root_dir, force_download) for corpus_name in corpus_names]
if return_single:
return corpora[0]
return corpora
@classmethod
def fetch(cls, corpus_name, root_dir=None, force_download=False):
if corpus_name.lower() == 'all':
corpus_name = sorted(FETCH.keys())
if isinstance(corpus_name, str):
corpus_name = [corpus_name]
        # modu_* and aihub_* corpora cannot be fetched automatically, so they are skipped here
        corpus_name = [name for name in corpus_name if (name[:5] != 'modu_' and name[:6] != 'aihub_')]
for name in corpus_name:
if name not in FETCH:
                raise ValueError(f'Supports only {sorted(FETCH.keys())}')
if root_dir is None:
root_dir = default_korpora_path
for name in corpus_name:
fetch_func = FETCH[name]
fetch_func(root_dir, force_download)
@classmethod
def corpus_list(cls):
return KORPUS_DESCRIPTION
@classmethod
def exists(cls, corpus_name, root_dir=None, return_by_each_corpus=False):
if (corpus_name == 'all') or (corpus_name[0] == 'all'):
corpus_name = sorted(KORPUS.keys())
elif isinstance(corpus_name, str):
corpus_name = [corpus_name]
if root_dir is None:
root_dir = default_korpora_path
corpora = [KORPUS[name].exists(root_dir=root_dir) for name in corpus_name]
if return_by_each_corpus:
return corpora
return all(corpora)
KORPUS = {
'kcbert': KcBERTKorpus,
'korean_chatbot_data': KoreanChatbotKorpus,
'korean_hate_speech': KoreanHateSpeechKorpus,
'korean_parallel_koen_news': KoreanParallelKOENNewsKorpus,
'korean_petitions': KoreanPetitionsKorpus,
'kornli': KorNLIKorpus,
'korsts': KorSTSKorpus,
'kowikitext': KowikiTextKorpus,
'namuwikitext': NamuwikiTextKorpus,
'naver_changwon_ner': NaverChangwonNERKorpus,
'nsmc': NSMCKorpus,
'question_pair': QuestionPairKorpus,
'modu_news': ModuNewsKorpus,
'modu_messenger': ModuMessengerKorpus,
'modu_mp': ModuMorphemeKorpus,
'modu_ne': ModuNEKorpus,
'modu_spoken': ModuSpokenKorpus,
'modu_web': ModuWebKorpus,
'modu_written': ModuWrittenKorpus,
'open_subtitles': OpenSubtitleKorpus,
'aihub_translation': AIHubTranslationKorpus,
'aihub_spoken_translation': AIHubSpokenTranslationKorpus,
'aihub_conversation_translation': AIHubConversationTranslationKorpus,
'aihub_news_translation': AIHubNewsTranslationKorpus,
'aihub_korean_culture_translation': AIHubKoreanCultureTranslationKorpus,
'aihub_decree_translation': AIHubDecreeTranslationKorpus,
'aihub_government_website_translation': AIHubGovernmentWebsiteTranslationKorpus,
'aihub_kspon_speech_scripts': AIHubKsponSpeechKorpus,
}
KORPUS_DESCRIPTION = {
'kcbert': "beomi@github 님이 만드신 KcBERT 학습데이터",
'korean_chatbot_data': "songys@github 님이 만드신 챗봇 문답 데이터",
'korean_hate_speech': "{inmoonlight,warnikchow,beomi}@github 님이 만드신 혐오댓글데이터",
'korean_parallel_koen_news': "jungyeul@github 님이 만드신 병렬 말뭉치",
'korean_petitions': "lovit@github 님이 만드신 2017.08 ~ 2019.03 청와대 청원데이터",
'kornli': "KakaoBrain 에서 제공하는 Natural Language Inference (NLI) 데이터",
'korsts': "KakaoBrain 에서 제공하는 Semantic Textual Similarity (STS) 데이터",
'kowikitext': "lovit@github 님이 만드신 wikitext 형식의 한국어 위키피디아 데이터",
'namuwikitext': "lovit@github 님이 만드신 wikitext 형식의 나무위키 데이터",
'naver_changwon_ner': "네이버 + 창원대 NER shared task data",
'nsmc': "e9t@github 님이 만드신 Naver sentiment movie corpus v1.0",
'question_pair': "songys@github 님이 만드신 질문쌍(Paired Question v.2)",
'modu_news': '국립국어원에서 만든 모두의 말뭉치: 뉴스 말뭉치',
'modu_messenger': '국립국어원에서 만든 모두의 말뭉치: 메신저 말뭉치',
'modu_mp': '국립국어원에서 만든 모두의 말뭉치: 형태 분석 말뭉치',
'modu_ne': '국립국어원에서 만든 모두의 말뭉치: 개체명 분석 말뭉치',
'modu_spoken': '국립국어원에서 만든 모두의 말뭉치: 구어 말뭉치',
'modu_web': '국립국어원에서 만든 모두의 말뭉치: 웹 말뭉치',
'modu_written': '국립국어원에서 만든 모두의 말뭉치: 문어 말뭉치',
'open_subtitles': 'Open parallel corpus (OPUS) 에서 제공하는 영화 자막 번역 병렬 말뭉치',
'aihub_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (구어 + 대화 + 뉴스 + 한국문화 + 조례 + 지자체웹사이트)",
'aihub_spoken_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (구어)",
'aihub_conversation_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (대화)",
'aihub_news_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (뉴스)",
'aihub_korean_culture_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (한국문화)",
'aihub_decree_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (조례)",
'aihub_government_website_translation': "AI Hub 에서 제공하는 번역용 병렬 말뭉치 (지자체웹사이트)",
'aihub_kspon_speech_scripts': "AI Hub 에서 제공하는 한국어 음성 오디오 말뭉치 (전사)",
}
FETCH = {
'kcbert': fetch_kcbert,
'korean_chatbot_data': fetch_chatbot,
'korean_hate_speech': fetch_korean_hate_speech,
'korean_parallel_koen_news': fetch_korean_parallel_koen_news,
'korean_petitions': fetch_korean_petitions,
'kornli': fetch_kornli,
'korsts': fetch_korsts,
'kowikitext': fetch_kowikitext,
'namuwikitext': fetch_namuwikitext,
'naver_changwon_ner': fetch_naverchangwon_ner,
'nsmc': fetch_nsmc,
'question_pair': fetch_questionpair,
'modu_news': fetch_modu,
'modu_messenger': fetch_modu,
'modu_mp': fetch_modu,
'modu_ne': fetch_modu,
'modu_spoken': fetch_modu,
'modu_web': fetch_modu,
'modu_written': fetch_modu,
'open_subtitles': fetch_open_subtitles,
'aihub_translation': fetch_aihub,
'aihub_spoken_translation': fetch_aihub,
'aihub_conversation_translation': fetch_aihub,
'aihub_news_translation': fetch_aihub,
'aihub_korean_culture_translation': fetch_aihub,
'aihub_decree_translation': fetch_aihub,
'aihub_government_website_translation': fetch_aihub,
'aihub_kspon_speech_scripts': fetch_aihub,
}
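# --- Illustrative usage sketch (not part of the original module) ---
# Typical calls against the class above; all corpus names come from the KORPUS
# registry, and download/availability details are assumptions.
# from Korpora import Korpora
# Korpora.fetch('nsmc')                                  # download a single corpus
# nsmc, petitions = Korpora.load(['nsmc', 'korean_petitions'])
# print(Korpora.corpus_list()['nsmc'])                   # human-readable description
# print(Korpora.exists('nsmc'))                          # True once fetched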
|
442381
|
from .basic import get_page
from .status import get_cont_of_weibo
from .user import (get_profile, get_fans_or_followers_ids, get_user_profile,
get_newcard_by_name)
|
442399
|
from lazydata.cli.commands.BaseCommand import BaseCommand
from lazydata.config.config import Config
class AddSourceCommand(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('path', type=str, help='Path of the file')
parser.add_argument('source_url', type=str, help='URL of the source of file')
return parser
def handle(self, args):
config = Config()
source_url = args.source_url
path = args.path
entry, _ = config.get_latest_and_all_file_entries(path=path)
config.add_source(entry=entry, source_url=source_url)
|
442402
|
description = 'Stanford SR-850 lock-in amplifier, for susceptibility measurements'
group = 'optional'
includes = ['base']
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
M = device('nicos_mlz.mira.devices.sr850.Amplifier',
description = 'SR850 lock-in amplifier',
tangodevice = tango_base + 'sr850/io',
),
M2 = device('nicos_mlz.mira.devices.sr850.Amplifier',
description = 'SR850 lock-in amplifier',
tangodevice = tango_base + 'sr850/io2',
),
)
startupcode = '''
SetDetectors(M, M2)
'''
|
442410
|
from fastapi import APIRouter
from .models import ServiceInfo
from .workflow import get_queue_name
router_workflow = APIRouter()
@router_workflow.get('/')
def touch():
return 'API is running'
@router_workflow.get('/{service_id}', response_model=ServiceInfo, status_code=202)
def exec_workflow_queue_name(service_id):
queue_name = get_queue_name(service_id)
return {'service_id': service_id,
'queue_name': queue_name}
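# --- Illustrative usage sketch (not part of the original module) ---
# Minimal wiring/test example; the `app` object is an assumption for
# illustration, only the router comes from the module above.
# from fastapi import FastAPI
# from fastapi.testclient import TestClient
# app = FastAPI()
# app.include_router(router_workflow)
# client = TestClient(app)
# assert client.get('/').status_code == 200
# resp = client.get('/abc123')   # hypothetical service_id; returns its queue name with status 202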
|
442413
|
import numpy as np
from scipy import signal
from misc.geometry import *
from misc.numpy_utils import NumpyUtils
from osu.local.beatmap.beatmap import Beatmap
from analysis.osu.mania.map_data import ManiaMapData
from analysis.osu.mania.action_data import ManiaActionData
class ManiaMapMetrics():
"""
Raw metrics
"""
@staticmethod
def calc_press_rate(action_data, col=None, window_ms=1000):
"""
Calculates presses per second across all columns within indicated ``window_ms`` of time.
        Has a moving window that shifts to the next note occurring at a new timing
Parameters
----------
action_data : numpy.array
Action data from ``ManiaMapData.get_action_data``
col : int
            Column to calculate presses per second for
window_ms : int
Duration in milliseconds for which actions are counted up
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, aps)``. ``times`` are timings corresponding to recorded actions per second.
``aps`` are actions per second at indicated time.
"""
times, aps = [], []
if col != None:
action_data = action_data[col]
for timing in action_data.index:
actions_in_range = action_data.loc[timing - window_ms : timing]
num_actions = (actions_in_range == ManiaActionData.PRESS).to_numpy().sum()
times.append(timing)
aps.append(1000*num_actions/window_ms)
return np.asarray(times), np.asarray(aps)
@staticmethod
def calc_note_intervals(action_data, col):
"""
Gets the duration (time interval) between each note in the specified ``col``
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
col : int
Which column number to get note intervals for
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(start_times, intervals)``. ``start_times`` are timings corresponding to start of notes.
            ``intervals`` are the time differences between current and previous notes' starting times.
            Resultant array size is one less than the number of presses in the column.
"""
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
if len(press_timings) < 2: return [], []
return press_timings[1:].to_numpy(), np.diff(press_timings.to_numpy())
@staticmethod
def calc_max_press_rate_per_col(action_data, window_ms=1000):
"""
        Finds which column has the maximum presses per second within the indicated ``window_ms`` of time
Parameters
----------
action_data : numpy.array
Action data from ``ManiaMapData.get_action_data``
window_ms : int
Duration in milliseconds for which actions are counted up
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(times, max_aps_per_col)``. ``times`` are timings corresponding to recorded actions per second.
``max_aps_per_col`` are max actions per second at indicated time.
"""
times, aps = [], []
# iterate through timings
for timing in action_data.index:
aps_per_col = []
# iterate through columns
for _, data in action_data.loc[timing - window_ms : timing].iteritems():
num_actions = (data == ManiaActionData.PRESS).to_numpy().sum()
aps_per_col.append(1000*num_actions/window_ms)
times.append(timing)
aps.append(max(aps_per_col))
return np.asarray(times), np.asarray(aps)
@staticmethod
def filter_single_note_releases(action_data):
"""
Removes releases associated with single notes by setting them to FREE
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
filtered action_data
"""
filtered_action_data = action_data.copy()
# Operate per column (because idk how to make numpy operate on all columns like this)
for col in range(ManiaActionData.num_keys(action_data)):
# For current column, get where PRESS and RELEASE occur
release_timings = action_data.index[action_data[col] == ManiaActionData.RELEASE]
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
# For filtering out releases associated with single notes
# (assumes single note press interval is 1 ms)
non_release = (release_timings - press_timings) <= 1
            filtered_action_data[col].loc[release_timings[non_release]] = 0
return filtered_action_data
@staticmethod
def detect_presses_during_holds(action_data):
"""
Masks presses that occur when there is at least one hold in one of the columns
This is useful for determining which presses are harder due to finger independence.
Holds have a tendency to make affected fingers slower or less accurate to press.
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected
"""
press_mask = (action_data == ManiaActionData.PRESS).to_numpy()
press_mask_any = np.any(action_data == ManiaActionData.PRESS, 1)
hold_mask_any = np.any(action_data == ManiaActionData.HOLD, 1)
press_and_hold = np.logical_and(press_mask_any, hold_mask_any)
press_mask = press_and_hold[:, None] * press_mask
return press_mask
@staticmethod
def detect_holds_during_release(action_data):
"""
Masks holds that occur when there is at least one release in one of the columns
This is useful for determining which holds are harder due to finger independence.
Releases have a tendency to make affected fingers release prematurely.
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected
"""
hold_mask = (action_data == ManiaActionData.HOLD).to_numpy()
release_mask_any = np.any(action_data == ManiaActionData.RELEASE, 1)
hold_mask_any = np.any(action_data == ManiaActionData.HOLD, 1)
release_and_hold = np.logical_and(release_mask_any, hold_mask_any)
hold_mask = release_and_hold[:, None] * hold_mask
return hold_mask
@staticmethod
def detect_hold_notes(action_data):
"""
Masks hold notes; removes single notes from data.
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected
"""
hold_note_mask = action_data.copy()
# Operate per column (because idk how to make numpy operate on all columns like this)
for col in range(ManiaActionData.num_keys(action_data)):
# For current column, get where PRESS and RELEASE occur
release_timings = action_data.index[action_data[col] == ManiaActionData.RELEASE]
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
# Filter out idx in where_release_timing and where_press_timing that are 1 or less ms apart
# (assumes single note press interval is 1 ms)
hold_note_start_mask = (release_timings - press_timings) > 1
# Since we want to also include HOLD actions, let's assign 2 to PRESS and RELEASE actions associated
# with hold notes so everything else can later be easily filtered out.
hold_note_mask[col].loc[release_timings[hold_note_start_mask]] = 2
hold_note_mask[col].loc[press_timings[hold_note_start_mask]] = 2
            # Filter out everything else
hold_note_mask[col][hold_note_mask[col] != 2] = 0
# Set all the 2's to 1's
hold_note_mask[col][hold_note_mask[col] == 2] = 1
return hold_note_mask
@staticmethod
def data_to_press_durations(action_data):
"""
Takes action_data, and turns it into time intervals since last press.
For example,
::
[138317., 1., 0.],
[138567., 3., 0.],
[138651., 1., 1.],
[138901., 2., 2.],
[138984., 2., 2.],
[139234., 3., 3.],
becomes
::
[138317., 0., 0. ],
[138567., 0., 0. ],
[138651., 334., 0. ],
[138901., 0., 0. ],
[138984., 0., 0. ],
[139234., 0., 0. ],
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data with intervals between presses
"""
# Make a copy of the data and keep just the timings
press_intervals_data = action_data.copy()
press_intervals_data[:] = 0
# Operate per column (because idk how to make numpy operate on all columns like this)
for col in range(ManiaActionData.num_keys(action_data)):
# Get timings for PRESS
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
# This contains a list of press intervals. The locations of the press intervals are
# resolved via where_press_timing starting with the second press
press_intervals = press_timings[1:] - press_timings[:-1]
# Now fill in the blank data with press intervals
press_intervals_data[col].loc[press_timings[1:]] = press_intervals
return press_intervals_data
@staticmethod
def data_to_hold_durations(action_data):
"""
Takes action_data, filters out non hold notes, and reduces them to
durations they last for. For example,
::
[138317., 1., 0.],
[138567., 3., 0.],
[138651., 1., 1.],
[138901., 2., 2.],
[138984., 2., 2.],
[139234., 3., 3.],
becomes
::
[138317., 250., 0. ],
[138567., 0., 0. ],
[138651., 583., 583.],
[138901., 0., 0. ],
[138984., 0., 0. ],
[139234., 0., 0. ],
.. note:: This does not filter out single notes and
            will process single note press/release times as well
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data with hold note durations
"""
# Make a copy of the data and keep just the timings
hold_note_duration_data = action_data.copy()
hold_note_duration_data[:] = 0
# Make another copy of the data to have just stuff related to hold notes
hold_note_mask = ManiaMapMetrics.detect_hold_notes(action_data)
hold_note_data = action_data.copy()
# Keep just the information associated with hold notes
hold_note_data[~hold_note_mask.astype(np.bool, copy=False)] = 0
# Operate per column (because idk how to make numpy operate on all columns like this)
for col in range(ManiaActionData.num_keys(action_data)):
# For current column, get where PRESS and RELEASE occur
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
release_timings = action_data.index[action_data[col] == ManiaActionData.RELEASE]
# This contains a list of hold note durations. The locations of the hold note durations are
# resolved via where_press_timing
hold_note_durations = release_timings - press_timings
# Now fill in the blank data with hold note durations
hold_note_duration_data[col].loc[release_timings] = hold_note_durations
return hold_note_duration_data
@staticmethod
def data_to_anti_press_durations(action_data):
"""
Takes action_data, and reduces them to durations of anti-presses. Anti-presses
are associated with points in LN type patterns where there is a spot between
two holdnotes where the finger is released. For example,
::
[138317., 1., 0.],
[138567., 3., 0.],
[138651., 1., 1.],
[138901., 2., 2.],
[138984., 2., 2.],
[139234., 3., 3.],
becomes
::
[138317., 0., 0. ],
[138567., 84., 0. ],
[138651., 0., 0. ],
[138901., 0., 0. ],
[138984., 0., 0. ],
[139234., 0., 0. ],
.. note:: This does not filter out single notes and
            will process single note press/release times as well
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data with hold note durations
"""
# Make a copy of the data and keep just the timings
anti_press_duration_data = action_data.copy()
anti_press_duration_data[:] = 0
# Make another copy of the data to have just stuff related to hold notes
hold_note_mask = ManiaMapMetrics.detect_hold_notes(action_data)
hold_note_data = action_data.copy()
# Keep just the information associated with hold notes
hold_note_data[~hold_note_mask.astype(np.bool, copy=False)] = 0
# Operate per column (because idk how to make numpy operate on all columns like this)
for col in range(ManiaActionData.num_keys(action_data)):
# Get timings for those PRESS and RELEASE. We drop the last release timing because
# There is no press after that, hence no anti-press. We drop the first press timing
# because there is no release before that, hence no anti-press
press_timings = action_data.index[action_data[col] == ManiaActionData.PRESS]
release_timings = action_data.index[action_data[col] == ManiaActionData.RELEASE]
# This contains a list of anti-press durations. The locations of the anti-press durations are
# resolved via where_release_timing
anti_press_durations = press_timings[1:] - release_timings[:-1]
# Now fill in the blank data with anti-press durations
anti_press_duration_data[col].loc[press_timings[1:]] = anti_press_durations
return anti_press_duration_data
@staticmethod
def detect_inverse(action_data):
"""
Masks notes that are detected as inverses
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected
"""
inverse_mask = action_data.copy()
inverse_mask[:] = 0
# Ratio of release to hold duration that qualifies as inverse
# For example 0.6 - Release duration needs to be 0.6*hold_duration to qualify as inverse
ratio_free_to_hold = 0.6
anti_press_durations = ManiaMapMetrics.data_to_anti_press_durations(action_data)
hold_press_durations = ManiaMapMetrics.data_to_hold_durations(action_data)
# Go through each column on left hand
for col in range(ManiaActionData.num_keys(action_data)):
anti_press_durations_col = anti_press_durations[col].to_numpy()
hold_press_durations_col = hold_press_durations[col].to_numpy()
# For filtering out timings with FREE
is_anti_press = anti_press_durations_col != ManiaActionData.FREE
is_hold_press = hold_press_durations_col != ManiaActionData.FREE
# Compare release duration against hold durations of previous and next hold notes
free_ratio_prev_hold = anti_press_durations_col[is_anti_press] <= ratio_free_to_hold*hold_press_durations_col[is_hold_press][:-1]
free_ratio_next_hold = anti_press_durations_col[is_anti_press] <= ratio_free_to_hold*hold_press_durations_col[is_hold_press][1:]
is_inverse = np.logical_and(free_ratio_prev_hold, free_ratio_next_hold)
# Resolve inverse location and assign
where_inverse = np.where(is_anti_press)[0][is_inverse]
inverse_mask[col].iloc[where_inverse] = 1
return inverse_mask
@staticmethod
def detect_chords(action_data):
"""
        Masks notes that are detected as chords
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected that correspond to chord patterns. 1 if chord pattern 0 otherwise
"""
'''
        A note is part of a chord if:
- It is among 3 or more other notes in same action
- TODO: It is among 3 or more other notes in range of actions within tolerance interval
'''
        # Minimal implementation of the first rule above; the tolerance-interval
        # rule remains TODO. A press counts as part of a chord when 3 or more
        # columns are pressed in the same action (same timing row).
        press_mask = (action_data == ManiaActionData.PRESS)
        num_presses = press_mask.sum(axis=1)
        chord_mask = press_mask.astype(int).mul((num_presses >= 3).astype(int), axis=0)
        return chord_mask
@staticmethod
def detect_jacks(action_data):
"""
        Masks notes that are detected as jacks
Parameters
----------
action_data : numpy.array
Action data from ``ManiaActionData.get_action_data``
Returns
-------
numpy.array
action_data mask of actions detected that correspond to jack patterns. 1 if jack pattern 0 otherwise
"""
        mask = action_data.copy()
        mask[:] = 0
        state = np.zeros(action_data.shape[1])
        # TODO: jack detection is not implemented yet; an all-zero mask is
        # returned instead of echoing the raw action data.
        #for i in range(1, len(action_data)):
        #    state = np.logical_and(np.logical_or(action_data.iloc[i - 1], state), np.logical_or(action_data.iloc[i], ~np.any(action_data.iloc[i])))
        #    mask[i, 1:] = np.logical_and(action_data[i, 1:], state)
        return mask
@staticmethod
def calc_notes_per_sec(hitobject_data, column=None):
"""
Gets average note rate with window of 1 second throughout the beatmap in the specified ``column``
Parameters
----------
hitobject_data : numpy.array
Hitobject data from ``ManiaMapData.get_hitobject_data``
column : int
Which column number to get average note rate for. If left blank, interprets all columns as one.
Returns
-------
(numpy.array, numpy.array)
Tuple of ``(start_times, notes_per_sec)``. ``start_times`` are timings corresponding to start of notes.
``notes_per_sec`` are average note rates at ``start_times`` point in time. Resultant array size is
``len(hitobject_data) - 1``.
"""
if column == None:
start_times = ManiaMapData.start_times(hitobject_data)
mask, filtered_start_times, processed_start_times = NumpyUtils.mania_chord_to_jack(start_times)
if len(start_times) < 2: return [], []
intervals = 1000/(processed_start_times[1:] - filtered_start_times[:-1])
return start_times[mask == 0][1:], intervals
else:
start_times = ManiaMapData.start_times(hitobject_data, column)
if len(start_times) < 2: return [], []
intervals = 1000/np.diff(start_times)
return start_times[1:], intervals
@staticmethod
def calc_avg_nps_col(hitobject_data, time, ms_window, column):
"""
Gets average notes with window of ``ms_window`` for the specified ``column`` at time ``time``
Parameters
----------
hitobject_data : numpy.array
Hitobject data from ``ManiaMapData.get_hitobject_data``
time: int
Time to calculate notes per second for
ms_window: int
Milliseconds back in time to take account
column : int
Which column number to get average note rate for
Returns
-------
float
Average notes per second for specified column
"""
start_times = ManiaMapData.start_times(hitobject_data, column)
        start_times = start_times[(time - ms_window <= start_times) & (start_times <= time)]
        intervals = 1000/np.diff(start_times)
        return np.mean(intervals)
@staticmethod
def calc_avg_nps(hitobject_data, time, ms_window):
"""
Gets average notes with window of ``ms_window`` for all columns at time ``time``
Parameters
----------
hitobject_data : numpy.array
Hitobject data from ``ManiaMapData.get_hitobject_data``
time: int
Time to calculate notes per second for
ms_window: int
Milliseconds back in time to take account
Returns
-------
float
Average notes per second
"""
        avg_nps = np.asarray([ ManiaMapMetrics.calc_avg_nps_col(hitobject_data, time, ms_window, column) for column in range(len(hitobject_data)) ])
return np.mean(avg_nps)
@staticmethod
def to_binary_signal(hitobject_data, tap_duration=25):
"""
Returns a binary signal indicating press or release for the specified
column at the ms resolution specified
tap_duration: Length of a single tap
"""
end_time = ManiaMapData.end_times(hitobject_data)[-1]
signals = np.zeros((len(hitobject_data), end_time))
for column in range(len(hitobject_data)):
for x,y in ManiaMapData.start_end_times(hitobject_data, column):
if x == y: y += tap_duration
signals[column][x:y] = 1
return np.arange(end_time), signals
@staticmethod
def hand_hold(hitobject_data, min_release=150):
"""
        Determines on a scale from 0.0 to 1.0 how likely the player is unable to raise their hand
        Returns two values, for left and right hand
        min_release: minimum time in ms the hand must be free of notes to count as released
"""
time, signals = ManiaMapMetrics.to_binary_signal(hitobject_data, tap_duration=25)
kernel = np.ones(min_release)
conv = np.apply_along_axis(lambda data: np.convolve(data, kernel, mode='same'), axis=1, arr=signals)
# TODO: kernel_left, kernel_right; size: int(len(conv)/2)
kernel = [[1],
[1]]
# Valid because we need to conv multiple columns into one array indicating whether hand will be held down
conv_left = signal.convolve2d(conv[:int(len(conv)/2)], kernel, 'valid')
conv_left = np.clip(conv_left, 0, 1)
conv_right = signal.convolve2d(conv[int(len(conv)/2):], kernel, 'valid')
conv_right = np.clip(conv_right, 0, 1)
return time, conv_left[0], conv_right[0]
@staticmethod
def hand_hold_ratio(hitobject_data, min_release=150):
time, hand_hold_left, hand_hold_right = ManiaMapMetrics.hand_hold(hitobject_data, min_release)
left_ratio = sum(hand_hold_left)/len(hand_hold_left)
right_ratio = sum(hand_hold_right)/len(hand_hold_right)
return left_ratio, right_ratio
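# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical wiring; the exact beatmap loading call is an assumption, only
# the metric calls below come from the class above.
# action_data = ManiaActionData.get_action_data(beatmap)      # beatmap loaded elsewhere
# times, aps = ManiaMapMetrics.calc_press_rate(action_data, window_ms=1000)
# hold_durations = ManiaMapMetrics.data_to_hold_durations(action_data)
# inverse_mask = ManiaMapMetrics.detect_inverse(action_data)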
|
442467
|
import torch.nn.utils as tnnu
import torch.utils.data as tud
import models.dataset as md
from models.actions import Action
from models.actions.collect_stats_from_model import CollectStatsFromModel
from running_modes.configurations import TransferLearningConfiguration
from running_modes.enums import GenerativeModelRegimeEnum
from running_modes.transfer_learning.logging.base_transfer_learning_logger import BaseTransferLearningLogger
class TrainModel(Action):
def __init__(self, model, configuration: TransferLearningConfiguration, optimizer, training_sets, validation_sets,
lr_scheduler, logger: BaseTransferLearningLogger):
"""
        Initializes the transfer-learning training run.
        :param model: A model instance, not loaded in scaffold_decorating mode.
        :param configuration: The TransferLearningConfiguration (epochs, batch size, gradient clipping, etc.).
        :param optimizer: The optimizer instance already initialized on the model.
        :param training_sets: An iterator with all the training sets (scaffold, decoration) pairs.
        :param validation_sets: An iterator with the matching validation sets.
        :param lr_scheduler: The learning rate scheduler wrapping the optimizer.
        :param logger: Logger used to report per-epoch statistics.
        :return:
"""
Action.__init__(self, logger)
self.model = model
self.config = configuration
self.optimizer = optimizer
self.training_sets = training_sets
self.validation_sets = validation_sets
self.lr_scheduler = lr_scheduler
self.model_regime_enum = GenerativeModelRegimeEnum()
def run(self):
for epoch, training_set, validation_set in zip(range(1, self.config.epochs + 1), self.training_sets,
self.validation_sets):
dataloader = self._initialize_dataloader(training_set)
# iterate over training batch
for scaffold_batch, decorator_batch in dataloader:
loss = self.model.likelihood(*scaffold_batch, *decorator_batch).mean()
self.optimizer.zero_grad()
loss.backward()
if self.config.clip_gradients > 0:
tnnu.clip_grad_norm_(self.model.network.parameters(), self.config.clip_gradients)
self.optimizer.step()
# Get stats and logs
self.collect_stats(epoch=epoch, training_set=training_set, validation_set=validation_set)
# update LR
self.lr_scheduler.step()
# determine if training should continue
terminate = self.checkpoint(self.lr_scheduler.optimizer.param_groups[0]["lr"], epoch)
if terminate:
self.model.save(f"{self.config.output_path}/trained.{epoch}")
break
def collect_stats(self, epoch, training_set, validation_set):
self.model.set_mode(self.model_regime_enum.INFERENCE)
stats = CollectStatsFromModel(model=self.model, epoch=epoch, sample_size=self.config.sample_size,
training_set=training_set, validation_set=validation_set).run()
self.logger.log_timestep(lr=self.lr_scheduler.optimizer.param_groups[0]["lr"], epoch=epoch,
training_smiles=stats['sampled_training_mols'],
validation_smiles=stats['sampled_validation_mols'],
validation_nlls=stats['validation_nlls'],
training_nlls=stats['training_nlls'],
jsd_data_no_bins=stats['unbinned_jsd'],
jsd_data_bins=stats['binned_jsd'],
model=self.model)
self.model.set_mode(self.model_regime_enum.TRAINING)
def checkpoint(self, lr, epoch):
terminate_flag = False
if lr < self.config.learning_rate.min:
self.logger.log_message("Reached LR minimum. Terminating.")
terminate_flag = True
elif self.config.epochs == epoch:
self.logger.log_message(f"Reached maximum number of epochs ({epoch}). Saving and terminating.")
terminate_flag = True
elif self.config.save_frequency > 0 and (epoch % self.config.save_frequency == 0):
self.model.save(f"{self.config.output_path}/trained.{epoch}")
self.logger.log_message(f"Checkpoint after epoch {epoch}. Saving the model.")
return terminate_flag
def _initialize_dataloader(self, training_set):
dataset = md.DecoratorDataset(training_set, vocabulary=self.model.vocabulary)
return tud.DataLoader(dataset, batch_size=self.config.batch_size, shuffle=True,
collate_fn=md.DecoratorDataset.collate_fn, drop_last=True)
|
442496
|
import asyncio
import pathlib
import ssl
import websockets
import os
import time
import sys
import re
import traceback
import wave
import helper as helper
from google.cloud import speech_v1p1beta1 as speech
from six.moves import queue
from audio_stream import ResumableMediaStream
from config import cfg
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process, Queue, Pipe, Value, Manager
import json
# dec = opuslib.Decoder(cfg.SAMPLE_RATE, cfg.CHANNELS)
# import logging
# logger = logging.getLogger("asyncio").setLevel(logging.WARNING)
# logger.addHandler(logging.StreamHandler())
def get_current_time():
"""Return Current Time in MS."""
return int(round(time.time() * 1000))
# Audio recording parameters
RED = "\033[0;31m"
GREEN = "\033[0;32m"
YELLOW = "\033[0;33m"
# async def say_after(delay, what):
# await asyncio.sleep(delay)
# print(what)
def listen_print_loop(responses, stream, parent_conn, stream_closed_flag):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
for response in responses:
# Assign stream closed flag to stream closed
stream.closed = bool(stream_closed_flag.value)
if stream.closed is True:
print("Breaking out of loop")
break
if get_current_time() - stream.start_time > cfg.STREAMING_LIMIT:
stream.start_time = get_current_time()
break
if not response.results:
continue
result = response.results[0]
if not result.alternatives:
continue
transcript = result.alternatives[0].transcript
result_seconds = 0
result_nanos = 0
if result.result_end_time.seconds:
result_seconds = result.result_end_time.seconds
if result.result_end_time.microseconds:
result_nanos = result.result_end_time.microseconds * 1000
stream.result_end_time = int((result_seconds * 1000) + (result_nanos / 1000000))
corrected_time = (
stream.result_end_time
- stream.bridging_offset
+ (cfg.STREAMING_LIMIT * stream.restart_counter)
)
# uncomment next line if want to Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
# parent_conn.send(transcript)
if result.is_final:
# Send transcript to parent process through pipe
parent_conn.send(transcript)
sys.stdout.write(str(corrected_time) + ": " + transcript + "\n")
stream.is_final_end_time = stream.result_end_time
stream.last_transcript_was_final = True
else:
stream.last_transcript_was_final = False
def transcription_loop(
audio_buffer, parent_conn, stream_closed_flag, audio_recording_frames, lang_code
):
# await asyncio.sleep(4)
# audio_recording_frames = []
audio_manager = ResumableMediaStream(
cfg.SAMPLE_RATE,
cfg.CHUNK_SIZE,
audio_buffer,
stream_closed_flag,
audio_recording_frames,
)
client = speech.SpeechClient()
config = speech.types.RecognitionConfig(
encoding=cfg.ENCODING,
sample_rate_hertz=cfg.SAMPLE_RATE,
enable_automatic_punctuation=cfg.ENABLE_AUTOMATIC_PUNCTUATION,
        language_code=lang_code,
max_alternatives=1,
)
streaming_config = speech.types.StreamingRecognitionConfig(
config=config, interim_results=True
)
try:
with audio_manager as stream:
while not stream.closed:
stream.closed = bool(stream_closed_flag.value)
print("Value of stream.closed flag: ", stream.closed)
# sys.stdout.write(YELLOW)
sys.stdout.write(
"\n"
+ str(cfg.STREAMING_LIMIT * stream.restart_counter)
+ ": NEW REQUEST\n"
)
stream.audio_input = []
audio_generator = stream.generator()
requests = (
speech.types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator
)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
listen_print_loop(responses, stream, parent_conn, stream_closed_flag)
if stream.result_end_time > 0:
stream.final_request_end_time = stream.is_final_end_time
stream.result_end_time = 0
stream.last_audio_input = []
stream.last_audio_input = stream.audio_input
stream.audio_input = []
stream.restart_counter = stream.restart_counter + 1
if not stream.last_transcript_was_final:
sys.stdout.write("\n")
stream.new_stream = True
except Exception as error:
print("Error in transcription service: ", error)
stream_closed_flag.value = True
print("\n\n***Transcription process closed ")
stream_closed_flag.value = True
async def on_data(websocket, path):
print("\n****New Websocket connection Established ", flush=True)
jsonDataString = await websocket.recv()
#print("\n\n****jsonData:", jsonDataString, flush=True)
jsonData = json.loads(jsonDataString)
print("\n\n****jsonData:", jsonData, flush=True)
if jsonData["cmd"] != "start":
print("Error in initial packet start packet not found", flush=True)
return 0
if jsonData["origin"] != "mic" and jsonData["origin"] != "speaker":
print("Error in initial packet: Incorrect origin", flush=True)
return 0
data_origin = jsonData["origin"]
if "conversation_id" not in jsonData:
print("Error in initial packet conversation_id not found", flush=True)
#return 0
if "lang" not in jsonData:
print("Language setting not found", flush=True)
language_code = jsonData["lang"]
#jsonData["lang"] = "en-US"
jsonData["need_punctuation"] = False
await websocket.send(json.dumps(jsonData))
conversation_id = jsonData["conversation_id"]
## Make audio manager
audio_buffer = Queue()
parent_conn, child_conn = Pipe()
stream_closed_flag = Value("i", 0)
stream_closed_flag.value = False
manager = Manager()
audio_recording_frames = manager.list()
reader_process = Process(
target=transcription_loop,
args=(
(audio_buffer),
(parent_conn),
(stream_closed_flag),
(audio_recording_frames),
(language_code)
),
)
reader_process.daemon = True
reader_process.start()
record_audio = []
# conv_id = jsonData[""]
try:
async for message in websocket:
#print("pkt", message, flush=True)
audio_buffer.put(message)
# Comment out next two lines if no recording needed
record_audio.append(message)
if child_conn.poll():
msg = child_conn.recv()
await websocket.send(msg)
print("Received the message: {}".format(msg), flush=True)
if bool(stream_closed_flag.value) == True:
                await websocket.close()
                print("Breaking out of async for loop")
break
except (
websockets.exceptions.ConnectionClosedError,
websockets.exceptions.ConnectionClosedOK,
) as error:
stream_closed_flag.value = True
print("Websocket connection closed", error)
print("Stream closed flag set to true")
print("\n\n***Websocket connection closed")
print("Exit from transcription_loop function saving recorded audio")
folderName = os.path.join("recorded_audio", data_origin, conversation_id)
os.makedirs(folderName, exist_ok=True)
fileName = "recorded_audio_" + helper.generate_filename() + ".wav"
full_file_name = os.path.join(folderName, fileName)
#print("\n*** Writing audio data in file:", full_file_name)
#helper.write_audio_wave(record_audio, full_file_name, cfg.SAMPLE_RATE, cfg.SAMPLE_WIDTH, cfg.CHANNELS)
# helper.write_audio_flac(
# record_audio, full_file_name
# )
await websocket.close()
print("\n\n****Google voice recognizer server is now ready", flush=True)
print("interface", cfg.INTERFACE, flush=True)
print("port", cfg.PORT, flush=True)
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(
certfile=cfg.CERT_FILE_PATH, keyfile=cfg.KEY_FILE_PATH
)
start_server = websockets.serve(
on_data, cfg.INTERFACE, cfg.PORT, ssl=ssl_context, max_queue=None
)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
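# --- Illustrative client sketch (not part of the original server) ---
# A hypothetical client showing the "start" handshake the server expects before
# streaming raw audio chunks; the URI, TLS options and chunk source are
# assumptions for illustration only.
# import json
# import websockets
#
# async def send_audio(uri, chunks):
#     async with websockets.connect(uri) as ws:          # wss:// URI; TLS config omitted
#         await ws.send(json.dumps({"cmd": "start", "origin": "mic",
#                                   "conversation_id": "demo", "lang": "en-US"}))
#         print(await ws.recv())                          # server echoes the (adjusted) config
#         for chunk in chunks:                            # raw PCM audio bytes
#             await ws.send(chunk)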
|
442507
|
import json
from dataikuapi.fm.future import FMFuture
import sys
if sys.version_info > (3, 4):
from enum import Enum
else:
class Enum(object):
pass
class FMInstanceSettingsTemplateCreator(object):
def __init__(self, client, label):
"""
A builder class to create an Instance Settings Template
        :param str label: The label of the Instance Settings Template
"""
self.data = {}
self.data["label"] = label
self.client = client
def create(self):
"""
Create the Instance Settings Template
:return: Created InstanceSettingsTemplate
:rtype: :class:`dataikuapi.fm.instancesettingstemplates.FMInstanceSettingsTemplate`
"""
template = self.client._perform_tenant_json(
"POST", "/instance-settings-templates", body=self.data
)
return FMInstanceSettingsTemplate(self.client, template)
def with_setup_actions(self, setup_actions):
"""
Add setup actions
:param list setup_actions: List of :class:`dataikuapi.fm.instancesettingstemplates.FMSetupAction` to be played on an instance
:rtype: :class:`dataikuapi.fm.instancesettingstemplates.FMInstanceSettingsTemplateCreator`
"""
self.data["setupActions"] = setup_actions
return self
def with_license(self, license_file_path=None, license_string=None):
"""
Override global license
:param str license_file_path: Optional, load the license from a json file
:param str license_string: Optional, load the license from a json string
"""
if license_file_path is not None:
with open(license_file_path) as json_file:
license = json.load(json_file)
elif license_string is not None:
license = json.loads(license_string)
else:
raise ValueError(
"a valid license_file_path or license_string needs to be provided"
)
self.data["license"] = json.dumps(license, indent=2)
return self
class FMAWSInstanceSettingsTemplateCreator(FMInstanceSettingsTemplateCreator):
def with_aws_keypair(self, aws_keypair_name):
"""
Add an AWS Keypair to the DSS instance.
Needed to get SSH access to the DSS instance, using the centos user.
:param str aws_keypair_name: Name of an AWS key pair to add to the instance.
"""
self.data["awsKeyPairName"] = aws_keypair_name
return self
def with_startup_instance_profile(self, startup_instance_profile_arn):
"""
        Add an Instance Profile to be assigned to the DSS instance on startup
:param str startup_instance_profile_arn: ARN of the Instance profile assigned to the DSS instance at startup time
"""
self.data["startupInstanceProfileArn"] = startup_instance_profile_arn
return self
def with_runtime_instance_profile(self, runtime_instance_profile_arn):
"""
        Add an Instance Profile to be assigned to the DSS instance when running
:param str runtime_instance_profile_arn: ARN of the Instance profile assigned to the DSS instance during runtime
"""
self.data["runtimeInstanceProfileArn"] = runtime_instance_profile_arn
return self
def with_restrict_aws_metadata_server_access(
self, restrict_aws_metadata_server_access=True
):
"""
Restrict AWS metadata server access on the DSS instance.
:param boolean restrict_aws_metadata_server_access: Optional, If true, restrict the access to the metadata server access. Defaults to true
"""
self.data[
"restrictAwsMetadataServerAccess"
] = restrict_aws_metadata_server_access
return self
def with_default_aws_api_access_mode(self):
"""
The DSS Instance will use the Runtime Instance Profile to access AWS API.
"""
self.data["dataikuAwsAPIAccessMode"] = "NONE"
return self
def with_keypair_aws_api_access_mode(
self,
aws_access_key_id,
aws_keypair_storage_mode="NONE",
aws_secret_access_key=None,
aws_secret_access_key_aws_secret_name=None,
aws_secrets_manager_region=None,
):
"""
DSS Instance will use an Access Key to authenticate against the AWS API.
:param str aws_access_key_id: AWS Access Key ID.
:param str aws_keypair_storage_mode: Optional, the storage mode of the AWS api key. Accepts "NONE", "INLINE_ENCRYPTED" or "AWS_SECRETS_MANAGER". Defaults to "NONE"
:param str aws_secret_access_key: Optional, AWS Access Key Secret. Only needed if keypair_storage_mode is "INLINE_ENCRYPTED"
        :param str aws_secret_access_key_aws_secret_name: Optional, ASM secret name. Only needed if aws_keypair_storage_mode is "AWS_SECRETS_MANAGER"
        :param str aws_secrets_manager_region: Optional, Secret Manager region to use. Only needed if aws_keypair_storage_mode is "AWS_SECRETS_MANAGER"
"""
if aws_keypair_storage_mode not in [
"NONE",
"INLINE_ENCRYPTED",
"AWS_SECRETS_MANAGER",
]:
raise ValueError(
                'aws_keypair_storage_mode should be either "NONE", "INLINE_ENCRYPTED" or "AWS_SECRETS_MANAGER"'
)
self.data["dataikuAwsAPIAccessMode"] = "KEYPAIR"
self.data["dataikuAwsKeypairStorageMode"] = aws_keypair_storage_mode
if aws_keypair_storage_mode == "NONE":
return self
self.data["dataikuAwsAccessKeyId"] = aws_access_key_id
if aws_keypair_storage_mode == "INLINE_ENCRYPTED":
if aws_secret_access_key == None:
raise ValueError(
'When aws_keypair_storage_mode is "INLINE_ENCRYPTED", aws_secret_access_key should be provided'
)
self.data["dataikuAwsSecretAccessKey"] = aws_secret_access_key
elif aws_keypair_storage_mode == "AWS_SECRETS_MANAGER":
if aws_secret_access_key_aws_secret_name == None:
raise ValueError(
'When aws_keypair_storage_mode is "AWS_SECRETS_MANAGER", aws_secret_access_key_aws_secret_name should be provided'
)
self.data[
"dataikuAwsSecretAccessKeyAwsSecretName"
] = aws_secret_access_key_aws_secret_name
self.data["awsSecretsManagerRegion"] = aws_secrets_manager_region
return self
class FMAzureInstanceSettingsTemplateCreator(FMInstanceSettingsTemplateCreator):
def with_ssh_key(self, ssh_public_key):
"""
Add an SSH public key to the DSS Instance.
Needed to access it through SSH, using the centos user.
:param str ssh_public_key: The content of the public key to add to the instance.
"""
self.data["azureSshKey"] = ssh_public_key
return self
def with_startup_managed_identity(self, startup_managed_identity):
"""
        Add a managed identity to be assigned to the DSS instance on startup
:param str startup_managed_identity: Managed Identity ID
"""
self.data["startupManagedIdentity"] = startup_managed_identity
return self
def with_runtime_managed_identity(self, runtime_managed_identity):
"""
        Add a managed identity to be assigned to the DSS instance when running
:param str runtime_managed_identity: Managed Identity ID
"""
self.data["runtimeManagedIdentity"] = runtime_managed_identity
return self
class FMInstanceSettingsTemplate(object):
def __init__(self, client, ist_data):
self.client = client
self.id = ist_data["id"]
self.ist_data = ist_data
def save(self):
"""
Update the Instance Settings Template.
"""
self.client._perform_tenant_empty(
"PUT", "/instance-settings-templates/%s" % self.id, body=self.ist_data
)
self.ist_data = self.client._perform_tenant_json(
"GET", "/instance-settings-templates/%s" % self.id
)
def delete(self):
"""
Delete the DSS Instance Settings Template.
:return: A :class:`~dataikuapi.fm.future.FMFuture` representing the deletion process
:rtype: :class:`~dataikuapi.fm.future.FMFuture`
"""
future = self.client._perform_tenant_json(
"DELETE", "/instance-settings-templates/%s" % self.id
)
return FMFuture.from_resp(self.client, future)
def add_setup_action(self, setup_action):
"""
Add a setup_action
:param object setup_action: a :class:`dataikuapi.fm.instancesettingstemplates.FMSetupAction`
"""
self.ist_data["setupActions"].append(setup_action)
return self
class FMSetupAction(dict):
def __init__(self, setupActionType, params=None):
"""
A class representing a SetupAction
Do not create this directly, use:
- :meth:`dataikuapi.fm.instancesettingstemplates.FMSetupAction.add_authorized_key`
"""
data = {
"type": setupActionType.value,
}
if params:
data["params"] = params
super(FMSetupAction, self).__init__(data)
@staticmethod
def add_authorized_key(ssh_key):
"""
Return a ADD_AUTHORIZED_KEY FMSetupAction
"""
return FMSetupAction(FMSetupActionType.ADD_AUTHORIZED_KEY, {"sshKey": ssh_key})
@staticmethod
def run_ansible_task(stage, yaml_string):
"""
Return a RUN_ANSIBLE_TASK FMSetupAction
:param object stage: a :class:`dataikuapi.fm.instancesettingstemplates.FMSetupActionStage`
:param str yaml_string: a yaml encoded string defining the ansibles tasks to run
"""
return FMSetupAction(
FMSetupActionType.RUN_ANSIBLE_TASKS,
{"stage": stage.value, "ansibleTasks": yaml_string},
)
@staticmethod
def install_system_packages(packages):
"""
Return an INSTALL_SYSTEM_PACKAGES FMSetupAction
:param list packages: List of packages to install
"""
return FMSetupAction(
FMSetupActionType.INSTALL_SYSTEM_PACKAGES, {"packages": packages}
)
@staticmethod
def setup_advanced_security(basic_headers=True, hsts=False):
"""
Return an SETUP_ADVANCED_SECURITY FMSetupAction
:param boolean basic_headers: Optional, Prevent browsers to render Web content served by DSS to be embedded into a frame, iframe, embed or object tag. Defaults to True
:param boolean hsts: Optional, Enforce HTTP Strict Transport Security. Defaults to False
"""
return FMSetupAction(
FMSetupActionType.SETUP_ADVANCED_SECURITY,
{"basic_headers": basic_headers, "hsts": hsts},
)
@staticmethod
def install_jdbc_driver(
database_type,
url,
paths_in_archive=None,
http_headers=None,
http_username=None,
        http_password=None,
datadir_subdirectory=None,
):
"""
Return a INSTALL_JDBC_DRIVER FMSetupAction
:param object database_type: a :class:`dataikuapi.fm.instancesettingstemplates.FMSetupActionAddJDBCDriverDatabaseType`
:param str url: The full address to the driver. Supports http(s)://, s3://, abs:// or file:// endpoints
:param list paths_in_archive: Optional, must be used when the driver is shipped as a tarball or a ZIP file. Add here all the paths to find the JAR files in the driver archive. Paths are relative to the top of the archive. Wildcards are supported.
:param dict http_headers: Optional, If you download the driver from a HTTP(S) endpoint, add here the headers you want to add to the query. This setting is ignored for any other type of download.
        :param str http_username: Optional, If the HTTP(S) endpoint expects Basic Authentication, add here the username. To explicitly specify which Assigned Identity to use if the machine has several, set the client_id here. To authenticate with a SAS Token on Azure Blob Storage (not recommended), use "token" as the value here.
        :param str http_password: Optional, If the HTTP(S) endpoint expects Basic Authentication, add here the password. To authenticate with a SAS Token on Azure Blob Storage (not recommended), store the token in this field.
:param str datadir_subdirectory: Optional, Some drivers are shipped with a high number of JAR files along with them. In that case, you might want to install them under an additional level in the DSS data directory. Set the name of this subdirectory here. Not required for most drivers.
"""
return FMSetupAction(
FMSetupActionType.INSTALL_JDBC_DRIVER,
{
"url": url,
"dbType": database_type.value,
"pathsInArchive": paths_in_archive,
"headers": http_headers,
"username": http_username,
"password": <PASSWORD>,
"subpathInDatadir": datadir_subdirectory,
},
)
@staticmethod
def setup_k8s_and_spark():
"""
Return a SETUP_K8S_AND_SPARK FMSetupAction
"""
return FMSetupAction(FMSetupActionType.SETUP_K8S_AND_SPARK)
class FMSetupActionType(Enum):
RUN_ANSIBLE_TASKS = "RUN_ANSIBLE_TASKS"
INSTALL_SYSTEM_PACKAGES = "INSTALL_SYSTEM_PACKAGES"
INSTALL_DSS_PLUGINS_FROM_STORE = "INSTALL_DSS_PLUGINS_FROM_STORE"
SETUP_K8S_AND_SPARK = "SETUP_K8S_AND_SPARK"
SETUP_RUNTIME_DATABASE = "SETUP_RUNTIME_DATABASE"
SETUP_MUS_AUTOCREATE = "SETUP_MUS_AUTOCREATE"
SETUP_UI_CUSTOMIZATION = "SETUP_UI_CUSTOMIZATION"
SETUP_MEMORY_SETTINGS = "SETUP_MEMORY_SETTINGS"
SETUP_GRAPHICS_EXPORT = "SETUP_GRAPHICS_EXPORT"
ADD_AUTHORIZED_KEY = "ADD_AUTHORIZED_KEY"
INSTALL_JDBC_DRIVER = "INSTALL_JDBC_DRIVER"
SETUP_ADVANCED_SECURITY = "SETUP_ADVANCED_SECURITY"
class FMSetupActionStage(Enum):
after_dss_startup = "after_dss_startup"
after_install = "after_install"
before_install = "before_install"
class FMSetupActionAddJDBCDriverDatabaseType(Enum):
mysql = "mysql"
mssql = "mssql"
oracle = "oracle"
mariadb = "mariadb"
snowflake = "snowflake"
athena = "athena"
bigquery = "bigquery"
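# --- Illustrative usage sketch (not part of the original module) ---
# How the builder classes above are typically chained; `client` is an assumed,
# already-authenticated FM client and all literal values are placeholders.
# creator = FMAWSInstanceSettingsTemplateCreator(client, "my-template")
# template = (creator
#             .with_aws_keypair("my-keypair")
#             .with_default_aws_api_access_mode()
#             .with_setup_actions([FMSetupAction.add_authorized_key("ssh-rsa AAAA example-key")])
#             .create())
# template.add_setup_action(FMSetupAction.setup_k8s_and_spark())
# template.save()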
|
442518
|
from flex.crypto.csprng.api import generate_csprng_generator
from flex.constants import *
if __name__ == '__main__':
drbg = generate_csprng_generator(b'542435464554342576476747656736767657676545234546', b'', method=CRYPTO_HMAC_DRBG)
onetime_key = drbg.generate(2**16)
print(onetime_key)
drbg.reseed(b'<KEY>')
print(drbg.generate(2**7))
|
442557
|
import pytest
@pytest.fixture(scope='module')
def approx():
from functools import partial
return partial(pytest.approx, abs=2)
def test_complete_iteration(approx, ready_to_sleep):
import asynckivy as ak
async def job():
l = [v async for v in ak.interpolate(start=0, end=100, step=.3)]
assert l == approx([0, 30, 60, 90, 100])
sleep = ready_to_sleep()
task = ak.start(job())
for __ in range(130):
sleep(.01)
assert task.done
def test_break_during_iteration(approx, ready_to_sleep):
import asynckivy as ak
async def job():
l = []
async for v in ak.interpolate(start=0, end=100, step=.3):
l.append(v)
if v > 50:
break
assert l == approx([0, 30, 60, ])
await ak.sleep_forever()
sleep = ready_to_sleep()
task = ak.start(job())
for __ in range(130):
sleep(.01)
assert not task.done
with pytest.raises(StopIteration):
task.root_coro.send(None)
assert task.done
def test_zero_duration(approx):
import asynckivy as ak
async def job():
l = [v async for v in ak.interpolate(start=0, end=100, step=.3, d=0)]
assert l == approx([0, 100])
task = ak.start(job())
assert task.done
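# --- Illustrative usage sketch (not part of the original tests) ---
# A sketch of how ak.interpolate is typically consumed inside a Kivy app;
# `widget` and the coroutine name are assumptions for illustration.
# import asynckivy as ak
#
# async def fade_in(widget):
#     async for v in ak.interpolate(start=0, end=1, step=0.1):
#         widget.opacity = v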
|
442599
|
import copy
import srsly
from pathlib import Path
from typing import Optional, Dict, Text, Any, List, Union
def override_defaults(
defaults: Optional[Dict[Text, Any]], custom: Optional[Dict[Text, Any]]
) -> Dict[Text, Any]:
if defaults:
cfg = copy.deepcopy(defaults)
else:
cfg = {}
if custom:
for key in custom.keys():
if isinstance(cfg.get(key), dict):
cfg[key].update(custom[key])
else:
cfg[key] = custom[key]
return cfg
def read_file(path: Union[Path, str], **kwargs) -> List[Dict]:
"""Read train/dev examples from file, either JSON, MD or ConLL format.
Args:
path: file path.
Returns:
list of examples
"""
if not isinstance(path, Path):
path = Path(path)
assert isinstance(path, Path)
ext = path.suffix.lower()
if ext == ".json":
# JSON format is the GOLD standard ...
return list(srsly.read_json(path))
elif ext == ".jsonl":
# same here ..
return list(srsly.read_jsonl(path))
elif ext in (".md", ".markdown"):
from spacy_crfsuite.markdown import MarkdownReader
# With markdown, we can easily convert to JSON
with path.open("r", encoding="utf-8") as f:
md_reader = MarkdownReader()
return md_reader(f.read(), **kwargs)
elif ext in (".txt", ".conll"):
from spacy_crfsuite.conll import read_conll
# CoNLL-02, CoNLL-03
return list(read_conll(path, **kwargs))
else:
raise ValueError(
f"Can't read examples from file with extension: ({ext}). "
f"spacy_crfsuite accepts .json, .jsonl, .txt, .conll files."
)
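# Hedged usage sketch (not part of the original module): override_defaults() merges one
# level deep, so nested dicts in `custom` update the defaults instead of replacing them.
# The keys and values below are made-up illustrations, not real spacy_crfsuite settings.
if __name__ == "__main__":
    defaults = {"features": {"low": True, "prefix": 2}, "algorithm": "lbfgs"}
    custom = {"features": {"prefix": 3}, "c1": 0.1}
    merged = override_defaults(defaults, custom)
    # -> {'features': {'low': True, 'prefix': 3}, 'algorithm': 'lbfgs', 'c1': 0.1}
    print(merged)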
|
442626
|
from collections import OrderedDict
from functools import partial
from itertools import chain
from ansible.errors import AnsibleError
def subelement_union(chain, key, subkey):
# this will not merge any keys other than subkey, and subkey must be a list
r = OrderedDict()
# if a lookup plugin is called in the variable context and returns a
# one-element list, the return value is replaced with the element
chain = [chain] if not isinstance(chain, list) else chain
for d in chain:
if d[key] in r:
r[d[key]][subkey].extend(filter(lambda i: i not in r[d[key]][subkey], d[subkey]))
else:
r[d[key]] = d
return list(r.values())
def _try_keys(i, kk, vk):
try:
if isinstance(i, list):
# will raise IndexError if it's missing an element
return (i[0], i[1])
if isinstance(i, dict) and len(i) != 1:
# will raise KeyError if either key doesn't exist
return (i[kk], i[vk])
if isinstance(i, dict):
return (list(i.keys())[0], list(i.values())[0])
else:
raise TypeError()
except Exception as exc:
raise exc.__class__(i, exc)
def _islist(i):
assert isinstance(i, list) or isinstance(i, tuple), i
def ordered(*iterables, **kwargs):
"""Merge iterables into an ordered dictionary and return them as a list of pairs
Useful for merging lists of defaults that you want to keep in order
Does not merge recursively (but that could be added from the built-in
combine filter if necessary).
Does not return an OrderedDict directly because Ansible doesn't support the
    data type and converts it into its string representation.
Elements of iterables are themselves an iterable, whose items can be:
- two-item lists, where i[0] is the key and i[1] is the value
- one-item dicts where the key is the key and the value is the value
- dicts where i[k_key] is the key and i[v_key] is the value
"""
k_key = kwargs.get('k_key', 'key')
v_key = kwargs.get('v_key', 'value')
try_keys = partial(_try_keys, kk=k_key, vk=v_key)
    try:
        _islist(iterables)
        # force evaluation here; map() is lazy under Python 3 and would skip the checks
        for iterable in iterables:
            _islist(iterable)
    except AssertionError as exc:
        raise AnsibleError("|ordered expects lists, got " + repr(exc.args[0]))
    try:
        return list(OrderedDict(map(try_keys, chain(*iterables))).items())
    except IndexError as exc:
        raise AnsibleError("|ordered must have 2 list elements %s: %s" % (exc.args[0], repr(exc.args[1])))
    except KeyError as exc:
        raise AnsibleError("|ordered key not found in dict (hash) list element %s: %s" % (exc.args[0], repr(exc.args[1])))
    except TypeError as exc:
        raise AnsibleError("|ordered list element must be list or dict, got " + repr(exc.args[0]))
class FilterModule(object):
def filters(self):
return {
'subelement_union': subelement_union,
'ordered': ordered,
}
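# Hedged usage sketch (not part of the original plugin): calling the filters directly to
# show the element shapes described in ordered()'s docstring. All data below is made up;
# running this requires ansible to be installed, since AnsibleError is imported above.
if __name__ == "__main__":
    # ordered() accepts two-item lists, one-item dicts, or dicts keyed by k_key/v_key;
    # later entries override earlier values while the original order is preserved.
    defaults = [["port", 8080], {"host": "localhost"}]
    overrides = [{"key": "port", "value": 9090}]
    print(ordered(defaults, overrides))  # [('port', 9090), ('host', 'localhost')]
    # subelement_union() merges dicts that share the same `key`, unioning only `subkey`.
    groups = [
        {"name": "web", "packages": ["nginx"]},
        {"name": "web", "packages": ["nginx", "certbot"]},
        {"name": "db", "packages": ["postgresql"]},
    ]
    print(subelement_union(groups, "name", "packages"))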
|
442657
|
import multiprocessing
import threading
import tensorflow as tf
from Access import Access
from Framework import ExplorerFramework
NUMS_CPU = multiprocessing.cpu_count()
state_size = 4
action_size = 2
tf.reset_default_graph()
sess = tf.Session()
with tf.device("/cpu:0"):
A = Access(state_size, action_size)
F_list = []
for i in range(NUMS_CPU):
F_list.append(ExplorerFramework(A, 'W%i' % i, state_size, action_size))
COORD = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
threads_list = []
for ac in F_list:
    job = lambda ac=ac: ac.run(sess)  # bind the current worker; avoids late binding of the loop variable
t = threading.Thread(target=job)
t.start()
threads_list.append(t)
COORD.join(threads_list)
|
442731
|
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import numpy as np
from climlab import constants as const
from climlab.solar.orbital import OrbitalTable
from climlab.domain.field import global_mean
class OrbitalCycles(object):
def __init__(self,
model,
kyear_start=-20.,
kyear_stop=0.,
segment_length_years=100.,
orbital_year_factor=1.,
verbose=True ):
"""Automatically integrates a process through changes in orbital parameters.
OrbitalCycles is a module for setting up long integrations of climlab
processes over orbital cycles.
The duration between integration start and end time is partitioned in
time segments over which the orbital parameters are held constant.
The process is integrated over every time segment and the process state
``Ts`` is stored for each segment.
The storage arrays are saving:
* **current model state** at end of each segment
* **model state averaged** over last integrated year of each segment
* **global mean** of averaged model state over last integrated year
of each segment
.. note::
Input ``kyear`` is thousands of years after present.
For years before present, use ``kyear < 0``.
**Initialization parameters** \n
:param model: a time dependent process
:type model: :class:`~climlab.process.time_dependent_process.TimeDependentProcess`
:param float kyear_start: integration start time. \n
            As the time reference is the present, the argument
            should be :math:`<0` for times before present.
* *unit:* kiloyears \n
* *default value:* ``-20.``
:param float kyear_stop: integration stop time. \n
            As the time reference is the present, the argument
            should be :math:`\\le 0` for times before present.
* *unit:* kiloyears \n
* *default value:* ``0.``
:param float segment_length_years: is the length of each integration with
fixed orbital parameters. [default: 100.]
:param float orbital_year_factor: is an optional speed-up to the orbital cycles.
[default: 1.]
:param bool verbose: prints product of calculation and
information about computation progress
[default: True]
**Object attributes** \n
Following object attributes are generated during initialization:
:ivar model: timedependent process to be integrated
:vartype model: :class:`~climlab.process.time_dependent_process.TimeDependentProcess`
:ivar float kyear_start: integration start time
:ivar float kyear_stop: integration stop time
:ivar float segment_length_years: length of each integration with
fixed orbital parameters
:ivar float orbital_year_factor: speed-up factor to the orbital cycles
:ivar bool verbose: print flag
        :ivar int num_segments: number of segments with fixed orbital
parameters, calculated through:
.. math::
num_{seg} = \\frac{-(kyear_{start}-kyear_{stop})*1000}{seg_{length} * orb_{factor}}
:ivar array T_segments_global: storage for global mean temperature
for final year of each segment
:ivar array T_segments: storage for actual temperature at end
of each segment
:ivar array T_segments_annual: storage for timeaveraged temperature
over last year of segment \n
dimension: (size(Ts), num_segments)
:ivar array orb_kyear: integration start time of all segments
:ivar dict orb: orbital parameters for last integrated segment
:Example:
Integration of an energy balance model for 10,000 years with
corresponding orbital parameters::
from climlab.model.ebm import EBM_seasonal
from climlab.solar.orbital_cycles import OrbitalCycles
from climlab.surface.albedo import StepFunctionAlbedo
ebm = EBM_seasonal()
            print(ebm)
# add an albedo feedback
albedo = StepFunctionAlbedo(state=ebm.state, **ebm.param)
ebm.add_subprocess('albedo', albedo)
# start the integration
# run for 10,000 orbital years, but only 1,000 model years
experiment = OrbitalCycles(ebm, kyear_start=-20, kyear_stop=-10,
orbital_year_factor=10.)
"""
self.model = model
self.kyear_start = kyear_start
self.kyear_stop = kyear_stop
self.segment_length_years = segment_length_years
self.orbital_year_factor = orbital_year_factor
self.verbose = verbose
self.num_segments = int(-(kyear_start - kyear_stop) * 1000. /
segment_length_years / orbital_year_factor)
kyear_before_present = kyear_start
if verbose:
print("--------- OrbitalCycles START ----------")
print("Beginning integration for the model from " + str(kyear_start) + " to " +
str(kyear_stop) + " kyears before present.")
print("Integration time for each set of orbital parameters is " +
str(segment_length_years) + " years.")
print("Orbital cycles will be sped up by a factor " + str(orbital_year_factor))
print("Total number of segments is " + str(self.num_segments))
# initialize storage arrays
self.T_segments_global = np.empty( self.num_segments )
self.T_segments = np.empty( (self.model.Ts.size, self.num_segments) )
self.T_segments_annual = np.empty_like( self.T_segments )
self.orb_kyear = np.empty( self.num_segments )
# Get orbital data table
#orbtable = OrbitalTable()
for n in range(self.num_segments):
if verbose:
print("-------------------------")
print("Segment " + str(n) + " out of " + str(self.num_segments) )
print( "Using orbital parameters from " + str(kyear_before_present) + " kyears before present." )
self.orb = OrbitalTable.interp(kyear=kyear_before_present)
#self.model.make_insolation_array( orb )
self.model.subprocess['insolation'].orb = self.orb
self.model.integrate_years(segment_length_years-1., verbose=False)
# Run one final year to characterize the current equilibrated state
self.model.integrate_years(1.0, verbose=False)
self.T_segments_annual[:, n] = np.squeeze(self.model.timeave['Ts'])
self.T_segments[:, n] = np.squeeze(self.model.Ts)
self.T_segments_global[n] = global_mean(self.model.timeave['Ts'])
self.orb_kyear[n] = kyear_before_present
kyear_before_present += segment_length_years / 1000. * orbital_year_factor
if verbose:
print( "Global mean temperature from the final year of integration is " +
str(self.T_segments_global[n]) + " degrees C." )
if verbose:
print("--------- OrbitalCycles END ----------")
|
442800
|
import requests
import json
import re
_translations = {}
square_bracketed_variable = re.compile(r"\[[^\]]*\]")
percent_variable = re.compile(r"%[0-9]")
to_underscore = re.compile(r"[ -:/,%]")
to_delete = re.compile(r"[%?]")
multiple_underscores = re.compile(r"_+")
deleteself = re.compile(r"self[, ]*")
def fetch_translations(lang="en"):
if lang in _translations:
return _translations[lang]
blocks = json.loads(requests.get(f"https://raw.githubusercontent.com/LLK/scratch-l10n/master/editor/blocks/{lang}.json").text)
extensions = json.loads(requests.get(f"https://raw.githubusercontent.com/pystage/scratch-l10n/master/editor/extensions/{lang}.json").text)
interface = json.loads(requests.get(f"https://raw.githubusercontent.com/pystage/scratch-l10n/master/editor/interface/{lang}.json").text)
res = {}
for key in blocks:
res[key.upper()] = blocks[key]
for key in extensions:
res[key.upper().replace(".", "_")] = extensions[key]
for key in interface:
res[key.upper()] = interface[key]
res["MOTION_TURNLEFT"] = res["MOTION_TURNLEFT"].replace("%1", res["BOOST_TILTDIRECTION_LEFT"])
res["MOTION_TURNRIGHT"] = res["MOTION_TURNRIGHT"].replace("%1", res["BOOST_TILTDIRECTION_RIGHT"])
res["EVENT_WHENFLAGCLICKED"] = res["EVENT_WHENFLAGCLICKED"].replace("%1", "<greenflag>")
res["EVENT_WHENGREATERTHAN"] = res["EVENT_WHENGREATERTHAN"].replace(">", "<greater>")
    # Another inconsistency where the opcode itself is not used (typo in the upstream key?)...
res["SOUND_SETEFFECTTO"] = res["SOUND_SETEFFECTO"]
_translations[lang] = res
return res
def trans(key, lang):
translations = fetch_translations(lang)
key = key.upper()
if key in translations:
return translations[key]
return f"MISSING_TRANSLATION: {key}"
def funcname(translation, lang):
translation = square_bracketed_variable.sub("", translation)
translation = to_underscore.sub("_", translation)
translation = percent_variable.sub("", translation)
translation = to_delete.sub("", translation)
translation = translation.replace("#", trans("LOOKS_NUMBERNAME_NUMBER", lang))
translation = multiple_underscores.sub("_", translation)
translation = translation.strip("_").lower()
translation = translation.replace("<greenflag>", "GREENFLAG")
translation = translation.replace("<greater>", "GREATERTHAN")
return translation
|
442804
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ActorsConfig(AppConfig):
name = 'actors'
|
442834
|
from logging import getLogger
from sqlite_dissect.constants import LOGGER_NAME
from sqlite_dissect.file.database.header import DatabaseHeader
"""
utilities.py
This script holds utility functions for dealing with WAL specific objects, such as comparing database headers,
rather than more general utility methods.
This script holds the following function(s):
compare_database_headers(previous_database_header, new_database_header)
"""
def compare_database_headers(previous_database_header, database_header):
logger = getLogger(LOGGER_NAME)
if not isinstance(previous_database_header, DatabaseHeader):
log_message = "The previous database header is not a Database Header but has a type of: {}."
log_message = log_message.format(type(previous_database_header))
logger.error(log_message)
raise ValueError(log_message)
if not isinstance(database_header, DatabaseHeader):
log_message = "The database header is not a Database Header but has a type of: {}."
log_message = log_message.format(type(database_header))
logger.error(log_message)
raise ValueError(log_message)
"""
Since the two objects are the same, we are not worried about possible differences in what properties the
objects have.
"""
database_header_changes = {}
for key in previous_database_header.__dict__.keys():
previous_value = getattr(previous_database_header, key)
value = getattr(database_header, key)
if previous_value != value:
database_header_changes[key] = (previous_value, value)
return database_header_changes
|
442835
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from codecs import open
from configparser import ConfigParser
def read(filename):
config = ConfigParser({
"htrc": False,
"htrc_metadata" : None,
"sentences": False,
'certfile': None,
'keyfile': None,
'ca_certs': None,
'ssl': False,
'port': '8000',
'host': '127.0.0.1',
'icons': 'link',
'corpus_link': None,
'doc_title_format': '{0}',
'doc_url_format': '',
'raw_corpus': None,
'label_module': None,
'fulltext': False,
'pdf' : False,
'topics': [],
'topic_range': None,
'cluster': None,
'corpus_desc' : None,
'home_link' : '/',
'lang' : None,
'tokenizer': 'default',
'label_file' : None
}, allow_no_value=True)
with open(filename, encoding='utf8') as configfile:
config.read_file(configfile)
return config
|
442844
|
import datetime
import re
import warnings
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from .models import (
UserPasswordDescriptor,
PastPassword,
PasswordHasher,
)
class PasswordComplexityValidator():
'''
BB2-62 POAM strengthen blue button developer account authentication
- increase password complexity
'''
def __init__(
self,
min_length_digit=1,
min_length_alpha=1,
min_length_special=1,
min_length_lower=1,
min_length_upper=1,
special_characters="[~!{}@#$%^&*_+\":;()'[]"
):
self.min_length_digit = min_length_digit
self.min_length_alpha = min_length_alpha
self.min_length_special = min_length_special
self.min_length_lower = min_length_lower
self.min_length_upper = min_length_upper
self.special_characters = special_characters
settings.PASSWORD_RULES[2]['regex'] = self.special_characters
self.actual_params = [
self.min_length_digit,
self.min_length_alpha,
self.min_length_special,
self.min_length_lower,
self.min_length_upper,
]
password_requirements = []
for rule in zip(settings.PASSWORD_RULES, self.actual_params):
if rule[1]:
password_requirements.append(rule[0]['help'].format(rule[1]))
self.help_txt = "Your password must contaion at least {}.".format(', '.join(password_requirements))
def validate(self, password, user=None):
validation_errors = []
        for rule, min_len_required in zip(settings.PASSWORD_RULES, self.actual_params):
p = re.compile(rule['regex'])
actual_cnt = len(p.findall(password))
if actual_cnt < min_len_required:
validation_errors.append(ValidationError(
rule['msg'].format(min_len_required),
params={'min_length': min_len_required},
code=rule['name'],
))
if validation_errors:
raise ValidationError(validation_errors)
def get_help_text(self):
return self.help_txt
class PasswordReuseAndMinAgeValidator(object):
'''
BB2-62 POAM strengthen blue button developer account authentication
- enforce min password age and re-use interval
'''
def __init__(self,
password_min_age=60 * 60 * 24,
password_reuse_interval=60 * 60 * 24 * 120,
password_expire=60 * 60 * 24 * 30):
msg1 = "Invalid OPTIONS, password_min_age < password_reuse_interval expected, " \
"but having password_min_age({}) >= password_reuse_interval({})"
msg2 = "Invalid OPTIONS, password_expire < password_reuse_interval expected, " \
"but having password_expire({}) >= password_reuse_interval({})"
msg3 = "Invalid OPTIONS, password_min_age < password_expire expected, " \
"but having password_expire({}) >= password_reuse_interval({})"
check_opt_err = []
if password_min_age > 0 and password_reuse_interval > 0 \
and password_min_age > password_reuse_interval:
check_opt_err.append(msg1.format(password_min_age, password_reuse_interval))
if password_expire > 0 and password_reuse_interval > 0 \
and password_expire > password_reuse_interval:
check_opt_err.append(msg2.format(password_expire, password_reuse_interval))
if password_min_age > 0 and password_expire > 0 \
and password_min_age > password_expire:
check_opt_err.append(msg3.format(password_min_age, password_expire))
if len(check_opt_err) > 0:
raise ValueError(check_opt_err)
self.password_min_age = password_min_age
self.password_reuse_interval = password_reuse_interval
self.password_expire = password_expire
def validate(self, password, user=None):
if not user or getattr(user, 'pk', None) is None or isinstance(getattr(user, 'pk', None), property):
warnings.warn('Validating on invalid user: {}'.format(user))
return
#
# |<--min password age-->|
# |<------------no reuse window--------------------->|
# ------p0-----p1----+---p2-----p3----------p4---------p5---------------+
# ^
# cur_time_utc
# given new password p:
        # (1) p's hash collides with any px in 'no reuse window' => validation fails
        # (2) p's hash does not collide with any px in 'no reuse window'
# or the window is empty => further check 'min password age'
# (3) there are px in 'no reuse window' => if there is no px in 'min password age'
# like p5 => validation pass
# (4) no px in 'no reuse window' (hence no px in 'min password age'
# since it's asserted that password_min_age < password_reuse_interval) => pass validation
#
cur_time_utc = datetime.datetime.now(datetime.timezone.utc)
for userpassword_desc in UserPasswordDescriptor.objects.filter(user=user):
password_hash = userpassword_desc.create_hash(password)
passwds = None
try:
if self.password_reuse_interval > 0:
                    # only check invalid reuse (collisions) within reuse_interval
reuse_datetime = cur_time_utc - datetime.timedelta(0, self.password_reuse_interval)
passwds = PastPassword.objects.filter(
Q(date_created__gt=reuse_datetime), userpassword_desc=userpassword_desc
).order_by('-date_created')
else:
                    # no reuse_interval, check all past passwords for collisions
passwds = PastPassword.objects.filter(
userpassword_desc=userpassword_desc
).order_by('-date_created')
for p in passwds:
if p.password == <PASSWORD>:
                        # check invalid re-use (collision) within password reuse interval
raise ValidationError(
("You can not use a password that is already"
" used in this application within password re-use interval [days hh:mm:ss]: {}.")
.format(str(datetime.timedelta(seconds=self.password_reuse_interval))),
code='password_used'
)
except PastPassword.DoesNotExist:
pass
if self.password_min_age > 0 and passwds is not None and passwds.first() is not None:
if (datetime.datetime.now(datetime.timezone.utc)
- passwds.first().date_created).total_seconds() <= self.password_min_age:
# change password too soon
raise ValidationError(
"You can not change password that does not satisfy minimum password age [days hh:mm:ss]: {}."
.format(str(datetime.timedelta(seconds=self.password_min_age))),
code='password_used'
)
def password_changed(self, password, user=None):
if not user or getattr(user, 'pk', None) is None or isinstance(getattr(user, 'pk', None), property):
warnings.warn('Change password on invalid user: {}'.format(user))
return
iter_val = PasswordHasher().iterations
userpassword_desc = UserPasswordDescriptor.objects.filter(
user=user,
iterations=iter_val
).first()
if not userpassword_desc:
userpassword_desc = UserPasswordDescriptor()
userpassword_desc.user = user
userpassword_desc.save()
password_hash = userpassword_desc.create_hash(password)
        # We look up the password hash in the database
tz_now = datetime.datetime.now(datetime.timezone.utc)
try:
            # with the timestamp now() this lookup will certainly not be able to find an entry;
            # this is expected both for a new entry and for a re-used password (same user + password hash).
            # Note: re-use of user + password hash must already have satisfied the re-use interval check.
PastPassword.objects.get(
userpassword_desc=userpassword_desc,
password=password_hash,
date_created=tz_now
)
except PastPassword.DoesNotExist:
past_password = PastPassword()
past_password.userpassword_desc = userpassword_desc
past_password.password = <PASSWORD>
past_password.save()
def get_help_text(self):
help_msg = ('For security, you can not change your password again for [days hh:mm:ss]: {}, and'
' your new password can not be identical to any of the '
'previously entered in the past [days hh:mm:ss] {}').format(
str(datetime.timedelta(seconds=self.password_min_age)),
str(datetime.timedelta(seconds=self.password_reuse_interval)))
return help_msg
def password_expired(self, user=None):
passwd_expired = False
if user.is_staff or user.is_superuser:
# for staff and above do not enforce password expire
return passwd_expired
if self.password_expire <= 0:
            # password never expires: password_expire set to 0 or negative
            # effectively disables password expiration
return passwd_expired
for userpassword_desc in UserPasswordDescriptor.objects.filter(user=user):
passwds = None
try:
                # fetch this descriptor's past passwords, most recent first
passwds = PastPassword.objects.filter(
userpassword_desc=userpassword_desc
).order_by('-date_created')
except PastPassword.DoesNotExist:
pass
if passwds is not None and passwds.first() is not None:
if (datetime.datetime.now(datetime.timezone.utc)
- passwds.first().date_created).total_seconds() >= self.password_expire:
# the elapsed time since last password change / create is more than password_expire
passwd_expired = True
return passwd_expired
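# Hedged worked example (not part of the original module): the timeline comment in
# PasswordReuseAndMinAgeValidator.validate reduces to two elapsed-time checks against the
# most recent PastPassword entry. The durations below are illustrative assumptions only;
# `datetime` is already imported at the top of this module.
if __name__ == "__main__":
    password_min_age = 60 * 60 * 24               # 1 day: cannot change again sooner than this
    password_reuse_interval = 60 * 60 * 24 * 120  # 120 days: cannot reuse a hash within this window
    now = datetime.datetime.now(datetime.timezone.utc)
    last_change = now - datetime.timedelta(hours=12)  # hypothetical last password change
    elapsed = (now - last_change).total_seconds()
    print("change rejected (min age):", elapsed <= password_min_age)              # True
    print("old hash still blocked (reuse):", elapsed <= password_reuse_interval)  # True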
|
442857
|
import collections
from datetime import datetime
import logging
from deepbgc import util
import pandas as pd
from deepbgc.models.wrapper import SequenceModelWrapper
from deepbgc.pipeline.step import PipelineStep
import six
import os
class DeepBGCClassifier(PipelineStep):
def __init__(self, classifier, score_threshold=0.5):
if classifier is None or not isinstance(classifier, six.string_types):
raise ValueError('Expected classifier name or path, got {}'.format(classifier))
if (os.path.exists(classifier) or os.path.sep in classifier) and not os.path.isdir(classifier):
classifier_path = classifier
# Set classifier name to filename without suffix
classifier, _ = os.path.splitext(os.path.basename(classifier))
else:
classifier_path = util.get_model_path(classifier, 'classifier')
self.classifier_name = classifier
self.score_threshold = score_threshold
self.model = SequenceModelWrapper.load(classifier_path)
self.total_class_counts = pd.Series()
def run(self, record):
cluster_features = util.get_cluster_features(record)
if not len(cluster_features):
return
logging.info('Classifying %s BGCs using %s model in %s', len(cluster_features), self.classifier_name, record.id)
# Create list of DataFrames with Pfam sequences (one for each cluster)
cluster_pfam_sequences = []
for feature in cluster_features:
cluster_record = util.extract_cluster_record(feature, record)
cluster_pfam_sequences.append(util.create_pfam_dataframe(cluster_record, add_scores=False))
# Predict BGC score of each Pfam
class_scores = self.model.predict(cluster_pfam_sequences)
predicted_classes = []
# Annotate classes to all cluster features
for i, feature in enumerate(cluster_features):
scores = class_scores.iloc[i]
# Add predicted score for each class
score_column = util.format_classification_score_column(self.classifier_name)
feature.qualifiers[score_column] = [util.encode_class_score_string(scores)]
# Add classes with score over given threshold
new_classes = list(class_scores.columns[scores >= self.score_threshold])
class_column = util.format_classification_column(self.classifier_name)
all_classes = new_classes
if feature.qualifiers.get(class_column):
prev_classes = feature.qualifiers.get(class_column)[0].split('-')
all_classes = sorted(list(set(all_classes + prev_classes)))
if all_classes:
feature.qualifiers[class_column] = ['-'.join(all_classes)]
predicted_classes += new_classes or ['no confident class']
# Add detector metadata to the record as a structured comment
if 'structured_comment' not in record.annotations:
record.annotations['structured_comment'] = {}
comment_key = util.format_classifier_meta_key(self.classifier_name)
record.annotations['structured_comment'][comment_key] = collections.OrderedDict(
name=self.classifier_name,
version=self.model.version,
version_timestamp=self.model.timestamp,
classification_timestamp_utc=datetime.utcnow().isoformat(),
score_threshold=self.score_threshold
)
class_counts = pd.Series(predicted_classes).value_counts()
self.total_class_counts = self.total_class_counts.add(class_counts, fill_value=0)
def print_summary(self):
# Print class counts
sorted_counts = self.total_class_counts.sort_values(ascending=False).astype('int64')
class_list = '\n'.join(' {}: {}'.format(cls, count) for cls, count in sorted_counts.items())
logging.info('Number of BGCs with predicted %s: \n%s', self.classifier_name, class_list)
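# Hedged illustration (not part of the original module) of the thresholding step in run():
# classes whose predicted score meets score_threshold are kept and joined with '-'.
# The class names and scores below are made up.
if __name__ == "__main__":
    example_scores = pd.Series({"Polyketide": 0.81, "NRP": 0.44, "Terpene": 0.07})
    score_threshold = 0.5
    kept = sorted(c for c, s in example_scores.items() if s >= score_threshold)
    print("-".join(kept) if kept else "no confident class")  # -> Polyketide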
|
442859
|
import json
import os
from pathlib import Path
from bs4 import BeautifulSoup
root = Path(os.path.dirname(__file__)).joinpath("sample-test")
def read_text(path, input_format):
with path.open(encoding="utf-8") as f:
content = f.read().strip()
if input_format == "SPL":
return content
else:
soup = BeautifulSoup(content, "html.parser")
lines = soup.find_all("a", attrs={"id": True})
content = "\n".join([ln.string.strip() for ln in lines])
return content
for testf in ["ROUGE-test.xml", "verify-spl.xml", "verify.xml"]:
file_path = root.joinpath(testf)
soup = None
with file_path.open(encoding="utf-8") as f:
soup = BeautifulSoup(f.read().strip(), "xml")
evals = soup.find_all("EVAL")
data = {}
for e in evals:
summary_root = e.find_next("PEER-ROOT").string.strip()
ref_root = e.find_next("MODEL-ROOT").string.strip()
input_format = e.find_next("INPUT-FORMAT")["TYPE"]
summaries = []
references = []
for kind in ["PEERS", "MODELS"]:
node = e.find_next(kind)
node_type = kind[0]
node_root = summary_root if node_type == "P" else ref_root
nodes = node.find_all(node_type)
for n in nodes:
name = n.string.strip()
p = root.joinpath(*node_root.split("/"), name)
content = read_text(p, input_format)
if node_type == "P":
summaries.append(content)
else:
references.append(content)
data[e["ID"]] = {
"summaries": summaries,
"references": references
}
serialized = json.dumps(data, indent=4)
name, ext = os.path.splitext(testf)
with open(name + ".json", "wb") as f:
f.write(serialized.encode("utf-8"))
|
442869
|
import os
import shutil
import argparse
import tempfile
from handwrite import SHEETtoPNG
from handwrite import PNGtoSVG
from handwrite import SVGtoTTF
def run(sheet, output_directory, characters_dir, config):
SHEETtoPNG().convert(sheet, characters_dir, config)
PNGtoSVG().convert(directory=characters_dir)
SVGtoTTF().convert(characters_dir, output_directory, config)
def converters(sheet, output_directory, directory=None, config=None):
if not directory:
directory = tempfile.mkdtemp()
isTempdir = True
else:
isTempdir = False
default_config = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "default.json"
)
if config is None:
config = default_config
configs_dir = None
if os.path.isdir(config):
configs_dir = config
if os.path.isdir(sheet):
configs_dir = configs_dir or directory + os.sep + "configs"
os.makedirs(configs_dir, exist_ok=True)
for sheet_name in sorted(os.listdir(sheet)):
config_file = (
configs_dir + os.sep + os.path.splitext(sheet_name)[0] + ".json"
)
if not os.path.exists(config_file):
if os.path.isdir(config):
shutil.copyfile(default_config, config_file)
else:
shutil.copyfile(config, config_file)
characters_dir = directory + os.sep + os.path.splitext(sheet_name)[0]
os.makedirs(characters_dir, exist_ok=True)
run(
sheet + os.sep + sheet_name,
output_directory,
characters_dir,
config_file,
)
else:
run(sheet, output_directory, directory, config)
if isTempdir:
shutil.rmtree(directory)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"input_path", help="Path to sample sheet/directory with multiple sample sheets"
)
parser.add_argument("output_directory", help="Directory Path to save font output")
parser.add_argument(
"--directory",
help="Generate additional files to this path (Temp by default)",
default=None,
)
parser.add_argument(
"--config", help="Use custom configuration file/directory", default=None
)
args = parser.parse_args()
converters(args.input_path, args.output_directory, args.directory, args.config)
|
442954
|
from characteristic import Attribute, attributes
LOAD_CONST = 1
BINARY_NEQ = 2
PUTC = 3
BINARY_LEQ = 4
RETURN = 5
STORE_VARIABLE = 6
LOAD_VARIABLE = 7
JUMP = 8
JUMP_IF_NOT_ZERO = 9
JUMP_IF_ZERO = 14
CALL = 10
BINARY_ADD = 11
BINARY_SUB = 12
DEREFERENCE = 13
NAMES = {
LOAD_CONST: "LOAD_CONST",
BINARY_NEQ: "BINARY_NEQ",
PUTC: "PUTC",
BINARY_LEQ: "BINARY_LEQ",
RETURN: "RETURN",
STORE_VARIABLE: "STORE_VARIABLE",
LOAD_VARIABLE: "LOAD_VARIABLE",
JUMP_IF_NOT_ZERO: "JUMP_IF_NOT_ZERO",
JUMP: "JUMP",
CALL: "CALL",
BINARY_ADD: "BINARY_ADD",
BINARY_SUB: "BINARY_SUB",
DEREFERENCE: "DEREFERENCE",
JUMP_IF_ZERO: "JUMP_IF_ZERO",
}
BINARY_OPERATION_BYTECODE = {
"!=": BINARY_NEQ,
"<=": BINARY_LEQ,
"+": BINARY_ADD,
"-": BINARY_SUB,
}
NO_ARG = -42
@attributes(
[
Attribute(name="tape"),
Attribute(name="name"),
Attribute(name="arguments"),
Attribute(name="constants", exclude_from_repr=True),
Attribute(name="variables", exclude_from_repr=True),
],
apply_with_init=False,
)
class Bytecode(object):
"""
The bytecode, man.
.. attribute:: tape
.. attribute:: arguments
a tuple of argument names
.. attribute:: constants
inherited from the :class:`cycy.compiler.Context` that produced this
bytecode
.. attribute:: variables
a mapping between variable names (:class:`str`\ s) and the
indices in an array that they should be assigned to
.. attribute:: name
an optional :class:`str` which is the source-file name
"""
def __init__(self, tape, arguments, constants, variables, name):
self.tape = tape
self.name = name
self.arguments = arguments
self.constants = constants
self.variables = variables
def __iter__(self):
"""Yield (offset, byte_code, arg) tuples.
The `byte_code` will be one of the constants defined above,
and `arg` may be None. `byte_code` and `arg` will be ints.
"""
offset = 0
while offset < len(self.tape):
byte_code = self.tape[offset]
arg = self.tape[offset + 1]
yield (offset, byte_code, arg)
offset += 2
def dump(self, pretty=True):
lines = []
for offset, byte_code, arg in self:
name = NAMES[byte_code]
str_arg = ""
if arg != NO_ARG:
str_arg = "%s" % arg
line = "%s %s %s" % (str(offset), name, str_arg)
if pretty:
if byte_code in (LOAD_CONST, CALL):
line += " => " + self.constants[arg].dump()
elif byte_code in (STORE_VARIABLE, LOAD_VARIABLE):
for name, index in self.variables.items():
if index == arg:
line += " => " + name
break
elif byte_code == RETURN:
if arg:
line += " (top of stack)"
else:
line += " (void return)"
lines.append(line.strip())
return "\n".join(lines)
def cleaned(humanish_bytecode):
    """
    Take bytecode in a humanish format::
        LOAD_CONST 0
        DO_STUFF 2 3 # do cool thangs
    and clean comments and whitespace.
    """
    lines = []
    for line in humanish_bytecode.splitlines():
        # drop trailing comments and surrounding whitespace, skip blank lines
        line = line.split("#", 1)[0].strip()
        if line:
            lines.append(line)
    return "\n".join(lines)
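# Hedged usage sketch (not part of the original module): building a tiny tape by hand to
# show the flat [byte_code, arg, byte_code, arg, ...] layout that __iter__ walks in pairs.
# The variable names and tape contents are made up for illustration.
if __name__ == "__main__":
    example = Bytecode(
        tape=[LOAD_VARIABLE, 0, LOAD_VARIABLE, 1, BINARY_ADD, NO_ARG, RETURN, 1],
        arguments=("x", "y"),
        constants=[],
        variables={"x": 0, "y": 1},
        name="example.c",
    )
    print(example.dump())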
|
442964
|
import click
from click.testing import CliRunner
import pytest
from cellpy import log
from cellpy import prms
import cellpy
from cellpy import cli, prmreader
NUMBER_OF_DIRS = 9
def test_get_user_name():
u = prmreader.get_user_name()
print(f"\ncurrent username: {u}")
def test_get_user_dir_and_dst():
user_dir, dst_file = prmreader.get_user_dir_and_dst("filename.conf")
print(f"\nuserdir: {user_dir}")
def test_create_custom_init_filename():
u = prmreader.create_custom_init_filename()
print(f"\ncustom config-file-name: {u}")
def test_get_package_prm_dir():
u = cli.get_package_prm_dir()
print(f"\npackage directory: {u}")
def test_info_version():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info", "--version"])
print(result.output)
assert result.exit_code == 0
assert f"[cellpy] version: {cellpy.__version__}" in result.output
def test_info_configloc():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info", "--configloc"])
print()
print(result.output)
assert result.exit_code == 0
assert "conf" in result.output
def test_info_no_option():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info"])
print()
print(result.output)
assert result.exit_code == 0
def test_info_help():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info", "--help"])
print()
print(result.output)
assert result.exit_code == 0
assert "--help" in result.output
def test_info_params():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info", "--params"])
print("\n", result.output)
assert result.exit_code == 0
assert "prms.Paths.outdatadir" in result.output
def test_info_check():
runner = CliRunner()
result = runner.invoke(cli.cli, ["info", "--check"])
print("\n", result.output)
assert result.exit_code == 0
@pytest.mark.slowtest
def test_pull_tests(tmp_path):
runner = CliRunner()
opts = list()
opts.append("pull")
opts.append("--tests")
opts.append("--directory")
opts.append(tmp_path)
opts.append("--password")
opts.append("env")
result = runner.invoke(cli.cli, opts)
print("\n", result.output)
if result.exception:
print(result.exception)
assert result.exception.status == 403
else:
assert result.exit_code == 0
@pytest.mark.slowtest
def test_pull_examples(tmp_path):
import github
runner = CliRunner()
opts = list()
opts.append("pull")
opts.append("--examples")
opts.append("--directory")
opts.append(tmp_path)
opts.append("--password")
opts.append("env")
result = runner.invoke(cli.cli, ["pull", "--examples"])
print("\n", result.output)
if result.exception:
print(result.exception)
assert result.exception.status == 403
else:
assert result.exit_code == 0
@pytest.mark.slowtest
def test_pull_clone():
runner = CliRunner()
result = runner.invoke(cli.cli, ["pull", "--clone"])
print("\n", result.output)
assert result.exit_code == 0
@pytest.mark.slowtest
def test_pull_custom_dir():
runner = CliRunner()
result = runner.invoke(cli.cli, ["pull", "--clone", "--directory", "MyDir"])
print("\n", result.output)
assert result.exit_code == 0
@pytest.mark.slowtest
def test_pull_help():
runner = CliRunner()
result = runner.invoke(cli.cli, ["pull", "--help"])
print("\n", result.output)
assert result.exit_code == 0
def test_run_help():
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", "--help"])
print("\n", result.output)
assert result.exit_code == 0
def test_run_empty():
runner = CliRunner()
result = runner.invoke(cli.cli, ["run"])
print("\n", result.output)
assert result.exit_code != 0
def test_run():
name = "20190210_cell001_cc_01.h5"
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", name])
print("\n", result.output)
assert result.exit_code == 0
def test_run_debug():
name = "20190210_cell001_cc_01.h5"
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", "--debug", name])
print("\n", result.output)
assert result.exit_code == 0
def test_run_journal():
name = "20190210_cell001_cc_01.h5"
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", "--journal", name])
print("\n", result.output)
assert result.exit_code == 0
def test_run_journal_silent():
name = "20190210_cell001_cc_01.h5"
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", "--journal", "--silent", name])
print("\n", result.output)
assert result.exit_code == 0
def test_run_journal_debug():
name = "20190210_cell001_cc_01.h5"
runner = CliRunner()
result = runner.invoke(cli.cli, ["run", "--journal", "--debug", name])
print("\n", result.output)
assert result.exit_code == 0
def test_cli_help():
runner = CliRunner()
result = runner.invoke(cli.cli, ["--help"])
print("\n", result.output)
assert result.exit_code == 0
def test_cli_setup_help():
runner = CliRunner()
result = runner.invoke(cli.cli, ["setup", "--help"])
print("\n", result.output)
assert result.exit_code == 0
def test_cli_setup():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli.cli, ["setup", "--dry-run"])
print(result.output)
assert result.exit_code == 0
def test_cli_setup_interactive():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
cli.cli, ["setup", "-i", "--dry-run"], input=NUMBER_OF_DIRS * "\n"
)
print(result.output)
assert result.exit_code == 0
def test_cli_setup_custom_dir():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
cli.cli,
["setup", "-i", "--dry-run", "-d", "just_a_dir"],
input=NUMBER_OF_DIRS * "\n",
)
print(result.output)
assert result.exit_code == 0
|
443009
|
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
from scipy import ndimage
center = [39.802613264377825, -14.536830769290223]
center = [53.5480150544989, -20.38914395572289]
center = [ 51.86099209979376,15.925813399768762]
center = [6.859298612741445, 64.77839469486523][::-1]
rot = +17+180
im = plt.imread('d.png')
#im = im[:,1040:]
print(im.shape)
rotated_img = ndimage.rotate(im, rot)
w = rotated_img.shape[1]
h = rotated_img.shape[0]
m = Basemap(projection='cass',lon_0 = center[1],lat_0 = center[0],width = w*4000*0.8,height = h*4000*0.8, resolution = "i")
m.drawcoastlines(color='yellow')
m.drawcountries(color='yellow')
im = plt.imshow(rotated_img, extent=(*plt.xlim(), *plt.ylim()))
plt.show()
|
443018
|
import pickle
import mysql.connector
dbpares = mysql.connector.connect(
host="localhost",
user="pablo",
password="<PASSWORD>",
database="ALGOFIprecios_diario"
)
cursor_diario = dbpares.cursor()
with open("/scripts/lista_pares", "rb") as fp:
lista_pares = pickle.load(fp)
for elemento in lista_pares:
try:
ASSET_ID = elemento[0]
ALGO_ID = elemento[1]
nombre_fichero1 = str(ALGO_ID) + "_" + str(ASSET_ID)
print(nombre_fichero1)
sql = "DROP TABLE %s" % nombre_fichero1
cursor_diario.execute(sql)
dbpares.commit()
sql = "DELETE FROM liquidez WHERE pool_id = %s" % ("'" + nombre_fichero1 + "'", )
print(sql)
cursor_diario.execute(sql)
dbpares.commit()
except Exception as excepcion:
print(excepcion)
|
443033
|
from dan.layers import HeatMap
import torch
def test_heatmap():
for num_lmks in [10, 13, 56]:
for img_size in [20, 112, 224]:
lmks = torch.randint(0, img_size - 1, (3, num_lmks, 2)).float()
for patch_size_div in [6, 7, 8, 9, 10]:
patch_size = img_size // patch_size_div
hm_layer = HeatMap((img_size, img_size), patch_size)
assert hm_layer(lmks).shape == (3, 1, img_size, img_size)
|
443058
|
import sqlite3
import os
import shutil
import traceback
from cave.libcave.video import Video
from cave.libcave.tag import Tag
from cave.libcave.videoutils import hash_video
import datetime
import time
from misc.log import with_logging
from cave.libcave.sql import SqlClass
#Increment whenever SQL tables change in a way that will break old files.
#You should also specify an upgrade function (see below) to provide
#backwards compatibility
DATABASE_VERSION = 2
# Version 2: Add video_hash key to the video table.
#Database info sqlite table for storing metadata for a database
class DBInfo(SqlClass):
@classmethod
def table_setup(cls):
cls.table_name = "dbinfo"
cls.c.execute("""CREATE TABLE IF NOT EXISTS %s (
id INTEGER PRIMARY KEY,
version INTEGER
)""" % cls.table_name)
cls.commit()
if len(cls.get_all()) == 0:
new = cls.new(version=DATABASE_VERSION)
@classmethod
def get_version(cls):
return cls.get_all()[0].version
@classmethod
def set_version(cls, v):
q = cls.get_all()[0]
q.version = v
q.update()
@with_logging
class Database:
def __init__(self, filename):
if filename is None:
self.log.warning("Invalid database filename specified")
self.error = True
return None
try:
self.conn = sqlite3.connect(filename, check_same_thread=False, \
isolation_level="EXCLUSIVE")
except sqlite3.OperationalError:
self.log.error("Failed to open database file: %s" % filename)
self.error = True
return None
self.conn.row_factory = sqlite3.Row #return rows objects instead of raw tuples
self.c = self.conn.cursor()
self.c.execute("PRAGMA synchronous = 0")
self.c.execute("PRAGMA journal_mode = OFF")
self.filename = os.path.abspath(filename)
self.root_dir = os.path.dirname(self.filename)
DBInfo.link_sqlite(self)
Video.link_sqlite(self)
Tag.link_sqlite(self)
### Upgrade functionality
db_version = DBInfo.get_version()
if db_version > DATABASE_VERSION:
self.log.error("Are you trying to provide a database file from the future? Why would you do that? (provided database version %d; expected <= %d)" % (db_version, DATABASE_VERSION))
self.error = True
return None
if db_version != DATABASE_VERSION:
self.log.warning("Old database version (database is version %d and most recent is version %d)" % (db_version, DATABASE_VERSION))
backup_filename = "cave_upgrade_backup"
self.log.info("Creating database backup at %s" % backup_filename)
shutil.copy2(filename, backup_filename)
#Methods provided to upgrade an old database version to the newest version
#Running update_functions[i] must upgrade a database of version i-1 to version i.
#An upgrade function should be provided whenever the DATABASE_VERSION is updated to
#ensure old versions are still compatible
upgrade_functions = {}
def add_hash():
SqlClass.turn_off_commits()
videos = Video.get_all()
tags = [video.get_tags() for video in videos]
# We need to get all the frame info before
# we erase the video table!
for tag_ls in tags:
for tag in tag_ls:
tag._populate_frame_dict()
for video in videos:
if not video.present():
self.log.error("Not all videos are present, cannot upgrade database!")
return False
[video.remove() for video in videos]
Video.remove_table()
Video.table_setup()
for i, video in enumerate(videos):
video.video_hash = \
hash_video(self.get_absolute_path(video.video_path))
Video.add(video)
for tag in tags[i]:
self.log.info("Adding tag %s in video %s" %(tag, video.video_path))
Tag.tag_add(tag)
SqlClass.turn_on_commits()
self.conn.commit()
return True
upgrade_functions[2] = add_hash
while db_version < DATABASE_VERSION:
i = db_version + 1
if i not in upgrade_functions.keys():
self.error = True
self.log.error("Unable to upgrade database to version %d: No upgrade functionality provided." % i)
return None
self.log.info("Upgrading database from version %d to %d" \
% (db_version, i))
try:
success = upgrade_functions[i]()
except Exception as e:
traceback.print_exc()
success = False
if success:
DBInfo.set_version(i)
db_version = i
else:
self.log.error("Upgrading database failed.")
shutil.copy2(backup_filename, filename)
os.remove(backup_filename)
break
###
self.log.info("Database linked to %s" % filename)
self.error = False
def get_filename(self):
return self.filename
#Get a relative path (for storing in the database) based on the absolute path
def get_relative_path(self, abspath):
abspath = os.path.abspath(abspath)
if os.path.commonprefix([abspath, self.root_dir]) != self.root_dir:
self.log.error("File chosen outside of root database directory")
return
return os.path.relpath(abspath, self.root_dir)
    #Get absolute path from a path stored in the database
def get_absolute_path(self, relpath):
return os.path.join(self.root_dir, relpath)
|
443077
|
from setuptools import setup, find_packages
PACKAGE_NAME = 'python-ecobee'
PACKAGES = find_packages(exclude=['tests', 'tests.*', 'python'])
REQUIRES = [
'requests>=2,<3',
]
setup(
name=PACKAGE_NAME,
version='1.0.0',
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
description='Library to talk to an Ecobee thermostat',
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=REQUIRES,
keywords=['home', 'automation'],
classifiers=[
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Home Automation'
]
)
|