code stringlengths 281 23.7M |
|---|
def make_connection(wire_nodes, wire1, wire2):
    """Merge the electrical nodes of two wires in place.

    ``wire_nodes`` maps each wire to the set of wires on its node; every
    member of a node shares the *same* set object, so identity comparison
    detects an existing connection.
    """
    node_a = wire_nodes[wire1]
    node_b = wire_nodes[wire2]
    if node_a is node_b:
        # Already connected; just sanity-check the mapping invariant.
        assert wire1 in node_a
        assert wire2 in node_b
        return
    merged = node_a | node_b
    # Re-point every member at the single merged set.
    for member in merged:
        wire_nodes[member] = merged
def parse_host_file(fpath):
    """Parse ``host:port`` rules from the file at *fpath*.

    Returns a list of ``(host, port)`` tuples. Raises ``FilefmtErr`` for any
    line without a ``:`` after at least one host character, or whose port is
    not an integer.
    """
    parsed = []
    for line in __read_from_file(fpath):
        sep = line.find(':')
        # The colon must exist and the host part must be non-empty.
        if sep < 1:
            raise FilefmtErr('wrong host_rule content %s' % line)
        host = line[0:sep]
        try:
            port = int(line[sep + 1:])
        except ValueError:
            raise FilefmtErr('wrong host_rule content %s' % line)
        parsed.append((host, port))
    return parsed
class Snackbar(Component):
    """Material Design Components snackbar wrapper."""
    # CSS classes applied to the rendered <aside> root element.
    css_classes = ['mdc-snackbar', 'mdc-snackbar--open']
    # Human-readable component name.
    name = 'Material Design Snackbar'
    # HTML template for the component; {attrs} is substituted with the
    # element's attribute string at render time.
    str_repr = '\n<aside {attrs}>\n <div class="mdc-snackbar__surface" role="status" aria-relevant="additions">\n <div class="mdc-snackbar__label" aria-atomic="false">\n Can\'t send photo. Retry in 5 seconds.\n </div>\n <div class="mdc-snackbar__actions" aria-atomic="true">\n <button type="button" class="mdc-button mdc-snackbar__action">\n <div class="mdc-button__ripple"></div>\n <span class="mdc-button__label">Retry</span>\n </button>\n </div>\n </div>\n</aside>\n'
    # Client-side JS: attaches the MDC snackbar behaviour to the element and
    # stores the instance on window under the element's id.
    _js__builder__ = 'window[htmlObj.id] = new mdc.snackbar.MDCSnackbar(htmlObj); console.log(window[htmlObj.id])'
    def dom(self) -> DomMdcSnackbar.DomSnackbar:
        """Lazily build and cache the DOM wrapper for this snackbar."""
        if (self._dom is None):
            self._dom = DomMdcSnackbar.DomSnackbar(component=self, page=self.page)
        return self._dom
class SensorsFansCommandHandler(MethodCommandHandler):
    """Handles 'sensors_fans' queries, drilling down by source, label and field.

    Query grammar (via ``split``): ``<source>/<label>/<field>``, where any
    component may be ``*`` (expand) or ``*;`` (expand, string-joined).
    """

    def __init__(self) -> None:
        super().__init__('sensors_fans')

    def handle(self, params: str) -> Payload:
        fans = self.get_value()
        assert isinstance(fans, dict)
        source, param = split(params)
        if source in ('*', '*;'):
            # All sources: map each fan source to its list of current speeds.
            currents = {name: [entry.current for entry in entries] for name, entries in fans.items()}
            return string_from_dict_optionally(currents, source.endswith(';'))
        if source in fans:
            entries = fans[source]
            label, param = split(param)
            if label == '' and param == '':
                # Bare source: just the current speeds.
                return [entry.current for entry in entries]
            if label in ('*', '*;'):
                # All fans of this source, as full dicts.
                as_dicts = [entry._asdict() for entry in entries]
                return string_from_dict_optionally(as_dicts, label.endswith(';'))
            # Select one fan by index or by its label.
            if label.isdigit():
                selected = entries[int(label)]
            else:
                selected = next((entry for entry in entries if entry.label == label), None)
            if selected is None:
                raise Exception(f"Device '{label}' in '{self.name}' is not supported")
            if param == '':
                return selected.current
            if param in ('*', '*;'):
                return string_from_dict_optionally(selected._asdict(), param.endswith(';'))
            return selected._asdict()[param]
        raise Exception(f"Fan '{source}' in '{self.name}' is not supported")
def test_local_module() -> None:
    """A module that only imports local modules yields no DEP001 findings."""
    dependencies: list[Dependency] = []
    module = ModuleBuilder('foobar', {'foo', 'foobar'}, frozenset(), dependencies).build()
    locations = [ModuleLocations(module, [Location(Path('foo.py'), 1, 2)])]
    assert DEP001MissingDependenciesFinder(locations, dependencies).find() == []
def extractHaneulXBadaTumblrCom(item):
    """Parser for 'HaneulXBada' Tumblr release posts.

    Returns None for previews/untagged chapters, a release message for a
    recognised tag, or False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TestBasicBuilder():
    """save()/save_best() must raise ValueError for these configurations."""

    @staticmethod
    def _timestamped_name() -> str:
        # Unique-ish file name from wall-clock time so concurrent runs do not
        # collide on the save path. (Previously duplicated in both tests.)
        now = datetime.datetime.now()
        return f'pytest.{int(f"{now.year}{now.month}{now.day}{now.hour}{now.second}")}'

    def test_raise_tuner_sample(self, monkeypatch, tmp_path):
        """Saving with add_tuner_sample=True must be rejected for this config."""
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test.yaml'])
            config = ConfigArgBuilder(*all_configs, desc='Test Builder')
            with pytest.raises(ValueError):
                config.save(file_extension='.yaml', file_name=self._timestamped_name(), user_specified_path=tmp_path, add_tuner_sample=True)

    def test_raise_save_best(self, monkeypatch, tmp_path):
        """save_best() must be rejected for this (non-tuner) config."""
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test.yaml'])
            config = ConfigArgBuilder(*all_configs, desc='Test Builder')
            with pytest.raises(ValueError):
                config.save_best(file_extension='.yaml', file_name=self._timestamped_name(), user_specified_path=tmp_path)
_required
def activity_proposal(request, event_slug):
    """Render and process the activity-proposal form for an event.

    GET renders the form; a valid POST saves the activity and redirects to
    the image-cropping step. Closed events redirect back to the index.
    """
    event = get_object_or_404(Event, event_slug=event_slug)
    if not event.activity_proposal_is_open:
        messages.error(request, _('The activity proposal is already closed or the event is not accepting proposals through this page. Please contact the Event Organization Team to submit it.'))
        return redirect(reverse('index', args=[event_slug]))
    event_user, created = EventUser.objects.get_or_create(user=request.user, event=event)
    if created:
        logger.info('The user %s is proposing an activity in event: %s. Creating EventUser and Attendee', event_user.user.email, event)
        # Fix: the Attendee was previously only instantiated, never persisted,
        # so the record announced by the log line above was silently dropped.
        Attendee(event_user=event_user, first_name=event_user.user.first_name, last_name=event_user.user.last_name, email=event_user.user.email, event=event_user.event, nickname=event_user.user.username).save()
    activity = Activity(event=event, status='1', owner=event_user)
    activity_form = ActivityProposalForm(request.POST or None, request.FILES or None, instance=activity)
    if request.POST:
        if activity_form.is_valid():
            try:
                activity = activity_form.save()
                return redirect(reverse('image_cropping', args=[event_slug, activity.pk]))
            except Exception as error_message:
                logger.error(error_message)
        # Reached on an invalid form or a failed save.
        messages.error(request, _('There was a problem submitting the proposal. Please check the form for errors.'))
    return render(request, 'activities/proposal.html', update_event_info(event_slug, {'form': activity_form, 'errors': [], 'multipart': True}, event=event))
def get_global_radix_info(size):
    """Split a power-of-two FFT *size* into radix stages.

    Returns ``(radix_list, radix1_list, radix2_list)`` where the product of
    ``radix_list`` equals *size*, and each radix factors as
    ``radix1 * radix2`` with ``radix2 <= radix1 <= MAX_RADIX``.
    """
    assert size == 2 ** helpers.log2(size)
    base_radix = min(size, 128)
    # Peel off base_radix factors until the remainder fits in one stage.
    remaining = size
    stages = 0
    while remaining > base_radix:
        remaining //= base_radix
        stages += 1
    radix_list = [base_radix] * stages + [remaining]
    radix1_list = []
    radix2_list = []
    for radix in radix_list:
        if radix <= 8:
            radix1, radix2 = radix, 1
        else:
            # Find the smallest power-of-two radix1 with radix1 >= radix2.
            radix1 = 2
            radix2 = radix // radix1
            while radix2 > radix1:
                radix1 *= 2
                radix2 = radix // radix1
        radix1_list.append(radix1)
        radix2_list.append(radix2)
    # Post-conditions on every stage.
    for radix, radix1, radix2 in zip(radix_list, radix1_list, radix2_list):
        assert radix2 <= radix1
        assert radix1 * radix2 == radix
        assert radix1 <= MAX_RADIX
    return (radix_list, radix1_list, radix2_list)
def git_clone(url, dstpath, branch):
    """Clone *url* (with submodules) into *dstpath* at *branch*.

    Returns True when the repo is already present or the clone succeeds.
    """
    debug_print('git_clone(url=' + url + ', dstpath=' + dstpath + ')')
    # An existing .git directory means the clone was already done.
    if os.path.isdir(os.path.join(dstpath, '.git')):
        debug_print("Repository '" + url + "' already cloned to directory '" + dstpath + "', skipping.")
        return True
    mkdir_p(dstpath)
    clone_args = ['--recurse-submodules', '--branch', branch]
    if GIT_CLONE_SHALLOW:
        clone_args.extend(['--depth', '1'])
    print('Cloning from ' + url + '...')
    return run([GIT(), 'clone'] + clone_args + [url, dstpath]) == 0
class TestNestedSlugRelatedField(APISimpleTestCase):
    """SlugRelatedField lookups through zero, one and two levels of nesting.

    The mock queryset holds three objects, each with a ``nested`` object that
    itself has a ``nested`` object, so ``name``, ``nested__name`` and
    ``nested__nested__name`` each resolve to exactly one object.
    """
    def setUp(self):
        self.queryset = MockQueryset([MockObject(pk=1, name='foo', nested=MockObject(pk=2, name='bar', nested=MockObject(pk=7, name='foobar'))), MockObject(pk=3, name='hello', nested=MockObject(pk=4, name='world', nested=MockObject(pk=8, name='helloworld'))), MockObject(pk=5, name='harry', nested=MockObject(pk=6, name='potter', nested=MockObject(pk=9, name='harrypotter')))])
        # Lookup target: the 'harry'/'potter'/'harrypotter' object.
        self.instance = self.queryset.items[2]
        self.field = serializers.SlugRelatedField(slug_field='name', queryset=self.queryset)
        self.nested_field = serializers.SlugRelatedField(slug_field='nested__name', queryset=self.queryset)
        self.nested_nested_field = serializers.SlugRelatedField(slug_field='nested__nested__name', queryset=self.queryset)
    # --- doubly nested lookups (nested__nested__name) ---
    def test_slug_related_nested_nested_lookup_exists(self):
        instance = self.nested_nested_field.to_internal_value(self.instance.nested.nested.name)
        assert (instance is self.instance)
    def test_slug_related_nested_nested_lookup_does_not_exist(self):
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.nested_nested_field.to_internal_value('doesnotexist')
        msg = excinfo.value.detail[0]
        assert (msg == 'Object with nested__nested__name=doesnotexist does not exist.')
    def test_slug_related_nested_nested_lookup_invalid_type(self):
        # A non-string slug value must be rejected, not crash the lookup.
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.nested_nested_field.to_internal_value(BadType())
        msg = excinfo.value.detail[0]
        assert (msg == 'Invalid value.')
    def test_nested_nested_representation(self):
        representation = self.nested_nested_field.to_representation(self.instance)
        assert (representation == self.instance.nested.nested.name)
    def test_nested_nested_overriding_get_queryset(self):
        # The field must work when the queryset comes from get_queryset()
        # instead of the constructor.
        qs = self.queryset
        class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
            def get_queryset(self):
                return qs
        field = NoQuerySetSlugRelatedField(slug_field='nested__nested__name')
        field.to_internal_value(self.instance.nested.nested.name)
    # --- singly nested lookups (nested__name) ---
    def test_slug_related_nested_lookup_exists(self):
        instance = self.nested_field.to_internal_value(self.instance.nested.name)
        assert (instance is self.instance)
    def test_slug_related_nested_lookup_does_not_exist(self):
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.nested_field.to_internal_value('doesnotexist')
        msg = excinfo.value.detail[0]
        assert (msg == 'Object with nested__name=doesnotexist does not exist.')
    def test_slug_related_nested_lookup_invalid_type(self):
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.nested_field.to_internal_value(BadType())
        msg = excinfo.value.detail[0]
        assert (msg == 'Invalid value.')
    def test_nested_representation(self):
        representation = self.nested_field.to_representation(self.instance)
        assert (representation == self.instance.nested.name)
    def test_nested_overriding_get_queryset(self):
        qs = self.queryset
        class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
            def get_queryset(self):
                return qs
        field = NoQuerySetSlugRelatedField(slug_field='nested__name')
        field.to_internal_value(self.instance.nested.name)
    # --- flat lookups (name) ---
    def test_slug_related_lookup_exists(self):
        instance = self.field.to_internal_value(self.instance.name)
        assert (instance is self.instance)
    def test_slug_related_lookup_does_not_exist(self):
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.field.to_internal_value('doesnotexist')
        msg = excinfo.value.detail[0]
        assert (msg == 'Object with name=doesnotexist does not exist.')
    def test_slug_related_lookup_invalid_type(self):
        with pytest.raises(serializers.ValidationError) as excinfo:
            self.field.to_internal_value(BadType())
        msg = excinfo.value.detail[0]
        assert (msg == 'Invalid value.')
    def test_representation(self):
        representation = self.field.to_representation(self.instance)
        assert (representation == self.instance.name)
    def test_overriding_get_queryset(self):
        qs = self.queryset
        class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
            def get_queryset(self):
                return qs
        field = NoQuerySetSlugRelatedField(slug_field='name')
        field.to_internal_value(self.instance.name)
def test_wrapped_tasks_error(capfd):
    """Decorator hooks must fire in order when the wrapped task raises."""
    env = {'SCRIPT_INPUT': '0', 'SYSTEMROOT': 'C:\\Windows', 'HOMEPATH': 'C:\\Windows'}
    subprocess.run([sys.executable, str(test_module_dir / 'simple_decorator.py')], env=env, text=True)
    captured = capfd.readouterr().out
    first_five = captured.replace('\r', '').strip().split('\n')[:5]
    assert first_five == [
        'before running my_task',
        'try running my_task',
        'error running my_task: my_task failed with input: 0',
        'finally after running my_task',
        'after running my_task',
    ]
def check_userdoc(contract_name: str, data: Dict[str, Any], warnings: Dict[str, str]) -> Dict[str, str]:
    """Warn when a contract type lacks a non-empty 'userdoc' entry.

    Returns *warnings* unchanged when userdoc is present, otherwise a copy
    with the missing-userdoc warning added under this contract type.
    """
    if data.get('userdoc'):
        return warnings
    message = WARNINGS['userdoc_missing'].format(contract_name)
    return assoc_in(warnings, ['contractTypes', contract_name, 'userdoc'], message)
class Entity():
    """Base class for named entities; subclasses must implement dump()."""

    def __init__(self, name, externally_visible=False):
        # Validate eagerly so bad construction fails at the call site.
        assert isinstance(name, str)
        assert isinstance(externally_visible, bool)
        self.name = name
        self.is_externally_visible = externally_visible

    def dump(self):
        # Abstract hook: concrete entities must override.
        raise ICE('dump is not defined')
def test_checker_simple():
    """An empty manifest must report every required/suggested field."""
    expected = {
        'manifest': "Manifest missing a required 'manifest' field.",
        'name': "Manifest missing a suggested 'name' field",
        'version': "Manifest missing a suggested 'version' field.",
        'meta': "Manifest missing a suggested 'meta' field.",
        'sources': 'Manifest is missing a sources field, which defines a source tree that should comprise the full source tree necessary to recompile the contracts contained in this release.',
        'contractTypes': "Manifest does not contain any 'contractTypes'. Packages should only include contract types that can be found in the source files for this package. Packages should not include contract types from dependencies. Packages should not include abstract contracts in the contract types section of a release.",
        'compilers': 'Manifest is missing a suggested `compilers` field.',
    }
    assert check_manifest({}) == expected
class PostgresSearchBackend(SearchBackend):
    """Watson search backend using PostgreSQL full-text search (tsvector/tsquery)."""

    # Text-search configuration; overridable via settings.
    search_config = getattr(settings, 'WATSON_POSTGRES_SEARCH_CONFIG', 'pg_catalog.english')

    def escape_postgres_query(self, text):
        """Escape a raw query into an AND-joined prefix-match tsquery string."""
        return ' & '.join(('$${0}$$:*'.format(word) for word in escape_query(text, RE_POSTGRES_ESCAPE_CHARS).split()))

    def is_installed(self):
        """True when the search_tsv column already exists on watson_searchentry."""
        connection = connections[router.db_for_read(SearchEntry)]
        cursor = connection.cursor()
        cursor.execute("\n SELECT attname FROM pg_attribute\n WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'watson_searchentry') AND attname = 'search_tsv';\n ")
        return bool(cursor.fetchall())

    # NOTE(review): stray bare `()` statements preceded do_install/do_uninstall
    # in the original — they look like stripped decorators (possibly
    # transaction.atomic); confirm against upstream watson. Removed here as
    # they were no-ops.
    def do_install(self):
        """Create the search_tsv column, GIN index and maintenance trigger."""
        connection = connections[router.db_for_write(SearchEntry)]
        connection.cursor().execute("\n -- Ensure that plpgsql is installed.\n CREATE OR REPLACE FUNCTION make_plpgsql() RETURNS VOID LANGUAGE SQL AS\n $$\n CREATE LANGUAGE plpgsql;\n $$;\n SELECT\n CASE\n WHEN EXISTS(\n SELECT 1\n FROM pg_catalog.pg_language\n WHERE lanname='plpgsql'\n )\n THEN NULL\n ELSE make_plpgsql() END;\n DROP FUNCTION make_plpgsql();\n\n -- Create the search index.\n ALTER TABLE watson_searchentry ADD COLUMN search_tsv tsvector NOT NULL;\n CREATE INDEX watson_searchentry_search_tsv ON watson_searchentry USING gin(search_tsv);\n\n -- Create the trigger function.\n CREATE OR REPLACE FUNCTION watson_searchentry_trigger_handler() RETURNS trigger AS $$\n begin\n new.search_tsv :=\n setweight(to_tsvector('{search_config}', coalesce(new.title, '')), 'A') ||\n setweight(to_tsvector('{search_config}', coalesce(new.description, '')), 'C') ||\n setweight(to_tsvector('{search_config}', coalesce(new.content, '')), 'D');\n return new;\n end\n $$ LANGUAGE plpgsql;\n CREATE TRIGGER watson_searchentry_trigger BEFORE INSERT OR UPDATE\n ON watson_searchentry FOR EACH ROW EXECUTE PROCEDURE watson_searchentry_trigger_handler();\n ".format(search_config=self.search_config))

    def do_uninstall(self):
        """Drop the search_tsv column, trigger and trigger function."""
        connection = connections[router.db_for_write(SearchEntry)]
        connection.cursor().execute('\n ALTER TABLE watson_searchentry DROP COLUMN search_tsv;\n\n DROP TRIGGER watson_searchentry_trigger ON watson_searchentry;\n\n DROP FUNCTION watson_searchentry_trigger_handler();\n ')

    requires_installation = True
    supports_ranking = True
    supports_prefix_matching = True

    def do_search(self, engine_slug, queryset, search_text):
        """Filter the SearchEntry queryset by full-text match.

        Fix: the WHERE clause was missing the tsvector match operator `@@`
        between search_tsv and to_tsquery(), which is invalid SQL.
        """
        return queryset.extra(where=("search_tsv @@ to_tsquery('{search_config}', %s)".format(search_config=self.search_config),), params=(self.escape_postgres_query(search_text),))

    def do_search_ranking(self, engine_slug, queryset, search_text):
        """Annotate and order by ts_rank_cd relevance."""
        return queryset.annotate(watson_rank=RawSQL("ts_rank_cd(watson_searchentry.search_tsv, to_tsquery('{config}', %s))".format(config=self.search_config), (self.escape_postgres_query(search_text),))).order_by('-watson_rank')

    def do_filter(self, engine_slug, queryset, search_text):
        """Filter an arbitrary model queryset by joining against watson_searchentry."""
        model = queryset.model
        content_type = ContentType.objects.get_for_model(model)
        connection = connections[queryset.db]
        pk = model._meta.pk
        # Pick the object-id column and the casts needed to join it to the
        # model's primary key, depending on the pk type.
        if has_int_pk(model):
            ref_name = 'object_id_int'
            ref_name_typecast = ''
            watson_id_typecast = ''
        elif has_uuid_pk(model):
            ref_name = 'object_id'
            ref_name_typecast = ''
            watson_id_typecast = '::uuid'
        else:
            ref_name = 'object_id'
            ref_name_typecast = '::text'
            watson_id_typecast = ''
        # Fix: restored the missing `@@` match operator in the tsquery clause.
        return queryset.extra(tables=('watson_searchentry',), where=('watson_searchentry.engine_slug = %s', "watson_searchentry.search_tsv @@ to_tsquery('{search_config}', %s)".format(search_config=self.search_config), 'watson_searchentry.{ref_name}{watson_id_typecast} = {table_name}.{pk_name}{ref_name_typecast}'.format(ref_name=ref_name, table_name=connection.ops.quote_name(model._meta.db_table), pk_name=connection.ops.quote_name((pk.db_column or pk.attname)), ref_name_typecast=ref_name_typecast, watson_id_typecast=watson_id_typecast), 'watson_searchentry.content_type_id = %s'), params=(engine_slug, self.escape_postgres_query(search_text), content_type.id))

    def do_filter_ranking(self, engine_slug, queryset, search_text):
        """Annotate and order a filtered queryset by ts_rank_cd relevance."""
        return queryset.annotate(watson_rank=RawSQL("ts_rank_cd(watson_searchentry.search_tsv, to_tsquery('{config}', %s))".format(config=self.search_config), (self.escape_postgres_query(search_text),))).order_by('-watson_rank')

    def do_string_cast(self, connection, column_name):
        """SQL expression casting the named column to text."""
        return '{column_name}::text'.format(column_name=connection.ops.quote_name(column_name))
class TestArchChecker_1(TestArchChecker):
    """Similarity-search behaviour of TestArchiveChecker on the test_ptree fixtures."""

    @staticmethod
    def _ptree(name):
        # Absolute path of a fixture archive relative to this test file.
        here = os.path.dirname(os.path.realpath(__file__))
        return '{cwd}/test_ptree/{name}'.format(cwd=here, name=name)

    def test_significantlySimilar_1(self):
        checker = TestArchiveChecker(self._ptree('notQuiteAllArch.zip'))
        result = checker.getSignificantlySimilarArches(searchDistance=2)
        expected = {5: [self._ptree('allArch.zip')]}
        self.assertEqual(result, expected)

    def test_junkFileFiltering(self):
        checker = TestArchiveChecker(self._ptree('z_reg_junk.zip'))
        result = checker.getSignificantlySimilarArches()
        expected = {2: [self._ptree('z_reg.zip')]}
        self.assertEqual(result, expected)

    def test_sizeFiltering2(self):
        checker = TestArchiveChecker(self._ptree('z_sml.zip'))
        result = checker.getSignificantlySimilarArches()
        expected = {2: [self._ptree('z_reg.zip'), self._ptree('z_reg_junk.zip')]}
        self.assertEqual(result, expected)

    def test_significantlySimilar_3(self):
        checker = TestArchiveChecker(self._ptree('small.zip'))
        result = checker.getSignificantlySimilarArches(searchDistance=2)
        expected = {
            4: [self._ptree('regular.zip'), self._ptree('small_and_regular.zip')],
            5: [self._ptree('regular-u.zip')],
        }
        print(expected)
        print(result)
        self.assertEqual(result, expected)
class OptionPlotoptionsOrganizationLevelsStatesInactive(Options):
    """`plotOptions.organization.levels.states.inactive` option group.

    Fix: the getter/setter pairs were plain ``def``s with duplicate names, so
    each setter silently shadowed its getter (stripped ``@property`` /
    ``@x.setter`` decorators, matching the accessor pattern used elsewhere in
    this options hierarchy). Restored as properties.
    """

    @property
    def animation(self) -> 'OptionPlotoptionsOrganizationLevelsStatesInactiveAnimation':
        """Sub-options object for the inactive-state animation."""
        return self._config_sub_data('animation', OptionPlotoptionsOrganizationLevelsStatesInactiveAnimation)

    @property
    def enabled(self):
        """Whether the inactive state is enabled. Defaults to ``True``."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def linkOpacity(self):
        """Link opacity in the inactive state. Defaults to ``0.1``."""
        return self._config_get(0.1)

    @linkOpacity.setter
    def linkOpacity(self, num: float):
        self._config(num, js_type=False)

    @property
    def opacity(self):
        """Node opacity in the inactive state. Defaults to ``0.1``."""
        return self._config_get(0.1)

    @opacity.setter
    def opacity(self, num: float):
        self._config(num, js_type=False)
class Tree(object):
    """Builds a tree of markup nodes; a stack tracks the open-element path."""

    def __init__(self):
        self.outmost = Root('')
        self.stack = deque()
        self.stack.append(self.outmost)

    def clear(self):
        """Reset to a single fresh Root with an empty open path."""
        self.outmost = Root('')
        self.stack.clear()
        self.stack.append(self.outmost)

    def last(self):
        """Return the innermost open element (top of the stack)."""
        return self.stack[-1]

    def nest(self, name, attr):
        """Open a new Tag: attach it to the current element and push it."""
        item = Tag(name, attr)
        self.last().append(item)
        self.stack.append(item)

    def _append_leaf(self, node):
        # Shared implementation for all leaf node types: attach to the
        # innermost open element without changing the open-element stack.
        # (Replaces seven identical bodies in the original.)
        self.last().append(node)

    def dnest(self, data):
        """Append character data."""
        self._append_leaf(Data(data))

    def xnest(self, name, attr):
        """Append a self-closing XTag."""
        self._append_leaf(XTag(name, attr))

    def ynest(self, data):
        """Append a Meta node."""
        self._append_leaf(Meta(data))

    def mnest(self, data):
        """Append a Comment node."""
        self._append_leaf(Comment(data))

    def cnest(self, data):
        """Append a Code node."""
        self._append_leaf(Code(data))

    def rnest(self, data):
        """Append an Amp node (presumably a character/entity reference)."""
        self._append_leaf(Amp(data))

    def inest(self, data):
        """Append a Pi (processing-instruction) node."""
        self._append_leaf(Pi(data))

    def enclose(self, name):
        """Close elements up to and including the nearest open tag *name*.

        If no open element carries that name, the stack is left untouched.
        """
        count = 0
        for node in reversed(self.stack):
            count += 1
            if node.name == name:
                break
        else:
            # Name not found (or stack empty): pop nothing.
            count = 0
        # Fix: was the Python-2-only xrange(); range() behaves identically here.
        for _ in range(count):
            self.stack.pop()
(IHeadingText)
class HeadingText(MHeadingText, LayoutWidget):
    """A Qt label widget that renders its text wrapped in <b> tags."""

    def _create_control(self, parent):
        # Fixed vertical policy so the heading keeps its natural height.
        label = QtGui.QLabel(parent)
        label.setSizePolicy(QtGui.QSizePolicy.Policy.Preferred, QtGui.QSizePolicy.Policy.Fixed)
        return label

    def _set_control_text(self, text):
        self.control.setText(f'<b>{text}</b>')

    def _get_control_text(self):
        # Strip the surrounding <b>...</b> markup added by _set_control_text.
        return self.control.text()[3:-4]
def reverse_app(parser, token):
    """Parse a ``{% reverse_app namespaces viewname [args...] [as var] %}`` tag."""
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'reverse_app' takes at least two arguments, a namespace and a URL pattern name.")
    namespaces = parser.compile_filter(bits[1])
    viewname = parser.compile_filter(bits[2])
    args = []
    kwargs = {}
    asvar = None
    remainder = bits[3:]
    # Optional trailing "... as varname" captures the result into a variable.
    if len(remainder) >= 2 and remainder[-2] == 'as':
        asvar = remainder[-1]
        remainder = remainder[:-2]
    for bit in remainder:
        match = kwarg_re.match(bit)
        if not match:
            raise TemplateSyntaxError('Malformed arguments to reverse_app tag')
        name, value = match.groups()
        if name:
            kwargs[name] = parser.compile_filter(value)
        else:
            args.append(parser.compile_filter(value))
    return ReverseAppNode(namespaces, viewname, args, kwargs, asvar)
()
def edit_observation(observation, data_type, result):
    """Store *result* into the field of an Observation matching *data_type*, then save."""
    doc = frappe.get_doc('Observation', observation)
    if data_type in ('Range', 'Ratio', 'Quantity', 'Numeric'):
        doc.result_data = result
    elif data_type == 'Text':
        doc.result_text = result
    # Other data types fall through unchanged; the document is saved regardless.
    doc.save()
class MemberAccess(Expression, LValueMixin):
    """AST node for a ``base.member`` expression; lowers member reads to IR.

    NOTE(review): ``base_expression_cfg``/``base_expression_value`` appear
    both as ``synthesized()`` attributes and as same-named plain ``def``s --
    this looks like stripped synthesized-attribute decorators from the
    attribute-grammar framework; confirm against its API before relying on
    the method forms being reachable.
    """
    expression: Expression  # the base expression (left of the dot)
    member_name: str  # the accessed member's name
    base_expression_cfg = synthesized()
    base_expression_value = synthesized()
    def base_expression_value(self, expression):
        return expression.expression_value
    def base_expression_cfg(self, expression):
        return expression.cfg
    def expression_value(self, expression: {Expression.expression_value}):
        """Resolve this member access to an IR value.

        Tries, in order: address builtins, bound-call specifiers, declared
        functions/variables/events, low-level call builtins, enum constants,
        array/bytes builtins, and magic-variable properties; falls back to a
        plain MemberLoad.
        """
        expression_value = expression.expression_value
        member_load_args = {'ast_node': self, 'base': expression_value, 'member': self.member_name, 'type_string': self.type_string}
        # Default lowering if no special case below matches.
        member_load = ir.MemberLoad(**member_load_args)
        # address/address payable: only `.balance` gets special IR here.
        if (expression.type_string in {'address', 'address payable'}):
            if (self.member_name == 'balance'):
                return ir.Balance(self, expression_value)
        bound_function_args = (self, self.base_expression_value, self.base_expression_cfg, member_load_args)
        # `.value(...)` / `.gas(...)` specifiers on an already-bound function.
        if isinstance(expression_value, BoundFunctionBase):
            if (self.member_name == 'value'):
                return ValueSpecifier(*bound_function_args)
            if (self.member_name == 'gas'):
                return GasSpecifier(*bound_function_args)
        # `.value` on a `new` expression.
        if (hasattr(expression_value, 'value') and (expression_value.value == 'NEW')):
            if (self.member_name == 'value'):
                return BoundLowLevelValueCall(*bound_function_args)
        # Member resolves to a declaration elsewhere in the AST.
        if (hasattr(self, 'referenced_declaration') and (self.resolve_reference(self.referenced_declaration) is not None)):
            declaration = self.resolve_reference(self.referenced_declaration)
            defining_contract = declaration.find_ancestor_of_type(ContractDefinition)
            if isinstance(declaration, FunctionDefinition):
                if (defining_contract.contract_kind == 'library'):
                    # t_type$_t_... identifier distinguishes Library.f from
                    # `using ... for` bound calls -- presumably; confirm.
                    if self.expression.type_identifier.startswith('t_type$_t_'):
                        return LibraryFunction(self)
                    else:
                        return BoundLibraryFunction(*bound_function_args)
                is_internal = self.type_descriptions['typeIdentifier'].startswith('t_function_internal')
                if is_internal:
                    return ir.NotImplementedNode(self, 'FunctionRef')
            elif isinstance(declaration, FunctionDefinition):
                return BoundFunction(*bound_function_args)
            elif isinstance(declaration, VariableDeclaration):
                parent = self.parent()
                # A variable used in call position (e.g. public getter).
                if (isinstance(declaration, VariableDeclaration) and isinstance(parent, ast.FunctionCall)):
                    if (parent.expression == self):
                        return BoundFunction(*bound_function_args)
            elif isinstance(declaration, EventDefinition):
                return EventCallable(declaration)
        # Built-in members of address-like values: send/transfer/call/delegatecall.
        builtin_call_types = {'send': BoundSendCall, 'transfer': BoundTransferCall, 'call': BoundLowLevelCall, 'delegatecall': BoundDelegateCallable}
        if (self.member_name in builtin_call_types):
            return builtin_call_types[self.member_name](*bound_function_args)
        # Enum constant access (EnumType.Member).
        if isinstance(expression, (Identifier, MemberAccess)):
            declaration = self.resolve_reference(expression.referenced_declaration)
            if isinstance(declaration, EnumDefinition):
                return ir.Const(self, declaration.canonical_name_of(self.member_name))
        # Array/bytes builtins: push and length.
        if (expression.type_identifier.startswith('t_array$') or expression.type_identifier.startswith('t_bytes')):
            if (self.member_name == 'push'):
                return PushBuiltin(expression_value, expression.cfg)
            elif (self.member_name == 'length'):
                return ir.UnaryOp(self, 'length', expression_value)
        # Properties of magic variables (msg/tx/block/abi-style lookups).
        if isinstance(expression_value, ir.MagicVariable):
            magic = expression_value
            prop_struct = builtin_map_nested[magic.variable]
            prop = prop_struct.get(self.member_name, None)
            if (prop is None):
                raise CfgCompilationError(f"Could not resolve property '{self.member_name}' in '{self.src_code}'")
            if issubclass(prop, SolidityBuiltInFunction):
                return prop()
            return prop(self)
        return member_load
    def assignment_generator(self, expression):
        """Return a closure that stores into this member (LValue support)."""
        ev = expression.expression_value
        def assignment(ast_node, expression):
            return ir.MemberStore(ast_node, ev, self.member_name, expression)
        return assignment
    def cfg(self, expression):
        """Control-flow for evaluating this access.

        Enum constants need no base evaluation; otherwise evaluate the base
        expression first, then this node's value.
        """
        if isinstance(expression, (Identifier, MemberAccess)):
            declaration = self.resolve_reference(expression.referenced_declaration)
            if isinstance(declaration, EnumDefinition):
                return CfgSimple.statement(self.expression_value)
        return (expression.cfg >> self.expression_value)
    def cfg_lhs(self, expression):
        """When used as an assignment target, only the base is evaluated."""
        return expression.cfg
class bolt_checkpoint(write_checkpoint):
    """Checkpoint decorator specialised for Bolt jobs.

    Resolves instance ids out of Bolt argument objects and mirrors run-id /
    trace-logger registration into the Bolt registries.

    Fix: the three class-level hooks took ``cls`` and called ``super()`` but
    had no ``@classmethod`` decorator (evidently stripped), so calling them
    would have passed an instance as ``cls``. Restored.
    """

    _DEFAULT_REGISTRY_KEY = 'bolt_checkpoint_key'
    # Keyword names that may carry an instance id, directly or wrapped in a
    # Bolt argument object (unwrapped by _param_to_instance_id below).
    _PARAMS_CONTAINING_INSTANCE_ID = ['instance_id', 'publisher_id', 'partner_id', 'job', 'instance_args', 'injection_args']
    _DEFAULT_COMPONENT_NAME = 'Bolt'

    @classmethod
    def _param_to_instance_id(cls, instance_id_param: str, kwargs: Dict[str, Any]) -> Optional[str]:
        """Extract an instance id from kwargs[instance_id_param], unwrapping Bolt arg types."""
        instance_id_obj = kwargs.get(instance_id_param)
        if (not instance_id_obj):
            return instance_id_obj
        elif isinstance(instance_id_obj, str):
            return instance_id_obj
        elif isinstance(instance_id_obj, BoltCreateInstanceArgs):
            return instance_id_obj.instance_id
        elif isinstance(instance_id_obj, BoltPlayerArgs):
            return instance_id_obj.create_instance_args.instance_id
        elif isinstance(instance_id_obj, BoltJob):
            return instance_id_obj.publisher_bolt_args.create_instance_args.instance_id
        elif isinstance(instance_id_obj, BoltHookCommonInjectionArgs):
            return instance_id_obj.publisher_id
        else:
            return super()._param_to_instance_id(instance_id_param=instance_id_param, kwargs=kwargs)

    @classmethod
    def register_run_id(cls, run_id: Optional[str], *, key: Optional[str]=None) -> str:
        """Register a run id; mirror it into the Bolt default registry when unkeyed."""
        run_id = super().register_run_id(run_id, key=key)
        if (not key):
            InstanceIdtoRunIdRegistry.override_default(run_id)
        return run_id

    @classmethod
    def register_trace_logger(cls, trace_logging_service: TraceLoggingService, *, key: Optional[str]=None) -> None:
        """Register a trace logger; mirror it into the Bolt default registry when unkeyed."""
        super().register_trace_logger(trace_logging_service, key=key)
        if (not key):
            TraceLoggingRegistry.override_default(trace_logging_service)
def parse_get_repository_response(resp: Dict[str, Any], downloaded_at: date) -> Repository:
    """Map a GitHub 'get repository' API payload onto a Repository record."""
    license_field = resp.get('license')
    license_key = license_field.get('key') if isinstance(license_field, dict) else None
    return Repository(
        name=resp['full_name'],
        short_name=resp.get('name', ''),
        language=resp.get('language'),
        license=license_key,
        is_fork=resp.get('fork'),
        stargazers_count=resp.get('stargazers_count'),
        watchers_count=resp.get('watchers_count'),
        forks_count=resp.get('forks_count'),
        network_count=resp.get('network_count'),
        subscribers_count=resp.get('subscribers_count'),
        created_at=_parse_optional_datetime(resp.get('created_at')),
        updated_at=_parse_optional_datetime(resp.get('updated_at')),
        pushed_at=_parse_optional_datetime(resp.get('pushed_at')),
        downloaded_at=downloaded_at,
    )
_grad()
def test_encode_decode(encoder: LatentDiffusionAutoencoder, sample_image: Image.Image):
    """Round-tripping through the autoencoder stays RGB, unsaturated and close to the input."""
    latents = encoder.encode_image(sample_image)
    decoded = encoder.decode_latents(latents)
    assert decoded.mode == 'RGB'
    # Band 1 is the green channel of an RGB image: it must not saturate.
    assert max(iter(decoded.getdata(band=1))) < 255
    ensure_similar_images(sample_image, decoded, min_psnr=20, min_ssim=0.9)
def exposed_delete_gravitytales_bot_blocked_pages():
    """Delete cached gravitytales.com pages (current and versioned) that captured the bot-alert interstitial."""
    with db.session_context() as sess:
        for table in (db.WebPages.__table__, version_table(db.WebPages.__table__)):
            stmt = (
                table.delete()
                .where(table.c.netloc == 'gravitytales.com')
                .where(table.c.content.like('%<div id="bot-alert" class="alert alert-info">%'))
            )
            print(stmt)
            sess.execute(stmt)
            sess.commit()
class ExpandablePanel(LayoutWidget):
    """A wx panel stacking named, collapsible sub-panels with clickable headers."""

    STYLE = wx.CLIP_CHILDREN
    collapsed_image = Image(ImageResource('mycarat1'))
    expanded_image = Image(ImageResource('mycarat2'))
    # name -> content panel widget
    _layers = Dict(Str)
    # name -> header panel widget
    _headers = Dict(Str)

    def __init__(self, parent=None, **traits):
        create = traits.pop('create', None)
        super().__init__(parent=parent, **traits)
        if create:
            self.create()
            warnings.warn('automatic widget creation is deprecated and will be removed in a future Pyface version, code should not pass the create parameter and should instead call create() explicitly', DeprecationWarning, stacklevel=2)
        elif (create is not None):
            warnings.warn('setting create=False is no longer required', DeprecationWarning, stacklevel=2)

    def add_panel(self, name, layer):
        """Add *layer* under a new header labelled *name*; starts collapsed."""
        parent = self.control
        sizer = self.control.GetSizer()
        header = self._create_header(parent, text=name)
        sizer.Add(header, 0, wx.EXPAND)
        sizer.Add(layer, 1, wx.EXPAND)
        # Content starts hidden until the header toggles it.
        sizer.Show(layer, False)
        self._layers[name] = layer
        return layer

    def remove_panel(self, name):
        """Destroy the named panel and its header; no-op for unknown names."""
        if (name not in self._layers):
            return
        sizer = self.control.GetSizer()
        # Fix: the destroyed widgets were previously left in _layers/_headers,
        # leaking dead references and preventing the name from being re-added.
        panel = self._layers.pop(name)
        header = self._headers.pop(name)
        panel.Destroy()
        header.Destroy()
        sizer.Layout()

    def _create_control(self, parent):
        panel = wx.Panel(parent, -1, style=self.STYLE)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizer(sizer)
        panel.SetAutoLayout(True)
        return panel

    def _create_header(self, parent, text):
        """Build a header panel with an ExpandableHeader wired to our handlers."""
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
        panel.SetSizer(sizer)
        panel.SetAutoLayout(True)
        heading = ExpandableHeader(panel, title=text)
        heading.create()
        sizer.Add(heading.control, 1, wx.EXPAND)
        heading.observe(self._on_button, 'panel_expanded')
        heading.observe(self._on_panel_closed, 'panel_closed')
        sizer.Fit(panel)
        self._headers[text] = panel
        return panel

    def _on_button(self, event):
        """Toggle the visibility of the layer belonging to the clicked header."""
        header = event.new
        name = header.title
        visible = header.state
        sizer = self.control.GetSizer()
        sizer.Show(self._layers[name], visible)
        sizer.Layout()
        # Nudge the size to force the parent to re-layout around the change.
        (w, h) = self.control.GetSize().Get()
        self.control.SetSize((w + 1, h + 1))
        self.control.SetSize((w, h))

    def _on_panel_closed(self, event):
        """Remove the panel whose header fired a close event."""
        header = event.new
        self.remove_panel(header.title)
class TestWorkflowExecutionRpc(BaseUnitTest):
    """RPC tests for the scheduler service's workflow-execution operations."""

    def setUp(self) -> None:
        """Start an in-process AIFlow server with external services mocked out."""
        super().setUp()
        with mock.patch('ai_flow.task_executor.common.task_executor_base.HeartbeatManager'):
            with mock.patch('ai_flow.rpc.service.scheduler_service.get_notification_client', MockNotificationClient):
                with mock.patch('ai_flow.task_executor.task_executor.TaskExecutorFactory.get_task_executor'):
                    self.server = AIFlowServer()
                    self.server.run(is_block=False)
        self.client = get_scheduler_client()
        self.notification_client = self.server.scheduler_service.notification_client
        self.workflow_meta = self.prepare_workflow()

    def tearDown(self) -> None:
        self.server.stop()
        super().tearDown()

    def prepare_workflow(self):
        """Register a single-operator workflow plus one snapshot; return the workflow metadata."""
        with Workflow(name='workflow1') as workflow:
            BashOperator(name='bash', bash_command='echo 1')
        workflow_meta = self.client.add_workflow(workflow.name, 'default', 'mock_content', cloudpickle.dumps(workflow))
        self.client.add_workflow_snapshot(workflow_meta.id, 'uri', cloudpickle.dumps(workflow), 'md5')
        return workflow_meta

    def prepare_workflow_execution(self, workflow_id, snapshot_id):
        """Insert two manual executions for *workflow_id* and mark execution 1 FAILED.

        Bug fix: ``self`` was missing from the signature, so every
        ``self.prepare_workflow_execution(1, 1)`` call raised a TypeError.
        """
        with create_session() as session:
            metadata_manager = MetadataManager(session)
            metadata_manager.add_workflow_execution(workflow_id=workflow_id, run_type='manual', snapshot_id=snapshot_id)
            metadata_manager.add_workflow_execution(workflow_id=workflow_id, run_type='manual', snapshot_id=snapshot_id)
            metadata_manager.update_workflow_execution(1, WorkflowStatus.FAILED)

    def test_start_workflow_execution(self):
        id = self.client.start_workflow_execution(workflow_name=self.workflow_meta.name, namespace=self.workflow_meta.namespace)
        self.assertEqual(1, id)

    def test_stop_workflow_execution(self):
        self.client.stop_workflow_execution(1)
        self.assertEqual(SchedulingEventType.STOP_WORKFLOW_EXECUTION, self.notification_client.list_events()[0].value)
        self.assertEqual(json.dumps({'workflow_execution_id': 1}), self.notification_client.list_events()[0].context)

    def test_stop_workflow_executions(self):
        self.prepare_workflow_execution(1, 1)
        self.client.stop_workflow_executions(namespace=self.workflow_meta.namespace, workflow_name=self.workflow_meta.name)
        self.assertEqual(SchedulingEventType.STOP_WORKFLOW_EXECUTION, self.notification_client.list_events()[0].value)
        self.assertEqual(json.dumps({'workflow_execution_id': 2}), self.notification_client.list_events()[0].context)

    def test_delete_workflow_execution(self):
        self.prepare_workflow_execution(1, 1)
        self.assertEqual(2, len(self.client.list_workflow_executions(workflow_name=self.workflow_meta.name, namespace=self.workflow_meta.namespace)))
        self.client.delete_workflow_execution(1)
        self.assertEqual(1, len(self.client.list_workflow_executions(workflow_name=self.workflow_meta.name, namespace=self.workflow_meta.namespace)))
        with self.assertRaisesRegex(AIFlowException, 'not finished, cannot be removed'):
            self.client.delete_workflow_execution(2)

    def test_get_workflow_execution(self):
        self.prepare_workflow_execution(1, 1)
        execution = self.client.get_workflow_execution(1)
        self.assertEqual(1, execution.id)
        self.assertEqual(1, execution.workflow_id)
        self.assertEqual(WorkflowStatus.FAILED.value, execution.status)
        self.assertEqual('manual', execution.run_type)
        self.assertEqual(1, execution.snapshot_id)
        self.assertEqual(-1, execution.event_offset)

    def test_list_workflow_executions(self):
        self.prepare_workflow_execution(1, 1)
        executions = self.client.list_workflow_executions(workflow_name=self.workflow_meta.name, namespace=self.workflow_meta.namespace)
        self.assertEqual(2, len(executions))
        executions = self.client.list_workflow_executions(workflow_name=self.workflow_meta.name, namespace=self.workflow_meta.namespace, page_size=2, offset=1)
        self.assertEqual(1, len(executions))

    def test_list_non_exists_workflow_executions(self):
        with self.assertRaisesRegex(AIFlowException, 'Workflow invalid.invalid not exists'):
            self.client.list_workflow_executions('invalid', 'invalid')

    def test_stop_non_exists_workflow_executions(self):
        with self.assertRaisesRegex(AIFlowException, 'Workflow invalid.invalid not exists'):
            self.client.stop_workflow_executions('invalid', 'invalid')
class OptionSeriesWaterfallSonificationDefaultinstrumentoptionsMappingPan(Options):
    """Pan-mapping options for a waterfall series' default sonification instrument.

    Bug fix: each option appeared as two plain methods with the same name
    (getter form and setter form), so the second ``def`` silently shadowed the
    first and the getters were unreachable.  Restored the ``@property`` /
    ``@<name>.setter`` pairs the duplicate definitions imply.
    """

    @property
    def mapFunction(self):
        """Return the configured mapping function (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Return the data property the pan is mapped to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Return the configured maximum of the mapping range (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Return the configured minimum of the mapping range (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Return the configured 'within' grouping (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def main():
    """Ansible entry point for the FortiOS log_syslogd4 override-filter module."""
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = None
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'log_syslogd4_override_filter': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Merge the versioned schema's options into the module argument spec;
    # the mkey (if any) becomes a required option.
    filter_options = fields['log_syslogd4_override_filter']['options']
    for attribute_name in module_spec['options']:
        filter_options[attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            filter_options[attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        # Logging defaults to off unless explicitly enabled.
        connection.set_option('enable_log', module.params.get('enable_log', False))
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'log_syslogd4_override_filter')
        is_error, has_changed, result, diff = fortios_log_syslogd4(module.params, fos)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    version_mismatch = bool(versions_check_result and versions_check_result['matched'] is False)
    if version_mismatch:
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if not is_error:
        if version_mismatch:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif version_mismatch:
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
def int2c2e3d_14(ax, da, A, bx, db, B):
    """Fill and return a (3, 15) array of summed integral components.

    NOTE(review): this looks like an auto-generated (common-subexpression-
    eliminated) two-center integral derivative kernel; ``ax``/``bx`` appear to
    be exponents with ``da``/``db`` contraction coefficients and ``A``/``B``
    3-component centers (indexed [0..2]) -- confirm against the generator.
    ``boys`` is resolved elsewhere in this module.  Do not hand-edit the
    algebra below; regenerate instead.
    """
    result = numpy.zeros((3, 15), dtype=float)
    # Common subexpressions (generated, single-assignment).
    x0 = (ax + bx)
    x1 = (x0 ** (- 1.0))
    x2 = ((- x1) * ((ax * A[0]) + (bx * B[0])))
    x3 = ((- x2) - A[0])
    x4 = ((- x2) - B[0])
    x5 = (bx ** (- 1.0))
    x6 = (ax * x1)
    x7 = ((bx * x6) * ((((A[0] - B[0]) ** 2) + ((A[1] - B[1]) ** 2)) + ((A[2] - B[2]) ** 2)))
    x8 = boys(4, x7)
    x9 = 17.
    x10 = ((2.0 * x5) * x9)
    x11 = ((x0 ** (- 1.5)) * x10)
    x12 = (x11 * x8)
    x13 = (x12 * x4)
    x14 = (ax ** (- 1.0))
    x15 = (x0 ** (- 0.5))
    x16 = boys(3, x7)
    x17 = (0.5 * x5)
    x18 = (x17 * ((- x12) + (((((2.0 * x14) * x15) * x16) * x5) * x9)))
    x19 = boys(5, x7)
    x20 = (x4 ** 2)
    x21 = (x14 * x15)
    x22 = (x10 * x21)
    x23 = (x20 * x22)
    x24 = (x19 * x23)
    x25 = boys(2, x7)
    x26 = (x17 * (((- x11) * x25) + (((((2.0 * x14) * x15) * x5) * x9) * boys(1, x7))))
    x27 = (x11 * x16)
    x28 = (x17 * ((((((2.0 * x14) * x15) * x25) * x5) * x9) - x27))
    x29 = (x23 * x8)
    x30 = (x28 + x29)
    x31 = (x16 * x22)
    x32 = (1.5 * x5)
    x33 = ((x32 * (((x20 * x31) + x26) - (x30 * x6))) + (x4 * ((x4 * (x18 + x24)) - (x5 * (x13 - ((((((2.0 * x14) * x15) * x16) * x4) * x5) * x9))))))
    x34 = (0.5 / (ax + bx))
    x35 = ((x34 * x4) * (x30 + (x5 * ((((((2.0 * x14) * x15) * x25) * x5) * x9) - x27))))
    x36 = (da * db)
    # NOTE(review): x37/x46/x65 multiply the coefficient product by literal 0.,
    # zeroing every result entry that uses them -- presumably a degenerate case
    # emitted by the generator; confirm before relying on nonzero output.
    x37 = (0. * x36)
    x38 = ((- x1) * ((ax * A[1]) + (bx * B[1])))
    x39 = ((- x38) - B[1])
    x40 = (x12 * x39)
    x41 = (x5 * (((((((2.0 * x14) * x15) * x16) * x39) * x5) * x9) - x40))
    x42 = (x24 * x39)
    x43 = ((((- x39) * x5) * (x13 - ((((((2.0 * x14) * x15) * x16) * x4) * x5) * x9))) + ((0.5 * x4) * (x41 + (2.0 * x42))))
    x44 = ((x39 * x5) * ((((((2.0 * x14) * x15) * x25) * x5) * x9) - x27))
    x45 = ((0.5 * x34) * (((2.0 * x29) * x39) + x44))
    x46 = (0. * x36)
    x47 = ((- x1) * ((ax * A[2]) + (bx * B[2])))
    x48 = ((- x47) - B[2])
    x49 = ((x48 * x5) * ((- x12) + (((((2.0 * x14) * x15) * x16) * x5) * x9)))
    x50 = (0.5 * x49)
    x51 = ((x4 * ((x24 * x48) + x50)) - ((x48 * x5) * (x13 - ((((((2.0 * x14) * x15) * x16) * x4) * x5) * x9))))
    x52 = ((x48 * x5) * ((((((2.0 * x14) * x15) * x25) * x5) * x9) - x27))
    x53 = (0.5 * x52)
    x54 = (x34 * ((x29 * x48) + x53))
    x55 = (x39 ** 2)
    x56 = (x22 * x55)
    x57 = (x19 * x56)
    x58 = (x18 + x57)
    x59 = (x56 * x8)
    x60 = (x28 + x59)
    x61 = ((x26 + (x31 * x55)) - (x6 * x60))
    x62 = ((x17 * x61) + (x20 * x58))
    x63 = (x34 * x4)
    x64 = (x60 * x63)
    x65 = (0. * x36)
    x66 = ((x48 * x5) * (((((((2.0 * x14) * x15) * x16) * x39) * x5) * x9) - x40))
    x67 = ((x42 * x48) + (0.5 * x66))
    x68 = (((((((4.0 * x21) * x39) * x48) * x5) * x63) * x8) * x9)
    x69 = (1. * x65)
    x70 = (x48 ** 2)
    x71 = (x22 * x70)
    x72 = (x18 + (x19 * x71))
    x73 = (x28 + (x71 * x8))
    x74 = ((x26 + (x31 * x70)) - (x6 * x73))
    x75 = (x17 * x74)
    x76 = ((x20 * x72) + x75)
    x77 = (x34 * x73)
    x78 = (x4 * x77)
    x79 = (x34 * ((x39 * x60) + x44))
    x80 = ((x39 * x58) + x41)
    x81 = (x3 * x4)
    x82 = (x34 * ((x48 * x59) + x53))
    x83 = ((x48 * x57) + x50)
    x84 = (x39 * x77)
    x85 = (x39 * x72)
    x86 = (x34 * ((x48 * x73) + x52))
    x87 = ((x48 * x72) + x49)
    x88 = ((x32 * x61) + (x39 * x80))
    x89 = (x3 * x37)
    x90 = ((x39 * x83) + x66)
    x91 = (x3 * x46)
    x92 = ((x55 * x72) + x75)
    x93 = (x39 * x87)
    x94 = ((x32 * x74) + (x48 * x87))
    x95 = ((- x38) - A[1])
    x96 = (x37 * x95)
    x97 = (x4 * x95)
    x98 = ((- x47) - A[2])
    x99 = (x37 * x98)
    x100 = (x4 * x98)
    # Assemble the (3, 15) component sums from the shared subexpressions.
    result[(0, 0)] = numpy.sum((x37 * ((x3 * x33) + (4.0 * x35))))
    result[(0, 1)] = numpy.sum((x46 * ((x3 * x43) + (3.0 * x45))))
    result[(0, 2)] = numpy.sum((x46 * ((x3 * x51) + (3.0 * x54))))
    result[(0, 3)] = numpy.sum((x65 * ((x3 * x62) + (2.0 * x64))))
    result[(0, 4)] = numpy.sum((x69 * ((x3 * x67) + x68)))
    result[(0, 5)] = numpy.sum((x65 * ((x3 * x76) + (2.0 * x78))))
    result[(0, 6)] = numpy.sum((x46 * (x79 + (x80 * x81))))
    result[(0, 7)] = numpy.sum((x69 * ((x81 * x83) + x82)))
    result[(0, 8)] = numpy.sum((x69 * ((x81 * x85) + x84)))
    result[(0, 9)] = numpy.sum((x46 * ((x81 * x87) + x86)))
    result[(0, 10)] = numpy.sum((x88 * x89))
    result[(0, 11)] = numpy.sum((x90 * x91))
    result[(0, 12)] = numpy.sum(((x3 * x65) * x92))
    result[(0, 13)] = numpy.sum((x91 * x93))
    result[(0, 14)] = numpy.sum((x89 * x94))
    result[(1, 0)] = numpy.sum((x33 * x96))
    result[(1, 1)] = numpy.sum((x46 * (x35 + (x43 * x95))))
    result[(1, 2)] = numpy.sum(((x46 * x51) * x95))
    result[(1, 3)] = numpy.sum((x65 * ((2.0 * x45) + (x62 * x95))))
    result[(1, 4)] = numpy.sum((x69 * (x54 + (x67 * x95))))
    result[(1, 5)] = numpy.sum(((x65 * x76) * x95))
    result[(1, 6)] = numpy.sum((x46 * ((3.0 * x64) + (x80 * x97))))
    result[(1, 7)] = numpy.sum((x69 * (x68 + (x83 * x97))))
    result[(1, 8)] = numpy.sum((x69 * (x78 + (x85 * x97))))
    result[(1, 9)] = numpy.sum(((x46 * x87) * x97))
    result[(1, 10)] = numpy.sum((x37 * ((4.0 * x79) + (x88 * x95))))
    result[(1, 11)] = numpy.sum((x46 * ((3.0 * x82) + (x90 * x95))))
    result[(1, 12)] = numpy.sum((x65 * ((2.0 * x84) + (x92 * x95))))
    result[(1, 13)] = numpy.sum((x46 * (x86 + (x93 * x95))))
    result[(1, 14)] = numpy.sum((x94 * x96))
    result[(2, 0)] = numpy.sum((x33 * x99))
    result[(2, 1)] = numpy.sum(((x43 * x46) * x98))
    result[(2, 2)] = numpy.sum((x46 * (x35 + (x51 * x98))))
    result[(2, 3)] = numpy.sum(((x62 * x65) * x98))
    result[(2, 4)] = numpy.sum((x69 * (x45 + (x67 * x98))))
    result[(2, 5)] = numpy.sum((x65 * ((2.0 * x54) + (x76 * x98))))
    result[(2, 6)] = numpy.sum(((x100 * x46) * x80))
    result[(2, 7)] = numpy.sum((x69 * ((x100 * x83) + x64)))
    result[(2, 8)] = numpy.sum((x69 * ((x100 * x85) + x68)))
    result[(2, 9)] = numpy.sum((x46 * ((x100 * x87) + (3.0 * x78))))
    result[(2, 10)] = numpy.sum((x88 * x99))
    result[(2, 11)] = numpy.sum((x46 * (x79 + (x90 * x98))))
    result[(2, 12)] = numpy.sum((x65 * ((2.0 * x82) + (x92 * x98))))
    result[(2, 13)] = numpy.sum((x46 * ((3.0 * x84) + (x93 * x98))))
    result[(2, 14)] = numpy.sum((x37 * ((4.0 * x86) + (x94 * x98))))
    return result
def test_multiaddr_from_string():
    """Round-trip a valid multiaddr string and reject malformed ones."""
    prefix = (((('/dns4/' + HOST) + '/tcp/') + str(PORT)) + '/p2p/')
    parsed = MultiAddr.from_string((prefix + PEER_ID))
    assert ((parsed.host == HOST) and (parsed.port == PORT) and (parsed.peer_id == PEER_ID))
    # Both an empty string and a bad peer id must be rejected.
    for bad in ('', (prefix + 'wrong-peer-id')):
        with pytest.raises(ValueError):
            MultiAddr.from_string(bad)
class CliBoundArguments(object):
    """Holds the result of binding command-line tokens to a CLI signature.

    NOTE(review): fields are declared with ``attr.ib`` but no ``@attr.s``-style
    class decorator is visible in this chunk -- presumably stripped upstream;
    confirm before relying on a generated ``__init__``/``__repr__``.
    """
    # Similarity threshold, presumably used for closest-option suggestions --
    # TODO confirm against util.closest_option's contract.
    threshold = 0.75
    sig = attr.ib()  # the CLI signature being bound
    in_args: typing.Tuple = attr.ib(converter=tuple)  # raw argument tokens
    name = attr.ib()  # program/command name
    func = attr.ib(default=None)  # alternate callable selected while parsing, if any
    post_name = attr.ib(default=attr.Factory(list))
    args = attr.ib(default=attr.Factory(list))  # collected positional arguments
    kwargs = attr.ib(default=attr.Factory(dict))  # collected keyword arguments
    meta = attr.ib(default=attr.Factory(dict))
    # Parsing state, populated by process_arguments().
    posparam = attr.ib(init=False)
    namedparams = attr.ib(init=False)
    unsatisfied = attr.ib(init=False)
    not_provided = attr.ib(init=False)
    posarg_only = attr.ib(init=False)
    skip = attr.ib(init=False)

    def process_arguments(self):
        """Walk ``in_args`` once, dispatching each token to its parameter.

        Populates args/kwargs/func; raises for unknown options, surplus
        positionals, or missing required arguments.
        """
        self.posparam = iter(self.sig.positional)
        self.namedparams = dict(self.sig.aliases)
        self.unsatisfied = set(self.sig.required)
        self.not_provided = set(self.sig.optional)
        # sticky: a parameter that keeps consuming subsequent positionals.
        self.sticky = None
        self.posarg_only = False
        self.skip = 0
        with _SeekFallbackCommand():
            for (i, arg) in enumerate(self.in_args):
                # A parameter may have consumed lookahead tokens; honour that.
                if (self.skip > 0):
                    self.skip -= 1
                    continue
                with errors.SetArgumentErrorContext(pos=i, val=arg, ba=self):
                    if (self.posarg_only or (len(arg) < 2) or (arg[0] != '-')):
                        # Positional token.
                        if (self.sticky is not None):
                            param = self.sticky
                        else:
                            try:
                                param = next(self.posparam)
                            except StopIteration:
                                exc = errors.TooManyArguments(self.in_args[i:])
                                exc.__cause__ = None
                                raise exc
                    elif (arg == '--'):
                        # Everything after a lone '--' is treated as positional.
                        self.posarg_only = True
                        continue
                    else:
                        # Named option: '--long[=value]' or short '-x...'.
                        if arg.startswith('--'):
                            name = arg.partition('=')[0]
                        else:
                            name = arg[:2]
                        try:
                            param = self.namedparams[name]
                        except KeyError:
                            raise errors.UnknownOption(name)
                    with errors.SetArgumentErrorContext(param=param):
                        param.read_argument(self, i)
                        param.apply_generic_flags(self)
        if (not self.func):
            if self.unsatisfied:
                # Each parameter may veto being reported (it may have a fallback).
                unsatisfied = []
                for p in self.unsatisfied:
                    with errors.SetArgumentErrorContext(param=p):
                        if p.unsatisfied(self):
                            unsatisfied.append(p)
                if unsatisfied:
                    raise errors.MissingRequiredArguments(unsatisfied)
            for p in self.sig.parameters.values():
                p.post_parse(self)
        # Drop transient parsing state once binding is complete.
        del self.sticky, self.posarg_only, self.skip, self.unsatisfied, self.not_provided

    def get_best_guess(self, passed_in_arg):
        """Return the known option name closest to *passed_in_arg* (for error hints)."""
        return util.closest_option(passed_in_arg, list(self.sig.aliases))

    def __iter__(self):
        # Supports: func, post_name, args, kwargs = bound_arguments
        (yield self.func)
        (yield self.post_name)
        (yield self.args)
        (yield self.kwargs)
class Rule(object):
    """A Cloud SQL ACL rule; yields a violation when an instance matches every
    configured pattern of the rule."""

    def __init__(self, rule_name, rule_index, rules):
        """Store the rule's name, its index in the rule file, and its parsed config."""
        self.rule_name = rule_name
        self.rule_index = rule_index
        self.rules = rules

    def find_violations(self, cloudsql_acl):
        """Yield a RuleViolation for *cloudsql_acl* when its instance name,
        authorized networks and require_ssl flag all match this rule.

        Generator; yields nothing when the instance has no IPv4 connectivity
        or any check fails.
        """
        if (not cloudsql_acl.ipv4_enabled):
            return
        # NOTE: despite the *_violated names these hold "matched" evidence
        # (a regex Match / bool / None); a violation is raised only when all
        # three are non-None and truthy.  The previous `= True` initializers
        # were dead code (unconditionally overwritten below) and were removed.
        is_instance_name_violated = re.match(self.rules.instance_name, cloudsql_acl.instance_name)
        authorized_networks_regex = re.compile(self.rules.authorized_networks)
        is_authorized_networks_violated = any((net for net in cloudsql_acl.authorized_networks if authorized_networks_regex.match(net)))
        if (self.rules.require_ssl is None):
            # No SSL requirement configured: this check can never contribute.
            is_require_ssl_violated = None
        else:
            is_require_ssl_violated = (self.rules.require_ssl == cloudsql_acl.require_ssl)
        should_raise_violation = (((is_instance_name_violated is not None) and is_instance_name_violated) and ((is_authorized_networks_violated is not None) and is_authorized_networks_violated) and ((is_require_ssl_violated is not None) and is_require_ssl_violated))
        if should_raise_violation:
            (yield self.RuleViolation(resource_name=cloudsql_acl.instance_name, resource_type=resource_mod.ResourceType.CLOUD_SQL_INSTANCE, resource_id=cloudsql_acl.instance_name, project_id=cloudsql_acl.project_id, full_name=cloudsql_acl.full_name, rule_name=self.rule_name, rule_index=self.rule_index, violation_type='CLOUD_SQL_VIOLATION', instance_name=cloudsql_acl.instance_name, authorized_networks=cloudsql_acl.authorized_networks, require_ssl=cloudsql_acl.require_ssl, resource_data=cloudsql_acl.json))

    # Structured record describing one rule violation.
    RuleViolation = namedtuple('RuleViolation', ['resource_type', 'resource_id', 'full_name', 'rule_name', 'rule_index', 'violation_type', 'instance_name', 'authorized_networks', 'require_ssl', 'resource_data', 'resource_name', 'project_id'])
@pytest.mark.parametrize('mat_type', ['nest', 'aij'])
@pytest.mark.parametrize('scalar', [False, True], ids=['Vector', 'Scalar'])
def test_partially_mixed_mat(V, Q, mat_type, scalar):
    """Assemble a form touching only one block of a mixed space and check the
    untouched block is identically zero.

    Bug fix: the decorator lines were mangled to bare ``.parametrize(...)``
    (a syntax error); restored the ``@pytest.mark.`` prefix.
    """
    W = (V * Q)
    (u, p) = TrialFunctions(W)
    if scalar:
        v = TestFunction(V)
        a = (inner(u, v) * dx)
        idx = (0, 0)
        other = (0, 1)
    else:
        q = TestFunction(Q)
        a = (inner(p, q) * dx)
        idx = (0, 1)
        other = (0, 0)
    A = assemble(a, mat_type=mat_type).M
    # The assembled block has the expected diagonal; the block the form never
    # references must stay zero.
    assert np.allclose(A[idx].values.diagonal(), 0.125)
    assert np.allclose(A[other].values, 0.0)
class TestRestrictedNamedTraitObserverWithWrappedObserver(unittest.TestCase):
    """_RestrictedNamedTraitObserver must inherit notify/notifier/maintainer
    from the observer it wraps."""

    def _wrap(self, **dummy_kwargs):
        # Build a restricted observer around a DummyObserver configured with
        # *dummy_kwargs*.
        return _RestrictedNamedTraitObserver(name='name', wrapped_observer=DummyObserver(**dummy_kwargs))

    def test_notify_inherited(self):
        wrapped = DummyObserver(notify=False)
        restricted = _RestrictedNamedTraitObserver(name='name', wrapped_observer=wrapped)
        self.assertEqual(restricted.notify, wrapped.notify)

    def test_notifier_inherited(self):
        notifier = DummyNotifier()
        restricted = self._wrap(notifier=notifier)
        self.assertEqual(restricted.get_notifier(None, None, None), notifier)

    def test_maintainer_inherited(self):
        maintainer = DummyNotifier()
        restricted = self._wrap(maintainer=maintainer)
        self.assertEqual(restricted.get_maintainer(None, None, None, None), maintainer)
class OptionPlotoptionsLollipopSonificationDefaultinstrumentoptionsMappingLowpass(Options):
    """Lowpass-filter mapping options for a lollipop series' default
    sonification instrument.

    NOTE(review): sibling Option classes in this module pair accessors with
    setters via @property; here only plain getter methods are visible --
    confirm against the code generator before changing the access style.
    """

    def frequency(self) -> 'OptionPlotoptionsLollipopSonificationDefaultinstrumentoptionsMappingLowpassFrequency':
        """Return the sub-options object for the lowpass filter frequency mapping."""
        return self._config_sub_data('frequency', OptionPlotoptionsLollipopSonificationDefaultinstrumentoptionsMappingLowpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsLollipopSonificationDefaultinstrumentoptionsMappingLowpassResonance':
        """Return the sub-options object for the lowpass filter resonance mapping."""
        return self._config_sub_data('resonance', OptionPlotoptionsLollipopSonificationDefaultinstrumentoptionsMappingLowpassResonance)
@pytest.mark.django_db
def test_parent_recipient_preference_to_uei(recipient_lookup):
    """Parent-recipient URI resolution must key on the UEI even when the DUNS
    value is bogus.

    Bug fix: the marker line was mangled to a bare ``.django_db`` (a syntax
    error); restored the ``@pytest.mark.`` prefix.
    """
    recipient_parameters = {'recipient_name': 'Parent Recipient Test', 'recipient_uei': '123', 'parent_recipient_uei': None, 'recipient_unique_id': 'NOT A REAL DUNS', 'parent_recipient_unique_id': None, 'is_parent_recipient': True}
    expected_result = 'f5ba3b35-167d-8f32-57b0-406c3479de90-P'
    assert (obtain_recipient_uri(**recipient_parameters) == expected_result)
class TestExpressions():
    """Expression-level tests for the Python-to-JavaScript compiler.

    Bug fixes: three checks (two in ``test_assignments``, one in
    ``test_import``) were bare expressions whose results were silently
    discarded; they now actually assert.
    """

    def test_special(self):
        assert (py2js('') == '')
        assert (py2js(' \n') == '')

    def test_ops(self):
        assert (py2js('2+3') == '2 + 3;')
        assert (py2js('2/3') == '2 / 3;')
        assert (py2js('not 2') == '!2;')
        assert (py2js('-(2+3)') == '-(2 + 3);')
        assert (py2js('True and False') == 'true && false;')
        assert (py2js('foo - bar') == 'foo - bar;')
        assert (py2js('_foo3 - _bar4') == '_foo3 - _bar4;')
        assert (py2js('3 - 4') == '3 - 4;')
        assert (py2js('"abc" - "def"') == '"abc" - "def";')
        assert (py2js("'abc' - 'def'") == '"abc" - "def";')
        assert (py2js('\'"abc" - "def"\'') == '"\\"abc\\" - \\"def\\"";')
        assert (py2js('foo - bar > 4') == '(foo - bar) > 4;')
        assert (evalpy('2+3') == '5')
        assert (evalpy('6/3') == '2')
        assert (evalpy('4//3') == '1')
        assert (evalpy('2**8') == '256')
        assert (evalpy('not True') == 'false')
        assert (evalpy('0-3') == '-3')
        assert (evalpy('True and False') == 'false')
        assert (evalpy('True or False') == 'true')
        assert (evalpy('(9-3-3)/3') == '1')

    def test_string_formatting1(self):
        assert (evalpy('"%s" % "bar"') == 'bar')
        assert (evalpy('"-%s-" % "bar"') == '-bar-')
        assert (evalpy('"foo %s foo" % "bar"') == 'foo bar foo')
        assert (evalpy('"x %i" % 6') == 'x 6')
        assert (evalpy('"x %g" % 6') == 'x 6')
        assert (evalpy('"%s: %f" % ("value", 6)') == 'value: 6.000000')
        assert (evalpy('"%r: %r" % ("value", 6)') == '"value": 6')

    def test_string_formatting2(self):
        # %-formatting and str.format must compile to the same JS.
        py2jslight = (lambda x: py2js(x, inline_stdlib=False))
        assert (py2jslight("'hi %i' % a") == py2jslight("'hi {:i}'.format(a)"))
        assert (py2jslight("'hi %i %+i' % (a, b)") == py2jslight("'hi {:i} {:+i}'.format(a, b)"))
        assert (py2jslight("'hi %f %1.2f' % (a, b)") == py2jslight("'hi {:f} {:1.2f}'.format(a, b)"))
        assert (py2jslight("'hi %s %r' % (a, b)") == py2jslight("'hi {} {!r}'.format(a, b)"))
        # f-strings require Python 3.6+.
        if (sys.version_info < (3, 6)):
            return
        assert (py2jslight("f'hi {a:i}'") == py2jslight("'hi {:i}'.format(a)"))
        assert (py2js("f'hi {a:i} {b:+i}'") == py2js("'hi {:i} {:+i}'.format(a, b)"))
        assert (py2jslight("f'hi {a:f} {b:1.2f}'") == py2jslight("'hi {:f} {:1.2f}'.format(a, b)"))
        assert (py2jslight("f'hi {a} {b!r}'") == py2jslight("'hi {} {!r}'.format(a, b)"))

    def test_string_formatting3(self):
        # NOTE(review): the fixture constants below (a = 3., d = .35, e = 0.)
        # look truncated -- the expected outputs imply pi-like values
        # (3.141593, 3.14159e+08, 0.00314...).  Confirm against the upstream
        # test file before relying on these cases.
        x = 'a = 3.; b = 7; c = "foo"; d = .35; e = 0.;'
        assert (evalpy((x + "'hi {:i}'.format(b)")) == 'hi 7')
        assert (evalpy((x + "'hi {:03i} {:+03i}'.format(b, b)")) == 'hi 007 +07')
        assert (evalpy((x + "'hi {:03} {:+03}'.format(b, b)")) == 'hi 007 +07')
        assert (evalpy((x + "'hi {:i} {:+i} {: i}'.format(b, b, b)")) == 'hi 7 +7 7')
        assert (evalpy((x + "'hi {:f} {:1.0f} {:1.2f}'.format(a, a, a)")) == 'hi 3.141593 3 3.14')
        assert (evalpy((x + "'hi {:05f} {:05.1f} {:+05.1f}'.format(a, a, a)")) == 'hi 3.141593 003.1 +03.1')
        assert (evalpy((x + "'hi {:g} {:.1g} {:.3g}'.format(a, a, a)")) == 'hi 3.14159 3 3.14')
        assert (evalpy((x + "'hi {:g} {:.1g} {:.3g}'.format(d, d, d)")) == 'hi 3.14159e+08 3e+08 3.14e+08')
        assert (evalpy((x + "'hi {:g} {:.1g} {:.3g}'.format(e, e, e)")) == 'hi 0. 0.003 0.00314')
        assert (evalpy((x + "'hi {:05g} {:05.1g} {:+05.1g}'.format(a, a, a)")) == 'hi 3.14159 00003 +0003')
        assert (evalpy((x + "'hi {} {!s} {!r}'.format(c, c, c)")) == 'hi foo foo "foo"')

    def test_string_formatting4(self):
        x = 'a = 3; b = 4; '
        assert (evalpy((x + "'hi {1:g} {1:+g} {0}'.format(a, b)")) == 'hi 4 +4 3')
        assert (evalpy((x + "t = 'hi {} {}'; t.format(a, b)")) == 'hi 3 4')

    def test_overloaded_list_ops(self):
        assert (evalpy('[1, 2] + [3, 4]') == '[ 1, 2, 3, 4 ]')
        assert (evalpy('[3, 4] + [1, 2]') == '[ 3, 4, 1, 2 ]')
        assert (evalpy('"ab" + "cd"') == 'abcd')
        assert (evalpy('[3, 4] * 2') == '[ 3, 4, 3, 4 ]')
        assert (evalpy('2 * [3, 4]') == '[ 3, 4, 3, 4 ]')
        assert (evalpy('"ab" * 2') == 'abab')
        assert (evalpy('2 * "ab"') == 'abab')
        assert (evalpy('a = [1, 2]; a += [3, 4]; a') == '[ 1, 2, 3, 4 ]')
        assert (evalpy('a = [3, 4]; a += [1, 2]; a') == '[ 3, 4, 1, 2 ]')
        assert (evalpy('a = [3, 4]; a *= 2; a') == '[ 3, 4, 3, 4 ]')
        assert (evalpy('a = "ab"; a *= 2; a') == 'abab')

    def test_raw_js_overloading(self):
        # RawJS expressions must bypass the pyfunc operator overloads.
        s1 = 'a=3; b=4; c=1; a + b - c'
        s2 = 'a=3; b=4; c=1; RawJS("a + b") - c'
        assert (evalpy(s1) == '6')
        assert (evalpy(s2) == '6')
        assert ('pyfunc' in py2js(s1))
        assert ('pyfunc' not in py2js(s2))

    def test_overload_funcs_dont_overload_real_funcs(self):
        assert (evalpy('def add(a, b): return a-b\n\nadd(4, 1)') == '3')
        assert (evalpy('def op_add(a, b): return a-b\n\nop_add(4, 1)') == '3')

    def test_comparisons(self):
        assert (py2js('4 > 3') == '4 > 3;')
        assert (py2js('4 is 3') == '4 === 3;')
        assert (evalpy('4 > 4') == 'false')
        assert (evalpy('4 >= 4') == 'true')
        assert (evalpy('4 < 3') == 'false')
        assert (evalpy('4 <= 4') == 'true')
        assert (evalpy('4 == 3') == 'false')
        assert (evalpy('4 != 3') == 'true')
        assert (evalpy('4 == "4"') == 'true')
        assert (evalpy('4 is "4"') == 'false')
        assert (evalpy('4 is not "4"') == 'true')
        assert (evalpy('"c" in "abcd"') == 'true')
        assert (evalpy('"x" in "abcd"') == 'false')
        assert (evalpy('"x" not in "abcd"') == 'true')
        assert (evalpy('3 in [1,2,3,4]') == 'true')
        assert (evalpy('9 in [1,2,3,4]') == 'false')
        assert (evalpy('9 not in [1,2,3,4]') == 'true')
        assert (evalpy('"bar" in {"foo": 3}') == 'false')
        assert (evalpy('"foo" in {"foo": 3}') == 'true')
        assert (evalpy('not (1 is null and 1 is null)') == 'true')

    def test_deep_comparisons(self):
        # Structural (deep) equality and membership for tuples/lists/dicts.
        arr = '[(1,2), (3,4), (5,6), (1,2), (7,8)]\n'
        assert (evalpy((('a=' + arr) + '(1,2) in a')) == 'true')
        assert (evalpy((('a=' + arr) + '(7,8) in a')) == 'true')
        assert (evalpy((('a=' + arr) + '(3,5) in a')) == 'false')
        assert (evalpy((('a=' + arr) + '3 in a')) == 'false')
        assert (evalpy('(2, 3) == (2, 3)') == 'true')
        assert (evalpy('[2, 3] == [2, 3]') == 'true')
        assert (evalpy((((('a=' + arr) + 'b=') + arr) + 'a==b')) == 'true')
        dct = '{"a":7, 3:"foo", "bar": 1, "9": 3}\n'
        assert (evalpy((('d=' + dct) + '"a" in d')) == 'true')
        assert (evalpy((('d=' + dct) + '"3" in d')) == 'true')
        assert (evalpy((('d=' + dct) + '3 in d')) == 'true')
        assert (evalpy((('d=' + dct) + '"bar" in d')) == 'true')
        assert (evalpy((('d=' + dct) + '9 in d')) == 'true')
        assert (evalpy((('d=' + dct) + '"9" in d')) == 'true')
        assert (evalpy((('d=' + dct) + '7 in d')) == 'false')
        assert (evalpy((('d=' + dct) + '"1" in d')) == 'false')
        assert (evalpy('{2: 3} == {"2": 3}') == 'true')
        assert (evalpy('dict(foo=7) == {"foo": 7}') == 'true')
        assert (evalpy((((('a=' + dct) + 'b=') + dct) + 'a==b')) == 'true')
        assert (evalpy('{"foo": 1, "bar": 2}=={"bar": 2, "foo": 1}') == 'true')
        assert (evalpy('{"bar": 2, "foo": 1}=={"foo": 1, "bar": 2}') == 'true')
        d1 = 'd1={"foo": [2, 3, {1:2,3:4,5:["aa", "bb"]}], "bar": None}\n'
        d2 = 'd2={"bar": None, "foo": [2, 3, {5:["aa", "bb"],1:2,3:4}]}\n'
        d3 = 'd3={"foo": [2, 3, {1:2,3:4,5:["aa", "b"]}], "bar": None}\n'
        assert (evalpy((((d1 + d2) + d3) + 'd1 == d2')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + 'd2 == d1')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + 'd1 != d2')) == 'false')
        assert (evalpy((((d1 + d2) + d3) + 'd1 == d3')) == 'false')
        assert (evalpy((((d1 + d2) + d3) + 'd1 != d3')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + 'd2 in [2, d1, 4]')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + 'd2 in ("xx", d2, None)')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + 'd2 not in (1, d3, 2)')) == 'true')
        assert (evalpy((((d1 + d2) + d3) + '4 in [2, d1, 4]')) == 'true')

    def test_truthfulness_of_basic_types(self):
        assert (evalpy('"T" if (1) else "F"') == 'T')
        assert (evalpy('"T" if (0) else "F"') == 'F')
        assert (evalpy('"T" if ("a") else "F"') == 'T')
        assert (evalpy('"T" if ("") else "F"') == 'F')
        assert (evalpy('None is null') == 'true')
        assert (evalpy('None is undefined') == 'false')
        assert (evalpy('undefined is undefined') == 'true')

    def test_truthfulness_of_array_and_dict(self):
        # Python semantics: empty list/dict are falsy (unlike plain JS).
        assert (evalpy('bool([1])') == 'true')
        assert (evalpy('bool([])') == 'false')
        assert (evalpy('"T" if ([1, 2, 3]) else "F"') == 'T')
        assert (evalpy('"T" if ([]) else "F"') == 'F')
        assert (evalpy('if [1]: "T"\nelse: "F"') == 'T')
        assert (evalpy('if []: "T"\nelse: "F"') == 'F')
        assert (evalpy('if [1] and 1: "T"\nelse: "F"') == 'T')
        assert (evalpy('if [] and 1: "T"\nelse: "F"') == 'F')
        assert (evalpy('if [] or 1: "T"\nelse: "F"') == 'T')
        assert (evalpy('[2] or 42') == '[ 2 ]')
        assert (evalpy('[] or 42') == '42')
        assert (evalpy('bool({1:2})') == 'true')
        assert (evalpy('bool({})') == 'false')
        assert (evalpy('"T" if ({"foo": 3}) else "F"') == 'T')
        assert (evalpy('"T" if ({}) else "F"') == 'F')
        assert (evalpy('if {1:2}: "T"\nelse: "F"') == 'T')
        assert (evalpy('if {}: "T"\nelse: "F"') == 'F')
        assert (evalpy('if {1:2} and 1: "T"\nelse: "F"') == 'T')
        assert (evalpy('if {} and 1: "T"\nelse: "F"') == 'F')
        assert (evalpy('if {} or 1: "T"\nelse: "F"') == 'T')
        assert (evalpy('{1:2} or 42') == "{ '1': 2 }")
        assert (evalpy('{} or 42') == '42')
        assert (evalpy('{} or 0') == '0')
        assert (evalpy('None or []') == '[]')
        assert (evalpy('null or 42') == '42')
        assert (evalpy('ArrayBuffer(4) or 42') != '42')
        # The _truthy helper is only emitted when the compiler cannot prove
        # the operand is already a primitive.
        assert py2js('if foo: pass').count('_truthy')
        assert (py2js('if foo.length: pass').count('_truthy') == 0)
        assert (py2js('if 3: pass').count('_truthy') == 0)
        assert (py2js('if True: pass').count('_truthy') == 0)
        assert (py2js('if a == 3: pass').count('_truthy') == 0)
        assert (py2js('if a is 3: pass').count('_truthy') == 0)

    def test_indexing_and_slicing(self):
        c = 'a = [1, 2, 3, 4, 5]\n'
        assert (evalpy((c + 'a[2]')) == '3')
        assert (evalpy((c + 'a[-2]')) == '4')
        assert (evalpy((c + 'a[:]')) == '[ 1, 2, 3, 4, 5 ]')
        assert (evalpy((c + 'a[1:-1]')) == '[ 2, 3, 4 ]')

    def test_assignments(self):
        assert (py2js('foo = 3') == 'var foo;\nfoo = 3;')
        assert (py2js('foo.bar = 3') == 'foo.bar = 3;')
        assert (py2js('foo[i] = 3') == 'foo[i] = 3;')
        code = py2js('foo = 3; bar = 4')
        assert (code.count('var') == 1)
        code = py2js('foo = 3; foo = 4')
        assert (code.count('var') == 1)
        code = py2js('foo = bar = 3')
        assert ('foo = bar = 3' in code)
        assert ('var bar, foo' in code)
        assert (py2js('self') == 'this;')
        assert (py2js('self.foo') == 'this.foo;')
        assert (evalpy('a=[0,0]\na[0]=2\na[1]=3\na', False) == '[2,3]')
        # Bug fix: these two tuple-unpacking checks were bare expressions
        # whose results were discarded; they now actually assert.
        assert (evalpy('x=[1,2,3]\na, b, c = x\nb', False) == '2')
        assert (evalpy('a,b,c = [1,2,3]\nc,b,a = a,b,c\n[a,b,c]', False) == '[3,2,1]')
        assert (py2js('xx, yy = 3, 4').count('xx') == 2)
        assert (py2js('xx[0], yy[0] = 3, 4').count('xx') == 1)
        assert (py2js('xx.a, yy.a = 3, 4').count('xx') == 1)
        code = py2js('class Foo:\n bar=3\n bar = bar + 1')
        assert (code.count('bar') == 3)
        assert (code.count('Foo.prototype.bar') == 3)

    def test_aug_assignments(self):
        assert (evalpy('x=5; x+=1; x') == '6')
        assert (evalpy('x=5; x/=2; x') == '2.5')
        assert (evalpy('x=5; x**=2; x') == '25')
        assert (evalpy('x=5; x//=2; x') == '2')

    def test_basic_types(self):
        assert (py2js('True') == 'true;')
        assert (py2js('False') == 'false;')
        assert (py2js('None') == 'null;')
        assert (py2js('"bla\\"bla"') == '"bla\\"bla";')
        assert (py2js('3') == '3;')
        assert (py2js('3.1415') == '3.1415;')
        assert (py2js('[1,2,3]') == '[1, 2, 3];')
        assert (py2js('(1,2,3)') == '[1, 2, 3];')
        assert (py2js('{"foo": 3, "bar": 4}') == '({foo: 3, bar: 4});')
        assert (evalpy('a={"foo": 3, "bar": 4};a') == '{ foo: 3, bar: 4 }')

    def test_dict_literals(self):
        def tester1():
            a = 'foo'
            d = {a: 'bar1', 2: 'bar2', ('sp' + 'am'): 'bar3'}
            print(d.foo, d[2], d.spam)
        js = py2js(tester1)
        assert (evaljs((js + 'tester1()')) == 'bar1 bar2 bar3\nnull')

    def test_ignore_import_of_compiler(self):
        modname = pscript.__name__
        assert (py2js(('from %s import x, y, z\n42' % modname)) == '42;')

    def test_import(self):
        with raises(JSError):
            py2js('import time')
        import time
        assert (abs((float(evalpy('time()')) - time.time())) < 0.5)
        # Bug fix: the startswith() check was a bare expression whose result
        # was discarded; it now actually asserts.
        assert evalpy('t0=perf_counter(); t1=perf_counter(); (t1-t0)').startswith('0.0')

    def test_funcion_call(self):
        jscode = 'var foo = function (x, y) {return x+y;};'
        assert (evaljs((jscode + py2js('foo(2,2)'))) == '4')
        assert (evaljs((jscode + py2js('foo("so ", True)'))) == 'so true')
        assert (evaljs((jscode + py2js('a=[1,2]; foo(*a)'))) == '3')
        assert (evaljs((jscode + py2js('a=[1,2]; foo(7, *a)'))) == '8')
        assert (evalpy('d={"_base_class": console};d._base_class.log(4)') == '4')
        assert (evalpy('d={"_base_class": console};d._base_class.log()') == '')
        # Methods looked up via subscript must keep their `this` binding.
        jscode = 'var foo = function () {return this.val};'
        jscode += 'var d = {"foo": foo, "val": 7};\n'
        assert (evaljs((jscode + py2js('d["foo"]()'))) == '7')
        assert (evaljs((jscode + py2js('d["foo"](*[3, 4])'))) == '7')

    def test_instantiation(self):
        # CamelCase names are treated as classes and get `new`.
        assert ('new' in py2js('a = Bar()'))
        assert ('new' in py2js('a = x.Bar()'))
        assert ('new' not in py2js('a = foo()'))
        assert ('new' not in py2js('a = _foo()'))
        assert ('new' not in py2js('a = _Foo()'))
        assert ('new' not in py2js('a = this.Bar()'))
        assert ('new' not in py2js('a = JSON.stringify(x)'))
        jscode = 'function Bar() {this.x = 3}\nvar x=1;\n'
        assert (evaljs((jscode + py2js('a=Bar()\nx'))) == '1')
        # Known definitions override the name heuristic; last one wins.
        assert ('new' in py2js('class foo:pass\na = foo()'))
        assert ('new' not in py2js('class foo:pass\ndef foo():pass\na = foo()'))
        assert ('new' not in py2js('def foo():pass\nclass foo:pass\na = foo()'))
        assert ('new' not in py2js('def Bar():pass\na = Bar()'))
        assert ('new' in py2js('def Bar():pass\nclass Bar:pass\na = Bar()'))
        assert ('new' in py2js('class Bar:pass\ndef Bar():pass\na = Bar()'))

    def test_pass(self):
        assert (py2js('pass') == '')

    def test_delete(self):
        assert (evalpy('d={}\nd.foo=3\n\nd') == '{ foo: 3 }')
        assert (evalpy('d={}\nd.foo=3\ndel d.foo\nd') == '{}')
        assert (evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo\nd') == '{ bar: 3 }')
        assert (evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo, d["bar"]\nd') == '{}')
class LocalTrack(LocalItem):
    """Typed model of a locally stored track (``is_local`` is literally True).

    Field names mirror the JSON payload these objects are parsed from;
    several fields are structurally empty for local files.
    """

    album: LocalAlbum
    artists: List[LocalArtist]
    # Local tracks carry no market availability entries.
    available_markets: List[None]
    disc_number: int
    # NOTE(review): duration apparently arrives either as an int (ms) or as a
    # dict -- confirm against the producing API before tightening this type.
    duration_ms: Union[(int, dict)]
    explicit: bool
    external_ids: dict
    external_urls: dict
    is_local: Literal[True]
    popularity: int
    # Local files never expose a preview clip.
    preview_url: None
    track_number: int
    uri: str
class PlaySourceShowingPolicy(GObject.Object):
    """Policy object that tracks one-time initialisation.

    `initialise` is idempotent: only the first call flips the internal flag;
    later calls are no-ops.
    """

    def __init__(self, list_view):
        super().__init__()
        self.counter = 0
        self._has_initialised = False

    def initialise(self, album_manager):
        # Only the very first call has any effect.
        if not self._has_initialised:
            self._has_initialised = True
def to_case_block(c: Case) -> Tuple[(Union[_core_wf.IfBlock], typing.List[Promise])]:
    """Convert a conditional `Case` into an IfBlock plus collected promises.

    The case expression is narrowed to a comparison/conjunction expression
    and transformed into a boolean expression for the IfBlock condition.
    NOTE(review): when `c.output_promise` is None this falls through and
    returns None implicitly -- confirm that is intentional.
    """
    (expr, promises) = transform_to_boolexpr(cast(Union[(ComparisonExpression, ConjunctionExpression)], c.expr))
    if (c.output_promise is not None):
        # The case's output node becomes the "then" branch of the IfBlock.
        n = c.output_node
        return (_core_wf.IfBlock(condition=expr, then_node=n), promises)
class UDisksDBusWrapper():
    """Convenience wrapper around a UDisks D-Bus object proxy.

    Lazily creates both the typed interface and the
    org.freedesktop.DBus.Properties interface on first use.

    NOTE(review): `iface`, `object_path` and `props` read like they were
    written as ``@property`` accessors (e.g. ``__getattr__`` delegates to
    ``self.iface`` as if it were an attribute) -- confirm whether decorators
    were lost.  Also, ``path`` is declared in ``__slots__`` and used by
    ``__repr__`` but never assigned in ``__init__``.
    """

    __slots__ = ['obj', 'iface_type', '_iface', '_props_iface', 'path']

    def __init__(self, bus, root, path, iface_type):
        # Remote object proxy; interfaces are created lazily on demand.
        self.obj = bus.get_object(root, path)
        self.iface_type = iface_type
        self._iface = None
        self._props_iface = None

    def __getattr__(self, member):
        # Unknown attributes are delegated to the typed D-Bus interface.
        return self.iface.__getattr__(member)

    def connect_to_signal(self, *a, **k):
        # Signal subscription goes through the typed interface as well.
        return self.iface.connect_to_signal(*a, **k)

    def iface(self):
        # Cached dbus.Interface of the configured interface type.
        if (self._iface is None):
            self._iface = dbus.Interface(self.obj, self.iface_type)
        return self._iface

    def object_path(self):
        return self.obj.object_path

    def props(self):
        # Cached wrapper over the standard Properties interface.
        if (self._props_iface is None):
            iface = dbus.Interface(self.obj, 'org.freedesktop.DBus.Properties')
            self._props_iface = UDisksPropertyWrapper(iface, self.iface_type)
        return self._props_iface

    def __repr__(self):
        return ('<UDisksDBusWrapper: %s (%s)>' % (self.iface_type, self.path))
def distributed_worker(main_func: Callable[(..., _RT)], args: Tuple[(Any, ...)], kwargs: Dict[(str, Any)], backend: str, init_method: Optional[str]=None, dist_params: Optional[DistributedParams]=None, return_save_file: Optional[str]=None, timeout: timedelta=DEFAULT_TIMEOUT, shared_context: Optional[BaseSharedContext]=None) -> _RT:
    """Run `main_func(*args, **kwargs)` inside a distributed process group.

    Restores the shared context (if supplied), derives distributed
    parameters from the environment when not given, and may rewrite
    args/backend for a CPU run.  When `return_save_file` is set, the return
    value is additionally persisted by `save_return_deco`, keyed by the
    worker's global rank.
    """
    if shared_context:
        # Re-install the context object inherited from the launcher process.
        set_shared_context(shared_context)
    dist_params = (dist_params or DistributedParams.from_environ())
    # May downgrade the run to CPU -- presumably when GPUs are unavailable;
    # confirm against _maybe_convert_to_cpu_run.
    (args, backend) = _maybe_convert_to_cpu_run(args, backend)
    with enable_dist_process_groups(backend, init_method, dist_params, timeout):
        # Keep detectron2's local process group in sync with mcv's.
        d2_comm._LOCAL_PROCESS_GROUP = mcv_comm._LOCAL_PROCESS_GROUP
        deco = save_return_deco(return_save_file, dist_params.global_rank)
        return deco(main_func)(*args, **kwargs)
def lazy_import():
    """Import the fastly model classes on demand and publish them as globals.

    Presumably deferred to avoid import cycles at module load time.
    """
    from fastly.model.logging_common_response import LoggingCommonResponse
    from fastly.model.logging_newrelicotlp_additional import LoggingNewrelicotlpAdditional
    from fastly.model.service_id_and_version_string import ServiceIdAndVersionString
    from fastly.model.timestamps import Timestamps
    # Expose each class at module scope under its own name.
    module_globals = globals()
    for model_cls in (LoggingCommonResponse, LoggingNewrelicotlpAdditional,
                      ServiceIdAndVersionString, Timestamps):
        module_globals[model_cls.__name__] = model_cls
class qos(object):
    """Per-address fair queueing of raw IP packets.

    Packets are bucketed by source or destination address (chosen by the
    queue type given at construction); `get_queue` drains one packet from
    every bucket per call, round-robin style.
    """

    __qos_queue = None
    __qtype = 0

    def __init__(self, qtype):
        # Maps address bytes -> list of queued packets (FIFO per bucket).
        self.__qos_queue = {}
        self.__qtype = qtype

    def add_to_queue(self, ipdata):
        """Append a raw IP packet to the bucket of its src/dst address."""
        version = (ipdata[0] & 0xF0) >> 4
        if version == 4:
            # IPv4: 4-byte addresses at offsets 12 (src) and 16 (dst).
            key = ipdata[12:16] if self.__qtype == QTYPE_SRC else ipdata[16:20]
        else:
            # Otherwise treated as IPv6: 16-byte addresses at offsets 8 / 24.
            key = ipdata[8:24] if self.__qtype == QTYPE_SRC else ipdata[24:40]
        self.__qos_queue.setdefault(key, []).append(ipdata)

    def get_queue(self):
        """Pop one packet from every bucket; drop buckets that run empty."""
        drained = []
        exhausted = []
        for key, bucket in self.__qos_queue.items():
            drained.append(bucket.pop(0))
            if not bucket:
                exhausted.append(key)
        for key in exhausted:
            del self.__qos_queue[key]
        return drained
class TestSyncSecAggServer():
    """Tests for the synchronous secure-aggregation server."""

    def test_sync_secagg_server_init(self) -> None:
        """Server init builds one fixed-point converter per model parameter."""
        model = SampleNet(TwoFC())
        fixed_point_config = FixedPointConfig(num_bytes=2, scaling_factor=100)
        server = instantiate(SyncSecAggServerConfig(fixedpoint=fixed_point_config), global_model=model)
        # The two-layer model yields four parameter tensors, hence four
        # converters, each carrying the configured scaling factor.
        assertEqual(len(server._secure_aggregator.converters), 4)
        assertEqual(server._secure_aggregator.converters['fc2.bias'].scaling_factor, 100)

    def test_secure_aggregator_receive_update_from_client(self) -> None:
        """Weighted client updates aggregate to the expected weighted average."""
        scaling_factor = 100
        fixed_point_config = FixedPointConfig(num_bytes=2, scaling_factor=scaling_factor)
        server = instantiate(SyncSecAggServerConfig(fixedpoint=fixed_point_config), global_model=SampleNet(create_model_with_value(0)))
        server.init_round()
        # Two clients with different parameter values and weights.
        m1_param = 7.2345
        m1_w = 3.0
        model1 = create_model_with_value(m1_param)
        server.receive_update_from_client(Message(SampleNet(model1), weight=m1_w))
        m2_param = (- 3.45612)
        m2_w = 7.0
        model2 = create_model_with_value(m2_param)
        server.receive_update_from_client(Message(SampleNet(model2), weight=m2_w))
        # Expected fixed-point weighted sum, rounded the way the aggregator
        # rounds before descaling.
        expected_param = float(round((((m1_param * scaling_factor) * m1_w) + ((m2_param * scaling_factor) * m2_w))))
        server.step()
        # An empty string means every parameter matched the expected value.
        mismatched = model_parameters_equal_to_value(server.global_model.fl_get_module(), ((- (expected_param / scaling_factor)) / (m1_w + m2_w)))
        assertEqual(mismatched, '', mismatched)
def _add_analysis_filter_to_query(key: str, value: Any, subkey: str):
    """Build a filter clause for a sub-field of AnalysisEntry.

    Known columns are compared directly (the `summary` array via the array
    filter helper); anything else is matched inside the JSON blob.
    """
    if not hasattr(AnalysisEntry, subkey):
        # Not a real column -- look the key up inside the JSON document.
        return _add_json_filter(key, value, subkey)
    if subkey == 'summary':
        # summary is an array column and needs the dedicated array filter.
        return _get_array_filter(AnalysisEntry.summary, key, value)
    return getattr(AnalysisEntry, subkey) == value
class StartStopExpand(Expand):
    """Expand a start/end/step range into formatted, grouped values.

    Values are generated inclusively from `self.start` to `self.end` in
    increments of `self.step`, grouped by `self.grouper_key`, and rendered
    through `self.format`.
    """

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        # Generate the inclusive range [start, end] with the configured step.
        # (Local renamed from `all`, which shadowed the builtin.)
        values = []
        current = self.start
        while current <= self.end:
            values.append(current)
            current += self.step
        # groupby only groups consecutive equal keys, which is what we want
        # for an ordered range.
        grouped = [list(g) for _, g in itertools.groupby(values, key=self.grouper_key)]
        self.groups = [[self.format(v) for v in g] for g in grouped]

    def parse_config(self):
        """Reject the common 'stop' typo before delegating to the base parser."""
        if 'stop' in self._config:
            raise ValueError(f"Use 'end' not 'stop' in loop. {self._config}")
        super().parse_config()

    def format(self, x):
        """Identity formatting; subclasses may override."""
        return x
def train_model(train_file, eval_file, scale, output_dir):
    """Train an EDSR super-resolution model on the given dataset files."""
    # Datasets: augmented training data and a plain evaluation set.
    train_dataset = TrainAugmentDataset(train_file, scale=scale)
    eval_dataset = EvalDataset(eval_file)
    args = TrainingArguments(output_dir=output_dir, num_train_epochs=1000)
    # Large EDSR variant: 32 residual blocks, 256 feature channels.
    model = EdsrModel(EdsrConfig(scale=scale, n_resblocks=32, n_feats=256))
    Trainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    ).train()
def test_window_opacity_set_to_default_on_startup(mult_opacity_server: FlashServer, list_only_test_windows: None, windows: list[Window]) -> None:
    """Each window should come up at its configured default opacity."""
    expected_opacities = (0.2, 0.5)
    with server_running(mult_opacity_server):
        for window, expected in zip(windows, expected_opacities):
            assert window.opacity == pytest.approx(expected)
class SyncHighlighter(YamlHighlighter):
    """YAML highlighter for the sync-configuration vocabulary.

    Known configuration keys are rendered dark blue; wildcard markers are
    bold dark green.
    """

    def __init__(self, parent=None):
        YamlHighlighter.__init__(self, parent)
        # Recognised configuration keys (word-bounded regexes).
        keywords = [
            '\\bignore_hosts\\b', '\\bsync_hosts\\b', '\\bignore_nodes\\b',
            '\\bsync_nodes\\b', '\\bignore_topics\\b', '\\bignore_publishers\\b',
            '\\bignore_topics\\b', '\\bsync_topics\\b', '\\bignore_subscribers\\b',
            '\\bsync_services\\b', '\\bsync_topics_on_demand\\b',
            '\\bsync_remote_nodes\\b', '\\bignore_services\\b',
            '\\bdo_not_sync\\b', '\\bresync_on_reconnect\\b',
            '\\bresync_on_reconnect_timeout\\b',
        ]
        for keyword in keywords:
            self.rules.append((self._create_regexp(keyword), self._create_format(Qt.darkBlue)))
        # Wildcard markers: bare '*', trailing '*' and '/*'.
        self.rules.append((self._create_regexp('\\b\\*|\\*\\B|\\/\\*'), self._create_format(Qt.darkGreen, 'bold')))
class Solution():
    def checkInclusion(self, p: str, s: str) -> bool:
        """Return True if some permutation of `p` occurs as a substring of `s`.

        Sliding-window counting: track character counts of the current window
        of `s`; any character outside `p`'s alphabet restarts the window just
        after it.  O(len(s)) time, O(len(p)) space.
        """
        from collections import Counter
        if not p:
            # Edge case: the empty string is trivially a permutation of
            # itself and a substring of anything.
            return True
        need = Counter(p)
        window_len = len(p)
        window = Counter()
        start = 0
        for i, ch in enumerate(s):
            if ch not in need:
                # This character can never be part of a match; restart.
                start = i + 1
                window = Counter()
                continue
            window[ch] += 1
            if i - start + 1 > window_len:
                # Shrink from the left; drop exhausted counts so equality
                # with `need` is not blocked by stale zero entries.
                left = s[start]
                window[left] -= 1
                if window[left] == 0:
                    del window[left]
                start += 1
            if i - start + 1 == window_len and window == need:
                return True
        return False
class RxFrameErr(base_tests.SimpleDataPlane):
    """Verify that a port-stats reply carries an rx_frame_err counter."""

    def runTest(self):
        logging.info('Running Rx_Frame_Err test')
        # Bug fix: in Python 3, dict.keys() returns a view with no .sort()
        # method (the old `keys(); keys.sort()` pair raised AttributeError).
        of_ports = sorted(config['port_map'].keys())
        self.assertTrue(len(of_ports) > 1, 'Not enough ports for test')
        # Start from a clean flow table.
        delete_all_flows(self.controller)
        logging.info('Send Port_Stats Request')
        logging.info('Verify reply has rx_frame_err count ')
        counter = get_portstats(self, of_ports[0])
        # Index 8 of the stats tuple holds the frame-error count.
        rx_fr_err = counter[8]
        logging.info('Recieve Frame Errors count is :' + str(rx_fr_err))
# NOTE(review): the bare `_dict` below looks like decorator residue (e.g. a
# stripped `@to_dict`) -- confirm against the original source.
_dict
def _get_default_genesis_params(genesis_state: AccountState) -> Iterable[Tuple[(str, Union[(BlockNumber, int, None, bytes, Address, Hash32)])]]:
    """Yield (name, value) pairs for the default genesis header parameters.

    `state_root` is skipped when an explicit genesis state is supplied
    (presumably its root is derived from that state elsewhere); a fresh
    `timestamp` is always appended last.
    """
    for (key, value) in GENESIS_DEFAULTS:
        if ((key == 'state_root') and genesis_state):
            # Caller supplies the state; do not emit the default root.
            pass
        else:
            (yield (key, value))
    (yield ('timestamp', int(time.time())))
def extractHallucieWordpressCom(item):
    """Build a release message for hallucie.wordpress.com feed items.

    Returns None for previews or items without volume/chapter info, a release
    message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def check_errors(thr, shape_and_axes, atol=2e-05, rtol=0.001):
    """Check a compiled FFT against numpy's reference, forward and inverse."""
    dtype = numpy.complex64
    shape, axes = shape_and_axes
    host_data = get_test_array(shape, dtype)
    fft_compiled = FFT(host_data, axes=axes).compile(thr)

    # Forward transform, executed in place on the device array.
    dev = thr.to_device(host_data)
    fft_compiled(dev, dev)
    expected_fwd = numpy.fft.fftn(host_data, axes=axes).astype(dtype)
    assert diff_is_negligible(dev.get(), expected_fwd, atol=atol, rtol=rtol)

    # Inverse transform on a fresh copy of the input.
    dev = thr.to_device(host_data)
    fft_compiled(dev, dev, inverse=True)
    expected_inv = numpy.fft.ifftn(host_data, axes=axes).astype(dtype)
    assert diff_is_negligible(dev.get(), expected_inv, atol=atol, rtol=rtol)
def get_perturbed_head(head):
    """Simulate latency by randomly perturbing the head pointer.

    Walks upward through `blocks` a geometric number of steps (while latency
    "hits" occur), then takes up to that many random downward steps through
    `children`.
    """
    steps_up = 0
    # Walk upward: follow the block entry at index 1 (presumably the parent).
    while get_height(head) > 0 and random.random() < LATENCY_FACTOR:
        head = blocks[head][1]
        steps_up += 1
    # Randomly descend again through known children.
    for _ in range(random.randrange(steps_up + 1)):
        if head in children:
            head = random.choice(children[head])
    return head
class OptionSeriesPackedbubbleLabel(Options):
    """Highcharts data-label options for packed-bubble series.

    Each option appears as a getter/setter pair.  NOTE(review): every name
    is defined twice; as written the later def shadows the earlier one, so
    this only works if ``@property``/``@x.setter`` decorators were stripped
    from the original source -- confirm upstream.
    """

    def boxesToAvoid(self):
        # Default: None.
        return self._config_get(None)

    def boxesToAvoid(self, value: Any):
        self._config(value, js_type=False)

    def connectorAllowed(self):
        # Default: False.
        return self._config_get(False)

    def connectorAllowed(self, flag: bool):
        self._config(flag, js_type=False)

    def connectorNeighbourDistance(self):
        # Default: 24 (pixels, per the getter's default value).
        return self._config_get(24)

    def connectorNeighbourDistance(self, num: float):
        self._config(num, js_type=False)

    def enabled(self):
        # Default: True.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def format(self):
        # Default: the literal string 'undefined'.
        return self._config_get('undefined')

    def format(self, text: str):
        self._config(text, js_type=False)

    def formatter(self):
        return self._config_get('undefined')

    def formatter(self, value: Any):
        self._config(value, js_type=False)

    def maxFontSize(self):
        return self._config_get(None)

    def maxFontSize(self, num: float):
        self._config(num, js_type=False)

    def minFontSize(self):
        return self._config_get(None)

    def minFontSize(self, num: float):
        self._config(num, js_type=False)

    def onArea(self):
        return self._config_get(None)

    def onArea(self, flag: bool):
        self._config(flag, js_type=False)

    def style(self) -> 'OptionSeriesPackedbubbleLabelStyle':
        # Sub-options object for CSS-like styling.
        return self._config_sub_data('style', OptionSeriesPackedbubbleLabelStyle)

    def useHTML(self):
        # Default: False.
        return self._config_get(False)

    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
def test_def_active_live_nok_nok(unused_tcp_port):
    """A one-port range whose port is busy raises NoPortsInRangeException.

    The failure must occur both with and without
    `will_close_then_reopen_socket`.
    """
    single_port_range = range(unused_tcp_port, unused_tcp_port + 1)
    host, port, orig_sock = port_handler.find_available_port(
        custom_range=single_port_range, custom_host='127.0.0.1'
    )
    assert port == unused_tcp_port
    # The returned socket is real and still open.
    assert orig_sock is not None
    assert orig_sock.fileno() != -1
    # Occupy the port with a live server; further lookups must fail.
    _simulate_server(host, port, orig_sock)
    with pytest.raises(port_handler.NoPortsInRangeException):
        port_handler.find_available_port(
            custom_range=single_port_range, custom_host='127.0.0.1'
        )
    with pytest.raises(port_handler.NoPortsInRangeException):
        port_handler.find_available_port(
            custom_range=single_port_range,
            custom_host='127.0.0.1',
            will_close_then_reopen_socket=True,
        )
def protocol_specific_classes(alice):
    """Return the (API, receipt, status, payload-factory) classes matching
    whichever ETH protocol version `alice`'s connection speaks.

    Raises a plain Exception when no known ETH protocol is present.
    """
    version_table = (
        (ETHProtocolV63, (ETHV63API, ETHV63HandshakeReceipt, StatusV63, StatusV63PayloadFactory)),
        (ETHProtocolV64, (ETHV64API, ETHHandshakeReceipt, Status, StatusPayloadFactory)),
        (ETHProtocolV65, (ETHV65API, ETHHandshakeReceipt, Status, StatusPayloadFactory)),
    )
    # Checked in ascending version order, same as the original chain.
    for protocol, classes in version_table:
        if alice.connection.has_protocol(protocol):
            return classes
    raise Exception('No ETH protocol found')
class Notifier(QtCore.QObject):
    """Sends a desktop notification summarizing an OCR capture.

    NOTE(review): several defs below take no ``self`` and there is a stray
    ``(Capture)`` expression before ``_send_notification`` -- this strongly
    suggests ``@staticmethod`` / ``@QtCore.Slot(Capture)`` decorators were
    lost from the original source.  As written, calls like
    ``self._compose_notification(capture)`` would mis-bind arguments;
    confirm against upstream before relying on this class.
    """

    def __init__(self, parent: Optional[QtCore.QObject]) -> None:
        super().__init__(parent=parent)
        # Bridge object: notifications are triggered via a Qt signal.
        self.com = Communicate(parent=self)
        self.com.send_notification.connect(self._send_notification)

    def _compose_notification(capture: Capture) -> tuple[(str, str)]:
        """Build the (title, text) pair for a capture's notification."""
        # Nothing recognised -> ask the user to retry.
        if ((not capture.ocr_text) or (len(capture.ocr_text.strip()) < 1)):
            title = _('Nothing captured!')
            text = _('Please try again.')
            return (title, text)
        # Body preview: collapse newlines, then shorten to 45 chars.
        text = capture.ocr_text.strip().replace(os.linesep, ' ')
        text = textwrap.shorten(text, width=45)
        # Title: count captured units according to the magic that was applied.
        if (capture.ocr_magic == 'ParagraphMagic'):
            # Paragraphs are separated by blank lines (double newline).
            count = (capture.ocr_text.count((os.linesep * 2)) + 1)
            title = translate.ngettext('1 paragraph captured', '{count} paragraphs captured', count).format(count=count)
        elif (capture.ocr_magic == 'EmailMagic'):
            # NOTE(review): str.count('') returns len+1, not an email count --
            # a separator character (e.g. '@') may have been lost here.
            count = capture.ocr_text.count('')
            title = translate.ngettext('1 email captured', '{count} emails captured', count).format(count=count)
        elif (capture.ocr_magic == 'SingleLineMagic'):
            # Words are separated by single spaces.
            count = (capture.ocr_text.count(' ') + 1)
            title = translate.ngettext('1 word captured', '{count} words captured', count).format(count=count)
        elif (capture.ocr_magic == 'MultiLineMagic'):
            count = (capture.ocr_text.count(os.linesep) + 1)
            title = translate.ngettext('1 line captured', '{count} lines captured', count).format(count=count)
        elif (capture.ocr_magic == 'UrlMagic'):
            # One URL per line.
            count = (capture.ocr_text.count(os.linesep) + 1)
            title = translate.ngettext('1 URL captured', '{count} URLs captured', count).format(count=count)
        elif (capture.mode == CaptureMode.RAW):
            # Character count, not counting newline sequences as multiple.
            count = len(capture.ocr_text)
            count -= ((len(os.linesep) - 1) * capture.ocr_text.count(os.linesep))
            title = translate.ngettext('1 character captured', '{count} characters captured', count).format(count=count)
        else:
            title = ''
        return (title, text)

    # NOTE(review): stray expression -- likely the argument of a stripped
    # `@QtCore.Slot(Capture)` decorator for the method below.
    (Capture)
    def _send_notification(self, capture: Capture) -> None:
        """Compose and dispatch a notification for the given capture."""
        (title, message) = self._compose_notification(capture)
        # Prefer the native notify-send tool on Linux when available.
        if ((sys.platform == 'linux') and shutil.which('notify-send')):
            self._send_via_libnotify(title=title, message=message)
        else:
            self._send_via_qt_tray(title=title, message=message, ocr_text=capture.ocr_text, ocr_magic=capture.ocr_magic)
        self.com.on_notification_sent.emit()

    def _send_via_libnotify(title: str, message: str) -> None:
        """Show the notification via the notify-send CLI tool."""
        logger.debug('Send notification via notify-send')
        icon_path = ((system_info.get_resources_path() / 'icons') / 'notification.png')
        # Escape characters that notify-send would otherwise interpret.
        message = message.replace('\\', '\\\\')
        message = message.replace('-', '\\-')
        cmds = ['notify-send', f'--icon={icon_path.resolve()}', '--app-name=NormCap', f'{title}', f'{message}']
        # Fire and forget; detach from our session so it outlives us.
        subprocess.Popen(cmds, start_new_session=True)

    def _send_via_qt_tray(self, title: str, message: str, ocr_text: Optional[str], ocr_magic: Optional[str]) -> None:
        """Show the notification via the Qt system-tray icon."""
        logger.debug('Send notification via QT')
        parent = self.parent()
        if (not isinstance(parent, QtWidgets.QSystemTrayIcon)):
            raise TypeError('Parent is expected to be of type QSystemTrayIcon.')
        # Replace any previous click handler to avoid stacking connections.
        if parent.isSignalConnected(QtCore.QMetaMethod.fromSignal(parent.messageClicked)):
            parent.messageClicked.disconnect()
        if (ocr_text and (len(ocr_text.strip()) >= 1)):
            parent.messageClicked.connect((lambda : self._open_ocr_result(text=ocr_text, applied_magic=ocr_magic)))
        parent.show()
        parent.showMessage(title, message, QtGui.QIcon(':notification'))

    def _open_ocr_result(text: str, applied_magic: Optional[str]) -> None:
        """Open the captured result: URLs/mailto directly, otherwise a temp file."""
        logger.debug('Notification clicked.')
        urls = []
        if (applied_magic == 'UrlMagic'):
            urls = text.split()
        elif (applied_magic == 'EmailMagic'):
            urls = [f"mailto:{text.replace(',', ';').replace(' ', '')}"]
        else:
            # Fall back to dumping the text and opening it with the default app.
            temp_file = (Path(tempfile.gettempdir()) / 'normcap_temporary_result.txt')
            temp_file.write_text(text)
            urls = [temp_file.as_uri()]
        for url in urls:
            logger.debug('Opening URI %s...', url)
            result = QtGui.QDesktopServices.openUrl(QtCore.QUrl(url, QtCore.QUrl.ParsingMode.TolerantMode))
            logger.debug('Opened URI with result=%s', result)
# NOTE(review): the line below is not valid Python on its own -- it reads like
# the argument list of a stripped numba decorator, e.g.
# `@jit(nopython=True, cache=const.numba_cache)`.  Restore the decorator.
(nopython=True, cache=const.numba_cache)
def get_u_vec(rvw, phi, R, s):
    """Return the unit direction of relative surface velocity, rotated by -phi.

    A rolling state (s == const.rolling) or zero relative velocity yields the
    x-axis unit vector; otherwise the normalized relative velocity is rotated
    by -phi via ptmath.coordinate_rotation.
    """
    if (s == const.rolling):
        return np.array([1.0, 0.0, 0.0])
    rel_vel = rel_velocity(rvw, R)
    if (rel_vel == 0.0).all():
        # Degenerate case: no sliding at all.
        return np.array([1.0, 0.0, 0.0])
    return ptmath.coordinate_rotation(ptmath.unit_vector(rel_vel), (- phi))
def extractDramanticsBlogspotCom(item):
    """Release-message extractor for dramantics.blogspot.com feed items.

    None for previews / untagged chapters, a built release message for known
    tags, False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    for tagname, name, tl_type in (('PRC', 'PRC', 'translated'),
                                   ('Loiterous', 'Loiterous', 'oel')):
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
def filter_all_services(wf, query):
    """Return homebrew services, filtered by the second word of `query`."""
    services = wf.cached_data('brew_all_services', get_all_services, session=True)
    words = query.split()
    if len(words) <= 1:
        # No filter term supplied -- return everything.
        return services
    return wf.filter(words[1], services, key=lambda service: service['name'], match_on=MATCH_SUBSTRING)
class OptionSeriesOrganizationDatalabelsLinktextpathAttributes(Options):
    """textPath attribute options for organization-series data labels.

    NOTE(review): each name is defined twice (getter- and setter-shaped);
    as written the later def shadows the earlier one, so this only works if
    ``@property``/``@x.setter`` decorators were stripped -- confirm upstream.
    """

    def startOffset(self):
        # Default: '95%'.
        return self._config_get('95%')

    def startOffset(self, num: float):
        self._config(num, js_type=False)

    def textAnchor(self):
        # Default: 'end'.
        return self._config_get('end')

    def textAnchor(self, text: str):
        self._config(text, js_type=False)
class inDB(DBValidator):
    """Validator checking that a value is an existing record in a table.

    Supports single ids and (with ``multiple=True``) lists of ids, and can
    render labelled options for form widgets.
    """

    def __init__(self, db, tablename, fieldname='id', dbset=None, label_field=None, multiple=False, orderby=None, message=None):
        super().__init__(db, tablename, fieldname=fieldname, dbset=dbset, message=message)
        # Field used to render human-readable labels in options().
        self.label_field = label_field
        self.multiple = multiple
        self.orderby = orderby

    def sorting(self):
        # NOTE(review): used as `orderby=self.sorting` in _get_rows, which
        # passes the bound method unless this was originally a @property --
        # confirm whether a decorator was stripped.
        if callable(self.orderby):
            return self.orderby(self.table)
        return None

    def _get_rows(self):
        # All candidate records, in the configured order.
        return self.dbset.select(orderby=self.sorting)

    def options(self, zero=True):
        """Return (id, label) pairs for every selectable record."""
        records = self._get_rows()
        if self.label_field:
            items = [(r.id, str(r[self.label_field])) for r in records]
        elif self.db[self.tablename]._format:
            # Fall back to the table's configured format string.
            items = [(r.id, (self.db[self.tablename]._format % r)) for r in records]
        else:
            items = [(r.id, r.id) for r in records]
        return items

    def __call__(self, value):
        """Validate `value`: (value, None) on success, (value, error) otherwise."""
        if self.multiple:
            values = (value if isinstance(value, list) else [value])
            # Every submitted id must exist in the database.
            records = self.dbset.where(self.field.belongs(values)).select(self.field, distinct=True).column(self.field)
            if set(values).issubset(set(records)):
                return (values, None)
        elif self.dbset.where((self.field == value)).count():
            return (value, None)
        return (value, translate(self.message))
class TestUniqueValuesShare(BaseFeatureDataQualityMetricsTest):
    """Data-quality test on the share of unique values in a column."""

    name: ClassVar = 'Share of Unique Values'

    def get_stat(self, current: NumericCharacteristics):
        # The raw statistic this test is based on (a percentage).
        return current.unique_percentage

    def get_condition_from_reference(self, reference: Optional[ColumnCharacteristics]) -> TestValueCondition:
        """Derive the pass condition (±10% relative) from reference data."""
        if (reference is not None):
            if (not isinstance(reference, (NumericCharacteristics, CategoricalCharacteristics, DatetimeCharacteristics))):
                raise ValueError(f'{self.column_name} should be numerical, categorical or datetime')
            unique_percentage = reference.unique_percentage
            if (unique_percentage is not None):
                # Percentage is converted to a share in [0, 1].
                return TestValueCondition(eq=approx((unique_percentage / 100.0), relative=0.1))
        raise ValueError('Neither required test parameters nor reference data has been provided.')

    def calculate_value_for_test(self) -> Optional[Numeric]:
        """Compute the current unique-value share, or None when unavailable."""
        features_stats = self.metric.get_result().current_characteristics
        if isinstance(features_stats, TextCharacteristics):
            raise ValueError(f'{self.column_name} should be numerical, categorical or datetime')
        unique_percentage = features_stats.unique_percentage
        if (unique_percentage is None):
            return None
        return (unique_percentage / 100.0)

    def get_description(self, value: Numeric) -> str:
        return f'The share of the unique values in the column **{self.column_name}** is {value:.3}. The test threshold is {self.get_condition()}.'
class OptionPlotoptionsVariwideStatesInactive(Options):
    """Inactive-state options for variwide series.

    NOTE(review): `enabled` and `opacity` are each defined twice (getter-
    and setter-shaped); this only works if ``@property``/``@x.setter``
    decorators were stripped from the original source -- confirm upstream.
    """

    def animation(self) -> 'OptionPlotoptionsVariwideStatesInactiveAnimation':
        # Sub-options object describing the state transition animation.
        return self._config_sub_data('animation', OptionPlotoptionsVariwideStatesInactiveAnimation)

    def enabled(self):
        # Default: True.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def opacity(self):
        # Default: 0.2.
        return self._config_get(0.2)

    def opacity(self, num: float):
        self._config(num, js_type=False)
def prophet_train(data):
    """Fit a Prophet model on one series and forecast 7 days ahead.

    The model is trained on a log1p-transformed target (`yhat` is inverted
    with expm1) and the resulting `pro_pred` column is clamped: never
    negative, and bounded by the observed target range with a small margin,
    capped at 10000.

    Returns the full forecast frame with the clamped `pro_pred` column.
    """
    model = Prophet(daily_seasonality=False, yearly_seasonality=False, holidays=holiday_df, holidays_prior_scale=10)
    model.add_seasonality(name='weekly', period=7, fourier_order=3, prior_scale=0.1)
    model.fit(data)
    future = model.make_future_dataframe(periods=7, freq='d')
    forecast = model.predict(future)
    # Invert the log1p transform applied to the training target.
    forecast['pro_pred'] = np.expm1(forecast['yhat'])
    # Bug fix: the clamping below used to be applied to a projected copy
    # (`forecast_df`) that was then discarded because the function returned
    # the raw `forecast`.  Apply the clamps to `forecast` itself so the
    # returned predictions are actually bounded.
    forecast.loc[forecast['pro_pred'] < 0, 'pro_pred'] = 0
    low = (1 + 0.1) * data['y'].min()
    high = min((1 + 0.05) * data['y'].max(), 10000)
    forecast.loc[forecast['pro_pred'] < low, 'pro_pred'] = low
    forecast.loc[forecast['pro_pred'] > high, 'pro_pred'] = high
    return forecast
class TestTrainerParams(unittest.TestCase):
    """Tests for converting Hydra TrainerConf configs into Trainer kwargs,
    focusing on how plugin names map onto DDP strategy objects."""

    def test_default_trainer_conf(self) -> None:
        """The default config yields no plugins and a constructible Trainer."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf())
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 0)
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)

    def test_trainer_conf_with_find_unused_parameters_false(self) -> None:
        """'ddp_find_unused_parameters_false' yields a bare DDPStrategy with
        find_unused_parameters disabled and no comm hook."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf(num_nodes=3, plugins=['ddp_find_unused_parameters_false']))
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 1)
            plugin = plugins[0]
            self.assertIsInstance(plugin, DDPStrategy)
            # No communication hook/state/wrapper configured for this plugin.
            self.assertIsNone(plugin._ddp_comm_hook)
            self.assertIsNone(plugin._ddp_comm_state)
            self.assertIsNone(plugin._ddp_comm_wrapper)
            self.assertEqual(plugin._ddp_kwargs, {'find_unused_parameters': False})
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)
            # num_nodes from the config must reach the strategy.
            check_training_type_plugin_attribute(trainer.strategy, 'num_nodes', 3)

    def test_trainer_conf_with_ddp_fp16_compress_plugin(self) -> None:
        """'ddp_fp16_compress' installs the fp16 compression comm hook."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf(plugins=['ddp_fp16_compress']))
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 1)
            plugin = plugins[0]
            self.assertIsInstance(plugin, DDPStrategy)
            self.assertIsNotNone(plugin._ddp_comm_hook)
            self.assertEqual(plugin._ddp_comm_hook.__qualname__, 'fp16_compress_hook')
            self.assertIsNone(plugin._ddp_comm_state)
            self.assertIsNone(plugin._ddp_comm_wrapper)
            # find_unused_parameters defaults to True for this plugin name.
            self.assertEqual(plugin._ddp_kwargs, {'find_unused_parameters': True})
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)
            check_training_type_plugin_attribute(trainer.strategy, 'num_nodes', 1)

    def test_trainer_conf_with_ddp_multiple(self) -> None:
        """Both DDP plugin names combine into one strategy: fp16 hook plus
        find_unused_parameters=False."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf(plugins=['ddp_fp16_compress', 'ddp_find_unused_parameters_false']))
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 1)
            plugin = plugins[0]
            self.assertIsInstance(plugin, DDPStrategy)
            self.assertIsNotNone(plugin._ddp_comm_hook)
            self.assertEqual(plugin._ddp_comm_hook.__qualname__, 'fp16_compress_hook')
            self.assertIsNone(plugin._ddp_comm_state)
            self.assertIsNone(plugin._ddp_comm_wrapper)
            self.assertEqual(plugin._ddp_kwargs, {'find_unused_parameters': False})
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)
            check_training_type_plugin_attribute(trainer.strategy, 'num_nodes', 1)

    def test_trainer_conf_with_ddp_fully_sharded_precision_32(self) -> None:
        """'ddp_fully_sharded' at default precision yields only the strategy."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf(plugins=['ddp_fully_sharded']))
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 1)
            plugin = plugins[0]
            self.assertIsInstance(plugin, DDPFullyShardedStrategy)
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)
            check_training_type_plugin_attribute(trainer.strategy, 'num_nodes', 1)

    def test_trainer_conf_with_ddp_fully_sharded_precision_16(self) -> None:
        """With precision=16 the fully-sharded strategy gains a matching
        mixed-precision plugin as a second entry."""
        cs = ConfigStore.instance()
        cs.store(name='trainer', node=TrainerConf(precision=16, plugins=['ddp_fully_sharded']))
        with initialize():
            trainer_conf = compose(config_name='trainer')
            trainer_params = get_trainer_params(trainer_conf)
            plugins = trainer_params.get('plugins', [])
            self.assertEqual(len(plugins), 2)
            training_type_plugin = plugins[0]
            self.assertIsInstance(training_type_plugin, DDPFullyShardedStrategy)
            precision_plugin = plugins[1]
            self.assertIsInstance(precision_plugin, FullyShardedNativeMixedPrecisionPlugin)
            trainer = Trainer(**trainer_params)
            self.assertIsInstance(trainer, Trainer)
            check_training_type_plugin_attribute(trainer.strategy, 'num_nodes', 1)
def generate_docs_only_subset(paths: List[str]) -> Dict[(str, Any)]:
    """Build a nested field mapping from dotted paths.

    Every intermediate path segment becomes ``{'fields': {...}}`` and the
    final segment maps to an empty dict, e.g. ``['a.b']`` yields
    ``{'a': {'fields': {'b': {}}}}``.
    """
    subset: Dict[str, Any] = {}
    for dotted_path in paths:
        segments = dotted_path.split('.')
        node = subset
        # Walk/create the intermediate levels, descending into 'fields'.
        for segment in segments[:-1]:
            if not node.get(segment):
                node[segment] = {'fields': {}}
            node = node[segment]['fields']
        # The leaf segment is just an empty placeholder.
        node[segments[-1]] = {}
    return subset
def alt_bn128_add(evm: Evm) -> None:
    """The ALT_BN128 elliptic-curve point-addition precompile.

    Reads two curve points (x0, y0) and (x1, y1) as four 32-byte big-endian
    words from calldata, validates them, and writes their sum's coordinates
    to ``evm.output``.

    Raises OutOfGasError for coordinates >= the field prime or for inputs
    that do not form a valid curve point (the precompile's error convention).
    """
    data = evm.message.data
    # Fixed gas cost for this precompile.
    charge_gas(evm, Uint(500))
    # Missing calldata reads as zero bytes via buffer_read.
    x0_bytes = buffer_read(data, U256(0), U256(32))
    x0_value = U256.from_be_bytes(x0_bytes)
    y0_bytes = buffer_read(data, U256(32), U256(32))
    y0_value = U256.from_be_bytes(y0_bytes)
    x1_bytes = buffer_read(data, U256(64), U256(32))
    x1_value = U256.from_be_bytes(x1_bytes)
    y1_bytes = buffer_read(data, U256(96), U256(32))
    y1_value = U256.from_be_bytes(y1_bytes)
    # All coordinates must be canonical field elements.
    for i in (x0_value, y0_value, x1_value, y1_value):
        if (i >= ALT_BN128_PRIME):
            raise OutOfGasError
    try:
        # BNP raises ValueError for points not on the curve.
        p0 = BNP(BNF(x0_value), BNF(y0_value))
        p1 = BNP(BNF(x1_value), BNF(y1_value))
    except ValueError:
        raise OutOfGasError
    p = (p0 + p1)
    # Output: the sum's x and y, each as a 32-byte big-endian word.
    evm.output = (p.x.to_be_bytes32() + p.y.to_be_bytes32())
class ProjectMixinTester(unittest.TestCase):
    """Tests for the ProjectMixin via the declarative DeclProjMixA class."""

    def setUp(self):
        """Build the statuses, types, repository and project the tests need."""
        super(ProjectMixinTester, self).setUp()
        self.test_stat1 = Status(name='On Hold', code='OH')
        self.test_stat2 = Status(name='Work In Progress', code='WIP')
        self.test_stat3 = Status(name='Approved', code='APP')
        # Status lists are bound to their target entity types.
        self.test_status_list_1 = StatusList(name='A Statuses', statuses=[self.test_stat1, self.test_stat3], target_entity_type=DeclProjMixA)
        self.test_status_list_2 = StatusList(name='B Statuses', statuses=[self.test_stat2, self.test_stat3], target_entity_type=DeclProjMixB)
        self.test_project_statuses = StatusList(name='Project Statuses', statuses=[self.test_stat2, self.test_stat3], target_entity_type='Project')
        self.test_project_type = Type(name='Test Project Type', code='testproj', target_entity_type='Project')
        self.test_repository = Repository(name='Test Repo', code='TR')
        self.test_project = Project(name='Test Project', code='tp', type=self.test_project_type, status_list=self.test_project_statuses, repository=self.test_repository)
        # Default kwargs for constructing the object under test.
        self.kwargs = {'name': 'ozgur', 'status_list': self.test_status_list_1, 'project': self.test_project}
        self.test_a_obj = DeclProjMixA(**self.kwargs)

    def test_project_attribute_is_working_properly(self):
        """The project passed at construction is exposed as `.project`."""
        assert (self.test_a_obj.project == self.test_project)
class CycleBreakerTransform(StatefulDashTransform):
    """Dash transform that breaks callback cycles via CycleBreaker components.

    Each CycleBreakerInput is rerouted through a hidden CycleBreaker
    component's 'dst' property, with an identity clientside callback copying
    the original property into the component's 'src'.

    NOTE(review): `_cycle_break_id` takes no ``self`` yet is called as
    ``self._cycle_break_id(i)`` -- a stripped ``@staticmethod`` decorator is
    the only way this works; confirm upstream.
    """

    def __init__(self):
        super().__init__()

    def transform_layout(self, layout):
        # Inject the hidden breaker components into the app layout.
        children = (_as_list(layout.children) + self.components)
        layout.children = children

    def apply(self, callbacks, clientside_callbacks):
        """Reroute cycle-breaking inputs and add the identity callbacks."""
        # Maps breaker component id -> original (component_id, property).
        cycle_inputs = {}
        for c in (callbacks + clientside_callbacks):
            for i in c.inputs:
                if isinstance(i, CycleBreakerInput):
                    cid = self._cycle_break_id(i)
                    cycle_inputs[cid] = (i.component_id, i.component_property)
                    # Redirect the input through the breaker's 'dst' property.
                    i.component_id = cid
                    i.component_property = 'dst'
        self.components = [CycleBreaker(id=cid) for cid in cycle_inputs]
        # Identity clientside function: copies the value through unchanged.
        f = 'function(x){return x;}'
        cycle_callbacks = []
        for cid in cycle_inputs:
            cb = CallbackBlueprint(Output(cid, 'src'), Input(*cycle_inputs[cid]))
            cb.f = f
            cycle_callbacks.append(cb)
        return (callbacks, (clientside_callbacks + cycle_callbacks))

    def _cycle_break_id(d: DashDependency):
        # Deterministic, id-safe name derived from the dependency string.
        return f"{str(d).replace('.', '_')}_breaker"
class MPIFunctionTask(PythonFunctionTask[MPIJob]):
_MPI_JOB_TASK_TYPE = 'mpi'
_MPI_BASE_COMMAND = ['mpirun', '--allow-run-as-root', '-bind-to', 'none', '-map-by', 'slot', '-x', 'LD_LIBRARY_PATH', '-x', 'PATH', '-x', 'NCCL_DEBUG=INFO', '-mca', 'pml', 'ob1', '-mca', 'btl', '^openib']
def __init__(self, task_config: MPIJob, task_function: Callable, **kwargs):
if (task_config.num_workers and task_config.worker.replicas):
raise ValueError('Cannot specify both `num_workers` and `worker.replicas`. Please use `worker.replicas` as `num_workers` is depreacated.')
if ((task_config.num_workers is None) and (task_config.worker.replicas is None)):
raise ValueError('Must specify either `num_workers` or `worker.replicas`. Please use `worker.replicas` as `num_workers` is depreacated.')
if (task_config.num_launcher_replicas and task_config.launcher.replicas):
raise ValueError('Cannot specify both `num_workers` and `launcher.replicas`. Please use `launcher.replicas` as `num_launcher_replicas` is depreacated.')
if ((task_config.num_launcher_replicas is None) and (task_config.launcher.replicas is None)):
raise ValueError('Must specify either `num_workers` or `launcher.replicas`. Please use `launcher.replicas` as `num_launcher_replicas` is depreacated.')
super().__init__(task_config=task_config, task_function=task_function, task_type=self._MPI_JOB_TASK_TYPE, task_type_version=1, **kwargs)
def _convert_replica_spec(self, replica_config: Union[(Launcher, Worker)]) -> mpi_task.DistributedMPITrainingReplicaSpec:
resources = convert_resources_to_resource_model(requests=replica_config.requests, limits=replica_config.limits)
return mpi_task.DistributedMPITrainingReplicaSpec(command=replica_config.command, replicas=replica_config.replicas, image=replica_config.image, resources=(resources.to_flyte_idl() if resources else None), restart_policy=(replica_config.restart_policy.value if replica_config.restart_policy else None))
def _convert_run_policy(self, run_policy: RunPolicy) -> kubeflow_common.RunPolicy:
return kubeflow_common.RunPolicy(clean_pod_policy=(run_policy.clean_pod_policy.value if run_policy.clean_pod_policy else None), ttl_seconds_after_finished=run_policy.ttl_seconds_after_finished, active_deadline_seconds=run_policy.active_deadline_seconds, backoff_limit=run_policy.backoff_limit)
def _get_base_command(self, settings: SerializationSettings) -> List[str]:
return super().get_command(settings)
def get_command(self, settings: SerializationSettings) -> List[str]:
cmd = self._get_base_command(settings)
if self.task_config.num_workers:
num_workers = self.task_config.num_workers
else:
num_workers = self.task_config.worker.replicas
num_procs = (num_workers * self.task_config.slots)
mpi_cmd = (((self._MPI_BASE_COMMAND + ['-np', f'{num_procs}']) + ['python', settings.entrypoint_settings.path]) + cmd)
return mpi_cmd
def get_custom(self, settings: SerializationSettings) -> Dict[(str, Any)]:
    """Serialize the MPIJob spec (worker, launcher, slots, run policy) to a dict.

    The deprecated ``num_workers`` / ``num_launcher_replicas`` counts, when
    truthy, override the replica counts from the per-group configs.
    """
    cfg = self.task_config
    worker = self._convert_replica_spec(cfg.worker)
    launcher = self._convert_replica_spec(cfg.launcher)
    if cfg.num_workers:
        worker.replicas = cfg.num_workers
    if cfg.num_launcher_replicas:
        launcher.replicas = cfg.num_launcher_replicas
    run_policy = None
    if cfg.run_policy:
        run_policy = self._convert_run_policy(cfg.run_policy)
    mpi_job = mpi_task.DistributedMPITrainingTask(worker_replicas=worker, launcher_replicas=launcher, slots=cfg.slots, run_policy=run_policy)
    return MessageToDict(mpi_job)
class OneOfTests(SignatureFixtures):
    """Tests for one-of (enumerated) parameter annotations."""
    _test = _test_annotated_signature
    # Fixture tuples: (annotation, argv, expected args, expected kwargs).
    exact = (RepTests.oneof_basic, ('hello',), ['hello'], {})
    icase = (RepTests.oneof_basic, ('Hello',), ['hello'], {})  # case-insensitive match normalizes to 'hello'
    def test_show_list(self):
        """Passing 'list' should print each allowed value with its help text."""
        func = support.f('par:a', globals={'a': RepTests.oneof_help[1]})
        (out, err) = self.crun(func, ['name', 'list'])
        self.assertEqual('', err.getvalue())
        self.assertLinesEqual('\n name: Possible values for par:\n hello h1\n bye h2\n ', out.getvalue())
def filter_extension_controller_fortigate_profile_data(json):
    """Reduce the payload to the attributes this module recognizes.

    Runs the generic invalid-field cleanup first, then keeps only the
    known keys whose values are not None.
    """
    allowed = ['id', 'lan_extension', 'name']
    json = remove_invalid_fields(json)
    return {key: json[key] for key in allowed if key in json and json[key] is not None}
def default_text_header(iline, xline, offset):
    """Build the default textual (EBCDIC) header for a new SEG-Y file.

    Records today's date, attribution, and the byte positions of the
    inline/crossline/offset fields in the trace headers.
    """
    fields = {
        1: 'DATE %s' % datetime.date.today().isoformat(),
        2: 'AN INCREASE IN AMPLITUDE EQUALS AN INCREASE IN ACOUSTIC IMPEDANCE',
        3: 'Written by libsegyio (python)',
        11: 'TRACE HEADER POSITION:',
        12: ' INLINE BYTES %03d-%03d | OFFSET BYTES %03d-%03d' % (iline, iline + 4, int(offset), int(offset) + 4),
        13: ' CROSSLINE BYTES %03d-%03d |' % (xline, xline + 4),
        15: 'END EBCDIC HEADER',
    }
    header = bytearray(segyio.create_text_header(fields), 'ascii')
    # Force the final byte to 128, matching the header terminator the
    # original writer emits.
    header[-1] = 128
    return bytes(header)
class WTypeTyper(Typer):
    """Typer backend that drives the `wtype` tool on Wayland."""
    # BUG FIX: supported() and name() were declared without `self` and
    # without @staticmethod, so calling them on an instance raised
    # TypeError. @staticmethod keeps class-level calls working and makes
    # instance-level calls valid too.
    @staticmethod
    def supported() -> bool:
        """True when running under Wayland with `wtype` installed."""
        return (is_wayland() and is_installed('wtype'))
    @staticmethod
    def name() -> str:
        """Human-readable backend name."""
        return 'wtype'
    def get_active_window(self) -> str:
        """wtype offers no way to query the focused window."""
        return 'not possible with wtype'
    def type_characters(self, characters: str, active_window: str) -> None:
        """Type *characters* into the focused window via `wtype`."""
        run(['wtype', characters])
class MongoDBConnector(BaseConnector[MongoClient]):
    """Connector for MongoDB datastores.

    Builds a connection URI from stored secrets, tests connectivity, and
    runs access (retrieve) and erasure (mask) queries via MongoQueryConfig.
    """
    def build_uri(self) -> str:
        """Assemble a mongodb:// URI from the connection secrets."""
        config = MongoDBSchema(**(self.configuration.secrets or {}))
        user_pass: str = ''
        default_auth_db: str = ''
        if (config.username and config.password):
            # BUG FIX: credentials must be separated from the host by '@'
            # per the MongoDB connection-string format; without it the URI
            # was 'mongodb://user:passhost' and authentication failed.
            user_pass = f'{config.username}:{config.password}@'
        if config.defaultauthdb:
            default_auth_db = f'/{config.defaultauthdb}'
        port: str = (f':{config.port}' if config.port else '')
        url = f'mongodb://{user_pass}{config.host}{port}{default_auth_db}'
        return url
    def create_client(self) -> MongoClient:
        """Create a MongoClient from an explicit secret URL or the built URI.

        Raises:
            ConnectionException: when the URI cannot be parsed.
        """
        uri = ((self.configuration.secrets or {}).get('url') or self.build_uri())
        try:
            # Short server-selection timeout so bad hosts fail fast.
            return MongoClient(uri, serverSelectionTimeoutMS=5000)
        except ValueError:
            raise ConnectionException('Value Error connecting to MongoDB.')
    def query_config(self, node: TraversalNode) -> QueryConfig[Any]:
        """Return the Mongo-specific query generator for this node."""
        return MongoQueryConfig(node)
    def test_connection(self) -> Optional[ConnectionTestStatus]:
        """Ping the server (and default auth DB if configured) to verify access."""
        logger.info('Starting test connection to {}', self.configuration.key)
        client = self.client()
        try:
            client.server_info()
            default_auth_db = (self.configuration.secrets.get('defaultauthdb') if self.configuration.secrets else None)
            if default_auth_db:
                db = client[default_auth_db]
                db.collection_names()
        except ServerSelectionTimeoutError:
            raise ConnectionException('Server Selection Timeout Error connecting to MongoDB.')
        except OperationFailure:
            raise ConnectionException('Operation Failure connecting to MongoDB.')
        except Exception:
            raise ConnectionException('Connection Error connecting to MongoDB.')
        finally:
            client.close()
        return ConnectionTestStatus.succeeded
    def retrieve_data(self, node: TraversalNode, policy: Policy, privacy_request: PrivacyRequest, input_data: Dict[(str, List[Any])]) -> List[Row]:
        """Fetch all rows matching the generated query for *node*."""
        query_config = self.query_config(node)
        client = self.client()
        query_components = query_config.generate_query(input_data, policy)
        if (query_components is None):
            # Nothing to query for this node.
            return []
        (query_data, fields) = query_components
        db_name = node.address.dataset
        collection_name = node.address.collection
        db = client[db_name]
        collection = db[collection_name]
        rows = []
        logger.info('Starting data retrieval for {}', node.address)
        for row in collection.find(query_data, fields):
            rows.append(row)
        logger.info('Found {} rows on {}', len(rows), node.address)
        return rows
    def mask_data(self, node: TraversalNode, policy: Policy, privacy_request: PrivacyRequest, rows: List[Row], input_data: Dict[(str, List[Any])]) -> int:
        """Apply per-row masking updates; return the number of modified documents."""
        query_config = self.query_config(node)
        collection_name = node.address.collection
        client = self.client()
        update_ct = 0
        for row in rows:
            update_stmt = query_config.generate_update_stmt(row, policy, privacy_request)
            if (update_stmt is not None):
                (query, update) = update_stmt
                db = client[node.address.dataset]
                collection = db[collection_name]
                update_result = collection.update_one(query, update, upsert=False)
                update_ct += update_result.modified_count
                # Query/update values may contain PII, so wrap for safe logging.
                logger.info('db.{}.update_one({}, {}, upsert=False)', collection_name, Pii(query), Pii(update))
        return update_ct
    def close(self) -> None:
        """Close the underlying client if one was created."""
        if self.db_client:
            self.db_client.close()
class TestChoiceFieldChoicesValidate(TestCase):
    """ChoiceField should accept valid values for every supported choices layout."""
    CHOICES = [(0, 'Small'), (1, 'Medium'), (2, 'Large')]
    SINGLE_CHOICES = [0, 1, 2]
    CHOICES_NESTED = [('Category', ((1, 'First'), (2, 'Second'), (3, 'Third'))), (4, 'Fourth')]
    MIXED_CHOICES = [('Category', ((1, 'First'), (2, 'Second'))), 3, (4, 'Fourth')]
    def _check_validates(self, choices, value):
        # Shared helper: a valid value must not raise ValidationError.
        field = serializers.ChoiceField(choices=choices)
        try:
            field.to_internal_value(value)
        except serializers.ValidationError:
            self.fail(('Value %s does not validate' % str(value)))
    def test_choices(self):
        """Pair-style (value, label) choices."""
        self._check_validates(self.CHOICES, self.CHOICES[0][0])
    def test_single_choices(self):
        """Flat list of bare values."""
        self._check_validates(self.SINGLE_CHOICES, self.SINGLE_CHOICES[0])
    def test_nested_choices(self):
        """Grouped (category, ((value, label), ...)) choices."""
        self._check_validates(self.CHOICES_NESTED, self.CHOICES_NESTED[0][1][0][0])
    def test_mixed_choices(self):
        """Mixture of grouped, bare, and pair-style entries."""
        self._check_validates(self.MIXED_CHOICES, self.MIXED_CHOICES[1])
class OptionSeriesTilemapSonificationContexttracksMappingNoteduration(Options):
    """Note-duration mapping options for tilemap sonification context tracks.

    BUG FIX: each getter/setter pair was declared as two plain methods with
    the same name, so the second `def` shadowed the first and the getters
    were unreachable. Restored the @property / @<name>.setter pattern this
    generated options class follows.
    """
    @property
    def mapFunction(self):
        """Mapping function applied to the value (no default)."""
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        """Point property to map from (no default)."""
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        """Upper bound of the mapped range (no default)."""
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        """Lower bound of the mapped range (no default)."""
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        """Data bounds the mapping is computed within (no default)."""
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsDumbbellSonificationTracksMappingPitch(Options):
    """Pitch mapping options for dumbbell-series sonification tracks.

    BUG FIX: each getter/setter pair was declared as two plain methods with
    the same name, so the second `def` shadowed the first and the getters
    (with their defaults) were unreachable. Restored the @property /
    @<name>.setter pattern this generated options class follows.
    """
    @property
    def mapFunction(self):
        """Mapping function applied to the value (no default)."""
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        """Point property to map from. Defaults to 'y'."""
        return self._config_get('y')
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        """Highest note. Defaults to 'c6'."""
        return self._config_get('c6')
    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)
    @property
    def scale(self):
        """Musical scale to snap notes to (no default)."""
        return self._config_get(None)
    @scale.setter
    def scale(self, value: Any):
        self._config(value, js_type=False)
    @property
    def min(self):
        """Lowest note. Defaults to 'c2'."""
        return self._config_get('c2')
    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)
    @property
    def within(self):
        """Bounds the mapping is computed within. Defaults to 'yAxis'."""
        return self._config_get('yAxis')
    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
class PPOAgent(Agent):
    """PPO (Proximal Policy Optimization) agent.

    Acts in the environment, accumulates trajectories with GAE advantages
    into a replay buffer, and trains the model with the clipped-surrogate
    policy loss plus (optionally clipped) value loss and an entropy bonus.
    Whether an instance is an actor or a learner depends on which of
    replay_buffer / optimizer / controller were supplied.
    """
    def __init__(self, model: ModelLike, deterministic_policy: bool=False, replay_buffer: Optional[ReplayBufferLike]=None, controller: Optional[ControllerLike]=None, optimizer: Optional[torch.optim.Optimizer]=None, batch_size: int=512, max_grad_norm: float=1.0, gamma: float=0.99, gae_lambda: float=0.95, ratio_clipping_eps: float=0.2, value_clipping_eps: Optional[float]=0.2, vf_loss_coeff: float=0.5, entropy_coeff: float=0.01, rescale_reward: bool=True, max_abs_reward: float=10.0, normalize_advantage: bool=True, learning_starts: Optional[int]=None, model_push_period: int=10, local_batch_size: int=1024) -> None:
        """Store hyper-parameters and initialize bookkeeping state."""
        super().__init__()
        self._model = model
        # Stored as a tensor so it can be passed straight into model.act().
        self._deterministic_policy = torch.tensor([deterministic_policy])
        self._replay_buffer = replay_buffer
        self._controller = controller
        self._optimizer = optimizer
        self._batch_size = batch_size
        self._max_grad_norm = max_grad_norm
        self._gamma = gamma
        self._gae_lambda = gae_lambda
        self._ratio_clipping_eps = ratio_clipping_eps
        self._value_clipping_eps = value_clipping_eps
        self._vf_loss_coeff = vf_loss_coeff
        self._entropy_coeff = entropy_coeff
        self._rescale_reward = rescale_reward
        self._max_abs_reward = max_abs_reward
        # Running-std reward rescaler; only created when rescaling is on.
        self._reward_rescaler = (StdRescaler(size=1) if rescale_reward else None)
        self._normalize_advantage = normalize_advantage
        self._learning_starts = learning_starts
        self._model_push_period = model_push_period
        self._local_batch_size = local_batch_size
        # Per-episode list of step dicts (obs/action/logpi/v/reward/flags).
        self._trajectory = []
        self._step_counter = 0
        self._eval_executor = None
    def reset(self) -> None:
        """Reset the training-step counter."""
        self._step_counter = 0
    def act(self, timestep: TimeStep) -> Action:
        """Select an action; stash log-prob and value estimate for training."""
        obs = timestep.observation
        (action, logpi, v) = self._model.act(obs, self._deterministic_policy)
        return Action(action, info={'logpi': logpi, 'v': v})
    async def async_act(self, timestep: TimeStep) -> Action:
        """Async variant of act()."""
        obs = timestep.observation
        (action, logpi, v) = (await self._model.async_act(obs, self._deterministic_policy))
        return Action(action, info={'logpi': logpi, 'v': v})
    def observe_init(self, timestep: TimeStep) -> None:
        """Start a fresh trajectory from the episode's initial timestep."""
        if (self._replay_buffer is None):
            return
        (obs, _, terminated, truncated, _) = timestep
        if (terminated or truncated):
            # Degenerate episode: nothing to record.
            self._trajectory.clear()
        else:
            self._trajectory = [{'obs': obs, 'terminated': terminated, 'truncated': truncated}]
    async def async_observe_init(self, timestep: TimeStep) -> None:
        """Async variant of observe_init()."""
        self.observe_init(timestep)
    def observe(self, action: Action, next_timestep: TimeStep) -> None:
        """Record action/outcome on the pending step and append the next one."""
        if (self._replay_buffer is None):
            return
        (act, info) = action
        (obs, reward, terminated, truncated, _) = next_timestep
        cur = self._trajectory[(- 1)]
        cur['action'] = act
        cur['logpi'] = info['logpi']
        cur['v'] = info['v']
        cur['reward'] = reward
        self._trajectory.append({'obs': obs, 'terminated': terminated, 'truncated': truncated})
    async def async_observe(self, action: Action, next_timestep: TimeStep) -> None:
        """Async variant of observe()."""
        self.observe(action, next_timestep)
    def update(self) -> None:
        """On episode end, finalize the trajectory and ship it to replay."""
        if (not self._trajectory):
            return
        last_step = self._trajectory[(- 1)]
        done = (last_step['terminated'] or last_step['truncated'])
        if ((self._replay_buffer is None) or (not done)):
            return
        last_step['reward'] = 0.0
        last_v = torch.zeros(1)
        if last_step['truncated']:
            # Bootstrap from the value estimate when the episode was cut off.
            (_, _, last_v) = self._model.act(last_step['obs'], self._deterministic_policy)
        last_step['v'] = last_v
        replay = self._make_replay()
        self._send_replay(replay)
        self._trajectory.clear()
    async def async_update(self) -> None:
        """Async variant of update()."""
        if (not self._trajectory):
            return
        last_step = self._trajectory[(- 1)]
        done = (last_step['terminated'] or last_step['truncated'])
        if ((self._replay_buffer is None) or (not done)):
            return
        last_step['reward'] = 0.0
        last_v = torch.zeros(1)
        if last_step['truncated']:
            # Bootstrap from the value estimate when the episode was cut off.
            (_, _, last_v) = (await self._model.async_act(last_step['obs'], self._deterministic_policy))
        last_step['v'] = last_v
        replay = self._make_replay()
        (await self._async_send_replay(replay))
        self._trajectory.clear()
    def train(self, num_steps: int, keep_evaluation_loops: bool=False) -> StatsDict:
        """Run `num_steps` optimization steps, pushing the model periodically.

        Returns a StatsDict with per-step losses/timings plus the episode
        stats collected by the controller during the TRAIN phase.
        """
        phase = self._controller.phase()
        if keep_evaluation_loops:
            self._controller.set_phase((Phase.TRAIN | phase))
        else:
            self._controller.set_phase(Phase.TRAIN)
        # Block until the buffer holds at least `learning_starts` samples.
        self._replay_buffer.warm_up(self._learning_starts)
        stats = StatsDict()
        console.log(f'Training for num_steps = {num_steps}')
        for _ in track(range(num_steps), description='Training...'):
            t0 = time.perf_counter()
            (_, batch, _) = self._replay_buffer.sample(self._batch_size)
            t1 = time.perf_counter()
            step_stats = self._train_step(batch)
            t2 = time.perf_counter()
            time_stats = {'sample_data_time/ms': ((t1 - t0) * 1000.0), 'batch_learn_time/ms': ((t2 - t1) * 1000.0)}
            stats.extend(step_stats)
            stats.extend(time_stats)
            self._step_counter += 1
            if ((self._step_counter % self._model_push_period) == 0):
                self._model.push()
        # Final push so actors see the last update, then release resources.
        self._model.push()
        self._model.release()
        episode_stats = self._controller.stats(Phase.TRAIN)
        stats.update(episode_stats)
        self._controller.reset_phase(Phase.TRAIN)
        return stats
    def eval(self, num_episodes: Optional[int]=None, keep_training_loops: bool=False, non_blocking: bool=False) -> Union[(StatsDict, Future)]:
        """Evaluate for `num_episodes`; optionally run in a background thread."""
        if (not non_blocking):
            return self._eval(num_episodes, keep_training_loops)
        if (self._eval_executor is None):
            self._eval_executor = ThreadPoolExecutor(max_workers=1)
        return self._eval_executor.submit(self._eval, num_episodes, keep_training_loops)
    def _make_replay(self) -> List[NestedTensor]:
        """Attach GAE advantages/returns to each step and strip raw fields."""
        (adv, ret) = self._compute_gae_and_return([x['v'] for x in self._trajectory], [x['reward'] for x in self._trajectory], self._reward_rescaler)
        # Drop the trailing bootstrap step; it holds no action.
        self._trajectory.pop()
        for (cur, a, r) in zip(self._trajectory, adv, ret):
            cur['gae'] = a
            cur['ret'] = r
            cur.pop('reward')
            cur.pop('terminated')
            cur.pop('truncated')
        return self._trajectory
    def _send_replay(self, replay: List[NestedTensor]) -> None:
        """Flush the trajectory to the replay buffer in local-batch chunks."""
        batch = []
        while replay:
            batch.append(replay.pop())
            if (len(batch) >= self._local_batch_size):
                self._replay_buffer.extend(batch)
                batch.clear()
        if batch:
            self._replay_buffer.extend(batch)
            batch.clear()
    async def _async_send_replay(self, replay: List[NestedTensor]) -> None:
        """Async variant of _send_replay()."""
        batch = []
        while replay:
            batch.append(replay.pop())
            if (len(batch) >= self._local_batch_size):
                (await self._replay_buffer.async_extend(batch))
                batch.clear()
        if batch:
            (await self._replay_buffer.async_extend(batch))
            batch.clear()
    def _compute_gae_and_return(self, val: Sequence[Union[(float, torch.Tensor)]], rew: Sequence[Union[(float, torch.Tensor)]], reward_rescaler: Optional[Rescaler]=None) -> Tuple[(Iterable[torch.Tensor], Iterable[torch.Tensor])]:
        """Compute GAE advantages and returns by iterating the episode backwards.

        The last entry of `val` is the bootstrap value for the step after
        the final recorded transition.
        """
        n = len(val)
        v = val[(- 1)]
        g = torch.zeros(1)
        gae = torch.zeros(1)
        adv = []
        ret = []
        for i in range((n - 2), (- 1), (- 1)):
            (value, reward) = (val[i], rew[i])
            if (not isinstance(reward, torch.Tensor)):
                reward = torch.tensor([reward], dtype=torch.float32)
            if (reward_rescaler is not None):
                # Rescale by the running std of the discounted return.
                g = (reward + (self._gamma * g))
                reward_rescaler.update(g)
                reward = reward_rescaler.rescale(reward)
                if (self._max_abs_reward is not None):
                    reward.clamp_((- self._max_abs_reward), self._max_abs_reward)
            delta = ((reward + (self._gamma * v)) - value)
            v = value
            gae = (delta + ((self._gamma * self._gae_lambda) * gae))
            adv.append(gae)
            ret.append((gae + v))
        # Built backwards, so reverse to chronological order.
        return (reversed(adv), reversed(ret))
    def _train_step(self, batch: NestedTensor) -> Dict[(str, float)]:
        """One optimizer step on a sampled batch; returns scalar metrics."""
        device = self._model.device
        batch = nested_utils.map_nested((lambda x: x.to(device)), batch)
        self._optimizer.zero_grad()
        obs = batch['obs']
        act = batch['action']
        adv = batch['gae']
        ret = batch['ret']
        behavior_logpi = batch['logpi']
        behavior_v = batch['v']
        (logpi, v) = self._model_forward(obs)
        (policy_loss, ratio) = self._policy_loss(logpi.gather(dim=(- 1), index=act), behavior_logpi, adv)
        value_loss = self._value_loss(ret, v, behavior_v)
        entropy = self._entropy(logpi)
        loss = ((policy_loss + (self._vf_loss_coeff * value_loss)) - (self._entropy_coeff * entropy))
        loss.backward()
        grad_norm = nn.utils.clip_grad_norm_(self._model.parameters(), self._max_grad_norm)
        self._optimizer.step()
        return {'return': ret.detach().mean().item(), 'policy_ratio': ratio.detach().mean().item(), 'policy_loss': policy_loss.detach().mean().item(), 'value_loss': value_loss.detach().mean().item(), 'entropy': entropy.detach().mean().item(), 'loss': loss.detach().mean().item(), 'grad_norm': grad_norm.detach().mean().item()}
    def _model_forward(self, obs: torch.Tensor) -> Tuple[(torch.Tensor, ...)]:
        """Forward pass returning (log-probs, value)."""
        return self._model(obs)
    def _policy_loss(self, logpi: torch.Tensor, behavior_logpi: torch.Tensor, adv: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Clipped-surrogate PPO policy loss; also returns the prob ratio."""
        if self._normalize_advantage:
            (std, mean) = torch.std_mean(adv, unbiased=False)
            adv = ((adv - mean) / std)
        ratio = (logpi - behavior_logpi).exp()
        clipped_ratio = ratio.clamp((1.0 - self._ratio_clipping_eps), (1.0 + self._ratio_clipping_eps))
        surr1 = (ratio * adv)
        surr2 = (clipped_ratio * adv)
        # Pessimistic bound: take the smaller surrogate, negate for descent.
        policy_loss = (- torch.min(surr1, surr2).mean())
        return (policy_loss, ratio)
    def _value_loss(self, ret: torch.Tensor, v: torch.Tensor, behavior_v: Optional[torch.Tensor]=None) -> torch.Tensor:
        """MSE value loss, optionally clipped around the behavior value."""
        if (self._value_clipping_eps is None):
            return F.mse_loss(v, ret)
        clipped_v = (behavior_v + torch.clamp((v - behavior_v), (- self._value_clipping_eps), self._value_clipping_eps))
        vf1 = F.mse_loss(v, ret, reduction='none')
        vf2 = F.mse_loss(clipped_v, ret, reduction='none')
        return torch.max(vf1, vf2).mean()
    def _entropy(self, logpi: torch.Tensor) -> torch.Tensor:
        """Mean policy entropy from log-probabilities."""
        return (- (logpi.exp() * logpi).sum(dim=(- 1)).mean())
    def _eval(self, num_episodes: int, keep_training_loops: bool=False) -> StatsDict:
        """Blocking evaluation: wait until `num_episodes` EVAL episodes finish."""
        phase = self._controller.phase()
        if keep_training_loops:
            self._controller.set_phase((Phase.EVAL | phase))
        else:
            self._controller.set_phase(Phase.EVAL)
        self._controller.reset_phase(Phase.EVAL, limit=num_episodes)
        while (self._controller.count(Phase.EVAL) < num_episodes):
            time.sleep(1)
        stats = self._controller.stats(Phase.EVAL)
        # Restore whatever phase was active before evaluation.
        self._controller.set_phase(phase)
        return stats
class ApproxValue(FrozenBaseModel, ExcludeNoneMixin):
    """A numeric value with a tolerance for approximate comparisons.

    Two numbers compare equal when they differ by at most ``tolerance``:
    the larger of a relative tolerance (scaled by ``abs(value)``) and an
    absolute floor, mirroring ``pytest.approx`` semantics.
    """
    class Config():
        smart_union = True
    DEFAULT_RELATIVE: ClassVar = 1e-06
    DEFAULT_ABSOLUTE: ClassVar = 1e-12
    value: Numeric
    relative: Numeric
    absolute: Numeric
    def __init__(self, value: Numeric, relative: Optional[Numeric]=None, absolute: Optional[Numeric]=None):
        """Create an approximate value, defaulting missing tolerances.

        Raises:
            ValueError: if an explicitly given relative tolerance is not > 0.
        """
        if ((relative is not None) and (relative <= 0)):
            raise ValueError('Relative value for approx should be greater than 0')
        if (relative is None):
            relative = self.DEFAULT_RELATIVE
        if (absolute is None):
            absolute = self.DEFAULT_ABSOLUTE
        super().__init__(value=value, relative=relative, absolute=absolute)
    @property
    def tolerance(self) -> Numeric:
        # BUG FIX: this was a plain method, but every consumer below reads
        # `self.tolerance` as a value, so formatting and all comparison
        # operators performed arithmetic on a bound method and raised
        # TypeError. @property makes the attribute-style access valid.
        relative_value = (abs(self.value) * self.relative)
        return max(relative_value, self.absolute)
    def __format__(self, format_spec):
        return f'{format(self.value, format_spec)} {format(self.tolerance, format_spec)}'
    def __repr__(self):
        return f'{self.value} {self.tolerance}'
    def __eq__(self, other):
        tolerance = self.tolerance
        return ((self.value - tolerance) <= other <= (self.value + tolerance))
    def __lt__(self, other):
        return ((self.value + self.tolerance) < other)
    def __le__(self, other):
        return ((self.value - self.tolerance) <= other)
    def __gt__(self, other):
        return ((self.value - self.tolerance) > other)
    def __ge__(self, other):
        return ((self.value + self.tolerance) >= other)
def test_workflow_config_duplicate_log_message(caplog, monkeypatch):
    """Two workflows sharing a name should produce a duplicate-skip log line."""
    def _mock_workflow():
        mock = Mock(spec=workflow_config.WorkflowConfig)
        mock.name = 'same_name'
        mock.config_path = '/duplicate/path'
        mock.function_dir = 'func_dir'
        return mock
    configs = workflow_config.WorkflowConfigs()
    # Inject two configs with identical names.
    monkeypatch.setattr(configs, '_workflows', [_mock_workflow(), _mock_workflow()])
    with caplog.at_level(logging.INFO):
        configs.get_workflows()
    assert ('Duplicate workflow name: same_name, skipping func_dir' in caplog.text)
class GymDialogues(BaseGymDialogues):
    """Collection of gym dialogues in which this agent plays the environment."""
    def __init__(self, **kwargs: Any) -> None:
        """Initialize dialogues with this skill fixed to the ENVIRONMENT role."""
        def _environment_role(message: Message, receiver_address: Address) -> BaseDialogue.Role:
            # Role is constant regardless of who sent the first message.
            return GymDialogue.Role.ENVIRONMENT
        BaseGymDialogues.__init__(self, self_address=str(PUBLIC_ID), role_from_first_message=_environment_role, **kwargs)
class TestUpdateOtherFieldHook(unittest.TestCase):
    """Field hooks should propagate derived values on init and on later updates."""
    def test_init_event_update_field_hook(self) -> None:
        """Derived fields are populated from the constructor arguments."""
        instance = DummyInstance('01', '//fbsource')
        self.assertEqual(instance.name, 'Tupper01')
        self.assertEqual(instance.output_path, '//fbsource:output')
        self.assertEqual(instance.storage, '//fbsource:storage')
    def test_update_event_update_field_hook(self) -> None:
        """Reassigning source fields recomputes the derived fields."""
        instance = DummyInstance('01', '//fbsource')
        instance.input_path = '//www'
        instance.instance_id = '02'
        self.assertEqual(instance.name, 'Tupper02')
        self.assertEqual(instance.output_path, '//www:output')
        self.assertEqual(instance.storage, '//www:storage')
class Solution():
    def longestStrChain(self, words: List[str]) -> int:
        """Length of the longest chain where each word is formed by inserting
        one character into its predecessor (LeetCode 1048).

        Iterative DP over words sorted by length: the best chain ending at a
        word extends the best chain of any one-character deletion of it.
        """
        words.sort(key=(lambda x: len(x)))
        best = {}
        for word in words:
            longest = 1
            for cut in range(len(word)):
                shorter = (word[:cut] + word[(cut + 1):])
                if (shorter in best):
                    longest = max(longest, (best[shorter] + 1))
            best[word] = longest
        return max(best.values())
def socket_reader(sockobj, outq, exit_event):
    """Pump bytes from *sockobj* into *outq*, one byte at a time.

    Stops on EOF (empty recv), on a socket error, or when *exit_event*
    is set; receive timeouts just re-check the exit flag.
    """
    while not exit_event.is_set():
        try:
            chunk = sockobj.recv(1)
        except socket.timeout:
            # Periodic timeout lets us notice a shutdown request.
            continue
        except OSError:
            break
        if not chunk:
            # Peer closed the connection.
            break
        outq.put(chunk)
def test_dispatch_to_response_pure_invalid_result() -> None:
    """A method returning a bare None (not a Result) yields an internal error."""
    def not_a_result() -> None:
        return None
    response = dispatch_to_response_pure(deserializer=default_deserializer, validator=default_validator, post_process=identity, context=NOCONTEXT, methods={'not_a_result': not_a_result}, request='{"jsonrpc": "2.0", "method": "not_a_result", "id": 1}')
    expected = Left(ErrorResponse(ERROR_INTERNAL_ERROR, 'Internal error', 'The method did not return a valid Result (returned None)', 1))
    assert response == expected
class OgfOcf():
    """HCI opcode helper holding the command group (OGF) and code (OCF).

    Both fields are stored as single-byte bytes objects; the wire format is
    a little-endian 16-bit opcode with the OGF in the top 6 bits.
    """
    def __init__(self, name, ogf=b'\x00', ocf=b'\x00'):
        self.name = name
        self.ogf = ogf
        self.ocf = ocf
    def encode(self):
        """Pack OGF/OCF into the 2-byte little-endian opcode."""
        opcode = (ord(self.ogf) << 10) | ord(self.ocf)
        return pack('<H', opcode)
    def decode(self, data):
        """Unpack OGF/OCF from *data*; return the unconsumed remainder."""
        size = len(self)
        (opcode,) = unpack('<H', data[:size])
        group = opcode >> 10
        # Low bits are the command code; re-store both as 1-byte values.
        self.ocf = int(opcode - (group << 10)).to_bytes(1, 'big')
        self.ogf = int(group).to_bytes(1, 'big')
        return data[size:]
    def __len__(self):
        return calcsize('<H')
    def show(self, depth=0):
        """Pretty-print the group and code at the given indent depth."""
        indent = PRINT_INDENT * depth
        deeper = PRINT_INDENT * (depth + 1)
        print('{}Cmd Group:'.format(indent))
        print('{}{}'.format(deeper, self.ogf))
        print('{}Cmd Code:'.format(indent))
        print('{}{}'.format(deeper, self.ocf))
def test_string_counter():
    """StringCounter ignores None, tallies strings, and renders 'key=count' pairs."""
    first = StringCounter()
    second = StringCounter()
    # None entries must not contribute to the rendered output.
    first.add(None)
    second.add(None)
    assert (str(first) == '')
    for item in ('foo', 'bar', 'foo'):
        first.add(item)
    second.add('baz')
    assert (str(first) == 'foo=2, bar=1')
    assert (str(second) == 'baz=1')
_os(*metadata.platforms)  # NOTE(review): looks like a decorator for main() that lost its '@' prefix — confirm against the original template
def main():
    """Copy the test EXE to a public mstsc.exe path, ensure the Startup
    folder exists, run the copy with a redirected-output argument, then
    remove the copied file."""
    mstsc = 'C:\\Users\\Public\\mstsc.exe'
    path = 'C:\\Users\\Public\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
    # Same Startup path but with 'Start Menu' quoted for use on a command line.
    argpath = "C:\\Users\\Public\\AppData\\Roaming\\Microsoft\\Windows\\'Start Menu'\\Programs\\Startup"
    common.copy_file(EXE_FILE, mstsc)
    Path(path).mkdir(parents=True, exist_ok=True)
    file = (argpath + '\\file.exe')
    common.execute([mstsc, '/c', f'echo AAAAAAAA | Out-File {file}'], timeout=10, kill=True)
    common.remove_files(mstsc)
def test_snr():
    """SNR must be identical for 3-D input and its flattened 2-D reshape."""
    data = np.array([[[1, 2, 3], [4, 5, 6]], [[2, 3, 4], [5, 6, 7]]])
    expected = np.array([3, 5, 7, 9, 11, 13])
    assert_array_equal(calc_signal_to_noise_ratio(data), expected)
    assert_array_equal(calc_signal_to_noise_ratio(data.reshape((2, (- 1)))), expected)
class Main():
    """Top-level GUI controller for Exaile.

    Builds the main window from the Glade/GtkBuilder UI file, wires up
    panels, tray icon, device panels, and progress reporting, and exposes
    the dialogs (preferences, devices, collection manager, cover manager).
    """
    # Singleton-style reference to the most recently constructed Main.
    _main = None
    def __init__(self, exaile):
        """Build the main window and wire all GUI subsystems to *exaile*."""
        from xlgui import icons, main, panels, tray, progress
        Gdk.set_program_class('Exaile')
        GLib.set_application_name('Exaile')
        # Hint to PulseAudio that this application plays music.
        os.environ['PULSE_PROP_media.role'] = 'music'
        self.exaile = exaile
        self.first_removed = False
        self.tray_icon = None
        self.builder = guiutil.get_builder(xdg.get_data_path('ui', 'main.ui'))
        self.progress_box = self.builder.get_object('progress_box')
        self.progress_manager = progress.ProgressManager(self.progress_box)
        add_icon = icons.MANAGER.add_icon_name_from_directory
        images_dir = xdg.get_data_path('images')
        exaile_icon_path = add_icon('exaile', images_dir)
        Gtk.Window.set_default_icon_name('exaile')
        if (not xdg.fhs_compliant):
            os.environ['PULSE_PROP_application.icon_name'] = exaile_icon_path
        # Register the remaining application icons.
        for name in ('exaile-pause', 'exaile-play', 'office-calendar', 'extension', 'music-library', 'artist', 'genre'):
            add_icon(name, images_dir)
        for name in ('dynamic', 'repeat', 'shuffle'):
            add_icon(('media-playlist-' + name), images_dir)
        logger.info('Loading main window...')
        self.main = main.MainWindow(self, self.builder, exaile.collection)
        if self.exaile.options.StartMinimized:
            self.main.window.iconify()
        self.play_toolbar = self.builder.get_object('play_toolbar')
        panel_notebook = self.builder.get_object('panel_notebook')
        self.panel_notebook = panels.PanelNotebook(exaile, self)
        self.device_panels = {}
        # Create panels for devices that are already connected at startup.
        for device in self.exaile.devices.get_devices():
            if device.connected:
                self.add_device_panel(None, None, device)
        logger.info('Connecting panel events...')
        self.main._connect_panel_events()
        # Swap the placeholder notebook from the UI file for the real one.
        guiutil.gtk_widget_replace(panel_notebook, self.panel_notebook)
        self.panel_notebook.get_parent().child_set_property(self.panel_notebook, 'shrink', False)
        if settings.get_option('gui/use_tray', False):
            if tray.is_supported():
                self.tray_icon = tray.TrayIcon(self.main)
            else:
                settings.set_option('gui/use_tray', False)
                logger.warning('Tray icons are not supported on your platform. Disabling tray icon.')
        from xl import event
        event.add_ui_callback(self.add_device_panel, 'device_connected')
        event.add_ui_callback(self.remove_device_panel, 'device_disconnected')
        event.add_ui_callback(self.on_gui_loaded, 'gui_loaded')
        logger.info('Done loading main window...')
        Main._main = self
        if (sys.platform == 'darwin'):
            self._setup_osx()
    def open_uris(self, uris, play=True):
        """Open several URIs; only the first one may start playback."""
        if (len(uris) > 0):
            self.open_uri(uris[0], play=play)
        for uri in uris[1:]:
            self.open_uri(uri, play=False)
    def open_uri(self, uri, play=True):
        """Open a playlist URI in a new tab, or append tracks to the current one."""
        from xl import playlist, trax
        if playlist.is_valid_playlist(uri):
            try:
                # NOTE: rebinding `playlist` shadows the module import above.
                playlist = playlist.import_playlist(uri)
            except playlist.InvalidPlaylistTypeError:
                pass
            else:
                self.main.playlist_container.create_tab_from_playlist(playlist)
                if play:
                    player.QUEUE.current_playlist = playlist
                    player.QUEUE.current_playlist.current_position = 0
                    player.QUEUE.play(playlist[0])
        else:
            page = self.main.get_selected_page()
            column = page.view.get_sort_column()
            reverse = False
            sort_by = common.BASE_SORT_TAGS
            if column:
                reverse = (column.get_sort_order() == Gtk.SortType.DESCENDING)
                sort_by = ([column.name] + sort_by)
            tracks = trax.get_tracks_from_uri(uri)
            tracks = trax.sort_tracks(sort_by, tracks, reverse=reverse)
            try:
                page.playlist.extend(tracks)
                page.playlist.current_position = (len(page.playlist) - len(tracks))
                if play:
                    player.QUEUE.current_playlist = page.playlist
                    player.QUEUE.play(tracks[0])
            except IndexError:
                pass
    def show_cover_manager(self, *e):
        """Open the cover manager dialog."""
        from xlgui.cover import CoverManager
        CoverManager(self.main.window, self.exaile.collection)
    def show_preferences(self):
        """Open the preferences dialog."""
        from xlgui.preferences import PreferencesDialog
        dialog = PreferencesDialog(self.main.window, self)
        dialog.run()
    def show_devices(self):
        """Open the device manager dialog."""
        from xlgui.devices import ManagerDialog
        dialog = ManagerDialog(self.main.window, self)
        dialog.run()
    def queue_manager(self, *e):
        """Show the play queue."""
        self.main.playlist_container.show_queue()
    def collection_manager(self, *e):
        """Open the collection manager and apply any library changes."""
        from xl.collection import Library
        from xlgui.collection import CollectionManagerDialog
        dialog = CollectionManagerDialog(self.main.window, self.exaile.collection)
        result = dialog.run()
        dialog.hide()
        if (result == Gtk.ResponseType.APPLY):
            collection = self.exaile.collection
            collection.freeze_libraries()
            collection_libraries = sorted(((l.location, l.monitored, l.startup_scan) for l in collection.libraries.values()))
            new_libraries = sorted(dialog.get_items())
            if (collection_libraries != new_libraries):
                collection_locations = [location for (location, monitored, startup_scan) in collection_libraries]
                new_locations = [location for (location, monitored, startup_scan) in new_libraries]
                if (collection_locations != new_locations):
                    # Add newly selected locations, drop removed ones, rescan.
                    for location in new_locations:
                        if (location not in collection_locations):
                            collection.add_library(Library(location))
                    removals = []
                    for (location, library) in collection.libraries.items():
                        if (location not in new_locations):
                            removals.append(library)
                    for removal in removals:
                        collection.remove_library(removal)
                    self.on_rescan_collection()
                for (location, monitored, startup_scan) in new_libraries:
                    collection.libraries[location].monitored = monitored
                    collection.libraries[location].startup_scan = startup_scan
            collection.thaw_libraries()
        dialog.destroy()
    def on_gui_loaded(self, event, object, nothing):
        """Callback for the 'gui_loaded' event: finish deferred panel setup."""
        GLib.idle_add(self.panel_notebook.on_gui_loaded)
        self.main._update_track_information()
    def on_rescan_collection(self, *e):
        """Trigger a collection rescan with progress reporting."""
        self.rescan_collection_with_progress()
    def on_rescan_collection_forced(self, *e):
        """Trigger a full (forced) collection rescan."""
        self.rescan_collection_with_progress(force_update=True)
    def rescan_collection_with_progress(self, startup=False, force_update=False):
        """Start a scan thread unless one is already running or there are no libraries."""
        libraries = self.exaile.collection.get_libraries()
        if ((not self.exaile.collection._scanning) and (len(libraries) > 0)):
            from xl.collection import CollectionScanThread
            thread = CollectionScanThread(self.exaile.collection, startup_scan=startup, force_update=force_update)
            thread.connect('done', self.on_rescan_done)
            self.progress_manager.add_monitor(thread, _('Scanning collection...'), 'drive-harddisk')
    def on_rescan_done(self, thread):
        """Refresh the collection panel tree once scanning finishes."""
        GLib.idle_add(self.get_panel('collection').load_tree)
    def on_track_properties(self, *e):
        """Show the properties dialog for the current playlist selection."""
        pl = self.main.get_selected_page()
        pl.view.show_properties_dialog()
    def get_active_panel(self):
        """Return the currently focused side panel."""
        return self.panel_notebook.get_active_panel()
    def focus_panel(self, panel_name):
        """Give keyboard focus to the named side panel."""
        self.panel_notebook.focus_panel(panel_name)
    def get_panel(self, panel_name):
        """Return the panel object registered under *panel_name*."""
        return self.panel_notebook.panels[panel_name].panel
    def get_playlist_container(self):
        """Return the main window's playlist container."""
        return self.main.playlist_container
    def quit(self):
        """Persist the open playlist tabs before shutdown."""
        self.main.playlist_container.save_current_tabs()
    def add_device_panel(self, type, obj, device):
        """Create, register, and populate a side panel for a connected device."""
        from xl.collection import CollectionScanThread
        from xlgui.panel.device import DevicePanel, FlatPlaylistDevicePanel
        import xlgui.panel
        # Devices may request a specific panel class via `panel_type`.
        paneltype = DevicePanel
        if hasattr(device, 'panel_type'):
            if (device.panel_type == 'flatplaylist'):
                paneltype = FlatPlaylistDevicePanel
            elif issubclass(device.panel_type, xlgui.panel.Panel):
                paneltype = device.panel_type
        panel = paneltype(self.main.window, self.main, device, device.get_name())
        do_sort = True
        panel.connect('append-items', (lambda _panel, items, play: self.main.on_append_items(items, play, sort=do_sort)))
        panel.connect('queue-items', (lambda _panel, items: self.main.on_append_items(items, queue=True, sort=do_sort)))
        panel.connect('replace-items', (lambda _panel, items: self.main.on_append_items(items, replace=True, sort=do_sort)))
        self.device_panels[device.get_name()] = panel
        GLib.idle_add(providers.register, 'main-panel', panel)
        thread = CollectionScanThread(device.get_collection())
        thread.connect('done', panel.load_tree)
        self.progress_manager.add_monitor(thread, _(('Scanning %s...' % device.name)), 'drive-harddisk')
    def remove_device_panel(self, type, obj, device):
        """Unregister and drop the panel for a disconnected device."""
        try:
            providers.unregister('main-panel', self.device_panels[device.get_name()])
        except ValueError:
            logger.debug("Couldn't remove panel for %s", device.get_name())
        del self.device_panels[device.get_name()]
    def _setup_osx(self):
        """Install a macOS application delegate for dock/quit integration."""
        from AppKit import NSObject, NSApplication
        import objc
        try:
            import gi
            gi.require_version('GtkosxApplication', '1.0')
            from gi.repository import GtkosxApplication
        except (ValueError, ImportError):
            logger.warning('importing GtkosxApplication failed, no native menus')
        else:
            osx_app = GtkosxApplication.Application()
            osx_app.ready()
        shared_app = NSApplication.sharedApplication()
        gtk_delegate = shared_app.delegate()
        other_self = self
        class Delegate(NSObject):
            (':#B')
            # NOTE(review): the bare string above looks like a mangled
            # Objective-C type-encoding/docstring — confirm against upstream.
            def applicationShouldHandleReopen_hasVisibleWindows_(self, ns_app, flag):
                logger.debug('osx: handle reopen')
                return True
            def applicationShouldTerminate_(self, sender):
                # Route Cmd-Q through Exaile's own quit path.
                logger.debug('osx: block termination')
                other_self.main.quit()
                return False
            def applicationDockMenu_(self, sender):
                # Delegate the dock menu to the original GTK delegate.
                return gtk_delegate.applicationDockMenu_(sender)
        delegate = Delegate.alloc().init()
        delegate.retain()
        shared_app.setDelegate_(delegate)
class Test_icmpv6_router_advert(unittest.TestCase):
    """Tests for serializing ICMPv6 Router Advertisement (ND_ROUTER_ADVERT)
    messages with all-default fields, alone and with trailing neighbour
    discovery options (source link-layer address, prefix information)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_default_args(self):
        """Serialize default-constructed RAs and verify the raw wire bytes:
        4-byte ICMPv6 header, checksum, 12-byte RA body, then options."""
        # Case 1: no options — header + RA body only.
        prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
        ic = icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert())
        prev.serialize(ic, None)
        buf = ic.serialize(bytearray(), prev)
        res = struct.unpack(icmpv6.icmpv6._PACK_STR, six.binary_type(buf[:4]))
        eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
        eq_(res[1], 0)
        eq_(res[2], icmpv6_csum(prev, buf))
        # Default RA fields all serialize as zero.
        res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, six.binary_type(buf[4:]))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        # Case 2: one source link-layer address (SLA) option after the body.
        prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
        ic = icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert(options=[icmpv6.nd_option_sla()]))
        prev.serialize(ic, None)
        buf = ic.serialize(bytearray(), prev)
        res = struct.unpack(icmpv6.icmpv6._PACK_STR, six.binary_type(buf[:4]))
        eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
        eq_(res[1], 0)
        eq_(res[2], icmpv6_csum(prev, buf))
        res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, six.binary_type(buf[4:16]))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        # SLA option: type, length (in 8-byte units), all-zero MAC address.
        res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, six.binary_type(buf[16:]))
        eq_(res[0], icmpv6.ND_OPTION_SLA)
        eq_(res[1], (len(icmpv6.nd_option_sla()) // 8))
        eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
        # Case 3: one prefix information (PI) option after the body.
        prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
        ic = icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert(options=[icmpv6.nd_option_pi()]))
        prev.serialize(ic, None)
        buf = ic.serialize(bytearray(), prev)
        res = struct.unpack(icmpv6.icmpv6._PACK_STR, six.binary_type(buf[:4]))
        eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
        eq_(res[1], 0)
        eq_(res[2], icmpv6_csum(prev, buf))
        res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, six.binary_type(buf[4:16]))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        # PI option: type, length 4 (= 32 bytes / 8), zero fields, '::' prefix.
        res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, six.binary_type(buf[16:]))
        eq_(res[0], icmpv6.ND_OPTION_PI)
        eq_(res[1], 4)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        eq_(res[5], 0)
        eq_(res[6], 0)
        eq_(res[7], addrconv.ipv6.text_to_bin('::'))
        # Case 4: both options — SLA (8 bytes) then PI (32 bytes), in order.
        prev = ipv6(nxt=inet.IPPROTO_ICMPV6)
        ic = icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert(options=[icmpv6.nd_option_sla(), icmpv6.nd_option_pi()]))
        prev.serialize(ic, None)
        buf = ic.serialize(bytearray(), prev)
        res = struct.unpack(icmpv6.icmpv6._PACK_STR, six.binary_type(buf[:4]))
        eq_(res[0], icmpv6.ND_ROUTER_ADVERT)
        eq_(res[1], 0)
        eq_(res[2], icmpv6_csum(prev, buf))
        res = struct.unpack(icmpv6.nd_router_advert._PACK_STR, six.binary_type(buf[4:16]))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        res = struct.unpack(icmpv6.nd_option_sla._PACK_STR, six.binary_type(buf[16:24]))
        eq_(res[0], icmpv6.ND_OPTION_SLA)
        eq_(res[1], (len(icmpv6.nd_option_sla()) // 8))
        eq_(res[2], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
        res = struct.unpack(icmpv6.nd_option_pi._PACK_STR, six.binary_type(buf[24:]))
        eq_(res[0], icmpv6.ND_OPTION_PI)
        eq_(res[1], (len(icmpv6.nd_option_pi()) // 8))
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(res[4], 0)
        eq_(res[5], 0)
        eq_(res[6], 0)
        eq_(res[7], addrconv.ipv6.text_to_bin('::'))

    def test_json(self):
        """Round-trip an RA (with both option types) through to_jsondict /
        from_jsondict and check the reconstruction is equivalent."""
        ic1 = icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_ADVERT, data=icmpv6.nd_router_advert(options=[icmpv6.nd_option_sla(), icmpv6.nd_option_pi()]))
        jsondict = ic1.to_jsondict()
        ic2 = icmpv6.icmpv6.from_jsondict(jsondict['icmpv6'])
        eq_(str(ic1), str(ic2))
def test_init_receives_args_and_kwargs(SMTestBase):
    """Check that state_machine forwards positional and keyword arguments
    to the state machine's __init__: foo positionally, baz overridden by
    keyword, bar left at its default."""
    class StateMachine(SMTestBase):
        def __init__(self, foo, bar=42, baz=31337):
            # foo is always passed positionally as 11; bar keeps its
            # default; baz is either overridden (69) or defaulted (31337).
            assert foo == 11
            assert bar in {32, 42}
            assert baz in {69, 31337}

    state_machine(StateMachine, 11, baz=69, settings={'max_examples': 2})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.