code stringlengths 281 23.7M |
|---|
def forward(model: Model[(NestedT[InItemT], NestedT[OutItemT])], Xnest: NestedT[InItemT], is_train: bool) -> Tuple[(NestedT[OutItemT], Callable)]:
    """Apply the wrapped layer to a nested sequence by flattening it first.

    The nested input is flattened, pushed through ``model.layers[0]``, and the
    flat output is re-nested using the recorded sub-sequence lengths.

    Returns a ``(Ynest, backprop)`` pair following the forward convention
    visible here (the layer call returns output plus a backprop callback).
    """
    layer: Model[(FlatT[InItemT], FlatT[OutItemT])] = model.layers[0]
    # `lens` records the length of each nested sub-sequence so that flat
    # results (and flat gradients) can be split back into the same structure.
    (Xflat, lens) = _flatten(Xnest)
    (Yflat, backprop_layer) = layer(Xflat, is_train)
    Ynest = _unflatten(Yflat, lens)

    def backprop(dYnest: NestedT[OutItemT]) -> NestedT[InItemT]:
        # Gradients flow in the reverse direction: the incoming gradient has
        # the output item type and the returned gradient the input item type
        # (the original annotations had these two swapped).
        (dYflat, _) = _flatten(dYnest)
        dXflat = backprop_layer(dYflat)
        dXnest = _unflatten(dXflat, lens)
        return dXnest

    return (Ynest, backprop)
# NOTE(review): the bare tuple below is the residue of a stripped parametrize
# decorator (argnames string/tuple plus the list of `param(...)` cases, e.g.
# `@mark.parametrize(...)`); as written it is a no-op expression statement —
# confirm against the original test module.
(('config_name', 'overrides', 'with_hydra', 'expected'), [param('select_multi', [], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi'), children=[ConfigDefault(path='group1/file1'), ConfigDefault(path='group1/file2'), ConfigDefault(path='_self_')]), id='select_multi'), param('select_multi', ['group1=[file1,file3]'], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi'), children=[ConfigDefault(path='group1/file1'), ConfigDefault(path='group1/file3'), ConfigDefault(path='_self_')]), id='select_multi:override_list'), param('select_multi', ['group1=[]'], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi'), children=[ConfigDefault(path='_self_')]), id='select_multi:override_to_empty_list'), param('select_multi', ['group1=file1'], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi'), children=[GroupDefault(group='group1', value='file1'), ConfigDefault(path='_self_')]), id='select_multi:override_to_option'), param('group1/select_multi', [], False, DefaultsTreeNode(node=ConfigDefault(path='group1/select_multi'), children=[ConfigDefault(path='group2/file1'), ConfigDefault(path='group2/file2'), ConfigDefault(path='_self_')]), id='group1/select_multi'), param('group1/select_multi', ['group1/group2=[file1,file3]'], False, DefaultsTreeNode(node=ConfigDefault(path='group1/select_multi'), children=[ConfigDefault(path='group2/file1'), ConfigDefault(path='group2/file3'), ConfigDefault(path='_self_')]), id='group1/select_multi:override'), param('select_multi_interpolation', [], False, raises(ConfigCompositionException, match=re.escape("In 'select_multi_interpolation': Defaults List interpolation is not supported in options list items")), id='select_multi_interpolation'), param('select_multi_override', [], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi_override'), children=[ConfigDefault(path='group1/file3'), ConfigDefault(path='group1/file1'), ConfigDefault(path='_self_')]), id='select_multi_override'),
 param('select_multi_optional', [], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi_optional'), children=[ConfigDefault(path='group1/not_found', deleted=True, optional=True), ConfigDefault(path='_self_')]), id='select_multi_optional'), param('select_multi_optional', ['group1=[file1,not_found2]'], False, DefaultsTreeNode(node=ConfigDefault(path='select_multi_optional'), children=[ConfigDefault(path='group1/file1', optional=True), ConfigDefault(path='group1/not_found2', deleted=True, optional=True), ConfigDefault(path='_self_')]), id='select_multi_optional:override'), param('empty', ['+group1=[file1]'], False, DefaultsTreeNode(node=ConfigDefault(path='empty'), children=[ConfigDefault(path='_self_'), ConfigDefault(path='group1/file1')]), id='append_new_list_to_a_config_without_a_defaults_list'), param(None, ['+group1=[file1]'], False, DefaultsTreeNode(node=ConfigDefault(path='_dummy_empty_config_'), children=[ConfigDefault(path='_self_'), ConfigDefault(path='group1/file1')]), id='append_new_list_to_without_a_primary_config'), param('empty', ['+group1=[file1]'], True, DefaultsTreeNode(node=VirtualRoot(), children=[DefaultsTreeNode(node=ConfigDefault(path='hydra/config'), children=[GroupDefault(group='help', value='default'), GroupDefault(group='output', value='default'), ConfigDefault(path='_self_')]), DefaultsTreeNode(node=ConfigDefault(path='empty'), children=[ConfigDefault(path='_self_'), ConfigDefault(path='group1/file1')])]), id='append_new_list_to_a_config_without_a_defaults_list+with_hydra'), param(None, ['+group1=[file1]'], True, DefaultsTreeNode(node=VirtualRoot(), children=[DefaultsTreeNode(node=ConfigDefault(path='hydra/config'), children=[GroupDefault(group='help', value='default'), GroupDefault(group='output', value='default'), ConfigDefault(path='_self_')]), DefaultsTreeNode(node=ConfigDefault(path='_dummy_empty_config_'), children=[ConfigDefault(path='_self_'), ConfigDefault(path='group1/file1')])]),
 id='append_new_list_to_without_a_primary_config+with_hydra')])
def test_select_multi(config_name: Optional[str], overrides: List[str], with_hydra: bool, expected: DefaultsTreeNode) -> None:
    """Each case builds a defaults tree (optionally with the hydra subtree prepended)
    and compares it — or the raised composition error — against `expected`."""
    _test_defaults_tree_impl(config_name=config_name, input_overrides=overrides, prepend_hydra=with_hydra, expected=expected)
# The bare `()` and argument tuples in the source were stripped Click
# decorators (the tuples reference click.File / click.Choice); restored below.
@click.command()
@click.argument('f', type=click.File('rb'), default='-')
@click.option('-a', 'algorithm', default=None, type=click.Choice(ALGOS), help='Only this algorithm (Default: all)')
def cmd_crypto_hasher(f, algorithm):
    """Hash the contents of F ('-' reads stdin) with one or all algorithms.

    Returns 1 (non-zero exit) when the input is empty; otherwise prints the
    digest(s) together with the file name.
    """
    data = f.read()
    if not data:
        print('Empty file or string!')
        return 1
    if algorithm:
        # A single algorithm was requested: print just its digest.
        print(hasher(data, algorithm)[algorithm], f.name)
    else:
        # No algorithm given: print a table with every available digest.
        for algo, result in hasher(data).items():
            print('{:<12} {} {}'.format(algo, result, f.name))
def test_different_folders(project, tmp_path):
    """_create_folders must honour the custom folder layout from brownie-config.yaml."""
    config_file = tmp_path.joinpath('brownie-config.yaml')
    with config_file.open('w') as handle:
        yaml.dump(structure, handle)
    base = Path(tmp_path)
    project.main._create_folders(base)
    default_dirs = ('contracts', 'interfaces', 'scripts', 'reports', 'tests', 'build')
    custom_dirs = ('artifacts', 'sources', 'abi', 'logs', 'automation', 'checks')
    # The default layout must NOT be created...
    for name in default_dirs:
        assert not base.joinpath(name).exists()
    # ...while every folder from the custom structure must exist.
    for name in custom_dirs:
        assert base.joinpath(name).exists()
class OptionSeriesHeatmapSonificationDefaultinstrumentoptionsMappingTime(Options):
    """Option wrapper for `series.heatmap.sonification.defaultInstrumentOptions.mapping.time`.

    The duplicated bare `def`s in the source were getter/setter pairs whose
    `@property` / `@<name>.setter` decorators had been stripped (each second
    definition silently shadowed the first); the decorators are restored here.
    """

    @property
    def mapFunction(self):
        # No default value: delegate to the underlying config store.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsFunnel3dDatalabelsTextpath(Options):
    """Option wrapper for `plotOptions.funnel3d.dataLabels.textPath`.

    Restores the stripped `@property` / setter decorators — without them the
    setter-shaped second `def` shadowed each getter.
    """

    @property
    def attributes(self):
        return self._config_get(None)

    @attributes.setter
    def attributes(self, value: Any):
        self._config(value, js_type=False)

    @property
    def enabled(self):
        # Text paths are disabled by default.
        return self._config_get(False)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
def get_msg(self):
    """Poll webwxsync once; return (AddMsgList, ModContactList), or (None, None) on failure.

    Side effects: refreshes loginInfo['deviceid'] and, on success, updates
    loginInfo['SyncKey'] / loginInfo['synckey'].
    """
    # Device id is 'e' + 15 decimal digits. The upper bound was garbled in the
    # source (`.0 - 1`); restored as 10 ** 15 - 1 to match the 15-char rjust.
    self.loginInfo['deviceid'] = 'e' + str(random.randint(0, 10 ** 15 - 1)).rjust(15, '0')
    url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
        self.loginInfo['url'], self.loginInfo['wxsid'],
        self.loginInfo['skey'], self.loginInfo['pass_ticket'])
    # 'rr' is the usual cache-busting value: bitwise NOT of the current timestamp.
    data = {'BaseRequest': self.loginInfo['BaseRequest'], 'SyncKey': self.loginInfo['SyncKey'], 'rr': ~int(time.time())}
    headers = {'ContentType': 'application/json; charset=UTF-8', 'User-Agent': self.user_agent}
    r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
    dic = json.loads(r.content.decode('utf-8', 'replace'))
    if dic['BaseResponse']['Ret'] != 0:
        # Non-zero Ret means the sync failed; caller treats (None, None) as "no data".
        return (None, None)
    self.loginInfo['SyncKey'] = dic['SyncKey']
    self.loginInfo['synckey'] = '|'.join('%s_%s' % (item['Key'], item['Val']) for item in dic['SyncCheckKey']['List'])
    return (dic['AddMsgList'], dic['ModContactList'])
class EnumValueMixin(BaseModel):
    """Model mixin whose ``dict()`` output replaces Enum members with their values."""

    def _to_enum_value(self, key, value):
        # Only fields declared with an Enum subclass type are converted.
        field = self.__fields__[key]
        if not issubclass(field.type_, Enum):
            return value

        def unwrap(item):
            return item.value if isinstance(item, Enum) else item

        if isinstance(value, list):
            return [unwrap(item) for item in value]
        return unwrap(value)

    def dict(self, *args, **kwargs) -> 'DictStrAny':
        """Serialize as usual, then map every Enum member to its raw value."""
        raw = super().dict(*args, **kwargs)
        return {key: self._to_enum_value(key, val) for key, val in raw.items()}
import re


def parse_nick_template(string, template_regex, outtemplate):
    """Try to parse *string* with *template_regex* and re-render it via *outtemplate*.

    Returns ``(True, rendered)`` on a match — unmatched optional groups are
    substituted with ``''`` — or ``(False, string)`` when the regex fails.
    """
    parsed = template_regex.match(string)
    if parsed is None:
        return (False, string)
    fields = {}
    for key, value in parsed.groupdict().items():
        fields[key] = '' if value is None else value
    return (True, outtemplate.format_map(fields))
class Migration(migrations.Migration):
    """Add the EventTag model, adjust several help texts, and link events to tags."""

    dependencies = [('manager', '0014_auto__1904')]

    # NOTE(review): in the source the help_text of activity.labels was broken
    # across lines ('Comma separated tags. i.e. / Linux, ...'); rejoined here as a
    # single-line string — confirm against the original migration file.
    operations = [
        migrations.CreateModel(name='EventTag', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text='This name will be used as a slug', max_length=50, unique=True, verbose_name='EventTag Name')), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')), ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated At')), ('background', models.ImageField(help_text='A image to show in the background of', upload_to='')), ('logo_header', models.ImageField(blank=True, help_text='This logo will be showed in the corner right of the page', null=True, upload_to='')), ('logo_landing', models.ImageField(blank=True, help_text='Logo to show in the center of the page', null=True, upload_to='')), ('message', models.TextField(help_text='A message to show in the center of the page', max_length=280)), ('slug', models.SlugField(help_text='For example: flisol-caba', max_length=100, unique=True, verbose_name='URL'))]),
        migrations.AlterField(model_name='activity', name='additional_info', field=models.TextField(blank=True, help_text='Any info you consider relevant for the organizer: i.e. Write here if your activity has any special requirement', null=True, verbose_name='Additional Info')),
        migrations.AlterField(model_name='activity', name='is_dummy', field=models.BooleanField(default=False, help_text='A dummy activity is used for example for coffee breaks. We use this to exclude it from the index page and other places', verbose_name='Is a dummy Activity?')),
        migrations.AlterField(model_name='activity', name='labels', field=models.CharField(help_text='Comma separated tags. i.e. Linux, Free Software, Archlinux', max_length=200, verbose_name='Labels')),
        migrations.AlterField(model_name='activity', name='presentation', field=models.FileField(blank=True, help_text='Any material you are going to use for the talk (optional, but recommended)', null=True, upload_to='talks', verbose_name='Presentation')),
        migrations.AlterField(model_name='activity', name='speaker_contact', field=models.EmailField(help_text='Where can whe reach you from the organization team?', max_length=254, verbose_name='Speaker Contact')),
        migrations.AlterField(model_name='attendee', name='additional_info', field=models.CharField(blank=True, help_text='Any additional info you consider relevant for the organizers', max_length=200, null=True, verbose_name='Additional Info')),
        migrations.AlterField(model_name='collaborator', name='assignation', field=models.CharField(blank=True, help_text='Anything you can help with (i.e. Talks, Coffee...)', max_length=200, null=True, verbose_name='Assignation')),
        migrations.AlterField(model_name='collaborator', name='time_availability', field=models.CharField(blank=True, help_text='Time gap in which you can help during the event. i.e. "All the event", "Morning", "Afternoon", ...', max_length=200, null=True, verbose_name='Time Availability')),
        migrations.AlterField(model_name='installation', name='notes', field=models.TextField(blank=True, help_text='Any information or trouble you found and consider relevant to document', null=True, verbose_name='Notes')),
        migrations.AddField(model_name='event', name='tags', field=models.ManyToManyField(help_text='Select tags to show this event in the EventTag landing', to='manager.EventTag')),
        migrations.RunPython(fill_event_tags)]
def add_errored_system_status_for_consent_reporting(db: Session, privacy_request: PrivacyRequest, connection_config: ConnectionConfig) -> None:
    """Flip every still-pending consent preference for this connection's system to 'error'."""
    system_key = connection_config.system_key
    for preference in privacy_request.privacy_preferences:
        status_map = preference.affected_system_status
        if not status_map:
            continue
        # Only preferences that are still marked pending for this system are updated.
        if status_map.get(system_key) != ExecutionLogStatus.pending.value:
            continue
        preference.cache_system_status(db, system_key, ExecutionLogStatus.error)
class BaseRuleTest(unittest.TestCase):
    """Base class for rule tests: loads the rule collections once per class and
    converts a loader failure into one explicit test failure plus skips."""

    # Class-level mirrors of the module-level failure flags used below.
    RULE_LOADER_FAIL = False
    RULE_LOADER_FAIL_MSG = None
    RULE_LOADER_FAIL_RAISED = False

    @classmethod
    def setUpClass(cls):
        """Load rule collections onto the class; record (not raise) any loader error.

        The @classmethod decorator was missing in the source even though the
        method receives ``cls``; it is restored here.
        """
        global RULE_LOADER_FAIL, RULE_LOADER_FAIL_MSG
        if not RULE_LOADER_FAIL:
            try:
                rc = default_rules()
                rc_bbr = default_bbr()
                cls.all_rules = rc.rules
                cls.rule_lookup = rc.id_map
                cls.production_rules = rc.filter(production_filter)
                cls.bbr = rc_bbr.rules
                cls.deprecated_rules: DeprecatedCollection = rc.deprecated
            except Exception as e:
                # Defer reporting to setUp so every subclass skips cleanly.
                RULE_LOADER_FAIL = True
                RULE_LOADER_FAIL_MSG = str(e)

    @staticmethod
    def rule_str(rule: Union[(DeprecatedRule, TOMLRule)], trailer=' ->') -> str:
        """Short display string for a rule (restored @staticmethod: no self parameter)."""
        return f"{rule.id} - {rule.name}{(trailer or '')}"

    def setUp(self) -> None:
        """Fail exactly once on a recorded loader error, then skip all other tests."""
        global RULE_LOADER_FAIL, RULE_LOADER_FAIL_MSG, RULE_LOADER_FAIL_RAISED
        if RULE_LOADER_FAIL:
            if not RULE_LOADER_FAIL_RAISED:
                RULE_LOADER_FAIL_RAISED = True
                with self.subTest('Test that the rule loader loaded with no validation or other failures.'):
                    self.fail(f'''Rule loader failure:
{RULE_LOADER_FAIL_MSG}''')
            self.skipTest('Rule loader failure')
        else:
            super().setUp()
class OptionSeriesDependencywheelSonificationDefaultspeechoptionsMappingVolume(Options):
    """Option wrapper for `series.dependencywheel.sonification.defaultSpeechOptions.mapping.volume`.

    Restores the stripped `@property` / `@<name>.setter` decorators — without
    them each setter-shaped second `def` shadowed its getter.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class ManageForum(MethodView):
    """Moderation view for a forum.

    GET renders the management page; POST applies one bulk action (lock/unlock,
    highlight/trivialize, delete, move, hide/unhide) to the selected topics.
    """

    # Only moderators (or better) of this forum may use the view.
    decorators = [login_required, allows.requires(IsAtleastModeratorInForum(), on_fail=FlashAndRedirect(message=_('You are not allowed to manage this forum'), level='danger', endpoint=(lambda *a, **k: url_for('forum.view_forum', forum_id=k['forum_id']))))]

    def get(self, forum_id, slug=None):
        """Render the edit/manage page, or redirect if the forum is external."""
        (forum_instance, forumsread) = Forum.get_forum(forum_id=forum_id, user=real(current_user))
        if forum_instance.external:
            return redirect(forum_instance.external)
        # Offer every other forum as a potential move target.
        available_forums = Forum.query.order_by(Forum.position).all()
        available_forums.remove(forum_instance)
        page = request.args.get('page', 1, type=int)
        topics = Forum.get_topics(forum_id=forum_instance.id, user=real(current_user), page=page, per_page=flaskbb_config['TOPICS_PER_PAGE'])
        return render_template('forum/edit_forum.html', forum=forum_instance, topics=topics, available_forums=available_forums, forumsread=forumsread)

    def post(self, forum_id, slug=None):
        """Apply the bulk action chosen in the form to the topics selected via 'rowid'."""
        (forum_instance, __) = Forum.get_forum(forum_id=forum_id, user=real(current_user))
        mod_forum_url = url_for('forum.manage_forum', forum_id=forum_instance.id, slug=forum_instance.slug)
        ids = request.form.getlist('rowid')
        tmp_topics = Topic.query.filter(Topic.id.in_(ids)).all()
        if (not (len(tmp_topics) > 0)):
            flash(_('In order to perform this action you have to select at least one topic.'), 'danger')
            return redirect(mod_forum_url)
        # Exactly one of the following form keys selects the action to run.
        if ('lock' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='locked', reverse=False)
            flash(_('%(count)s topics locked.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('unlock' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='locked', reverse=True)
            flash(_('%(count)s topics unlocked.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('highlight' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='important', reverse=False)
            flash(_('%(count)s topics highlighted.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('trivialize' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='important', reverse=True)
            flash(_('%(count)s topics trivialized.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('delete' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='delete', reverse=False)
            flash(_('%(count)s topics deleted.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('move' in request.form):
            new_forum_id = request.form.get('forum')
            if (not new_forum_id):
                flash(_('Please choose a new forum for the topics.'), 'info')
                return redirect(mod_forum_url)
            new_forum = Forum.query.filter_by(id=new_forum_id).first_or_404()
            # The mover must be a moderator of both the source and target forum.
            if (not Permission(And(IsAtleastModeratorInForum(forum_id=new_forum_id), IsAtleastModeratorInForum(forum=forum_instance)))):
                flash(_('You do not have the permissions to move this topic.'), 'danger')
                return redirect(mod_forum_url)
            if new_forum.move_topics_to(tmp_topics):
                flash(_('Topics moved.'), 'success')
            else:
                flash(_('Failed to move topics.'), 'danger')
            return redirect(mod_forum_url)
        elif ('hide' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='hide', reverse=False)
            flash(_('%(count)s topics hidden.', count=changed), 'success')
            return redirect(mod_forum_url)
        elif ('unhide' in request.form):
            changed = do_topic_action(topics=tmp_topics, user=real(current_user), action='unhide', reverse=False)
            flash(_('%(count)s topics unhidden.', count=changed), 'success')
            return redirect(mod_forum_url)
        else:
            flash(_('Unknown action requested'), 'danger')
            return redirect(mod_forum_url)
class JaxMonitorData(MonitorData, JaxObject, ABC):
    """Monitor data variant whose registered array fields are stored as JaxDataArray."""

    @classmethod
    def from_monitor_data(cls, mnt_data: MonitorData) -> JaxMonitorData:
        """Convert a regular MonitorData into its Jax counterpart.

        The @classmethod decorator was missing in the source even though the
        body uses ``cls``; restored here.
        """
        self_dict = mnt_data.dict(exclude={'type'}).copy()
        for field_name in cls.get_jax_field_names():
            data_array = self_dict[field_name]
            if data_array is not None:
                # Rebuild coords as plain lists so they can live in a JaxDataArray.
                coords = {dim: data_array.coords[dim].values.tolist() for dim in data_array.coords.dims}
                jax_amps = JaxDataArray(values=data_array.values, coords=coords)
                self_dict[field_name] = jax_amps
        return cls.parse_obj(self_dict)

    def to_adjoint_sources(self, fwidth: float) -> List[Source]:
        """Construct the adjoint sources for this monitor's data.

        NOTE(review): only the two local helpers are visible in this chunk; the
        code that actually builds and returns the source list appears truncated
        — confirm against the full module.
        """

        def make_source_time(amp_complex: complex, freq: float, fwidth: float) -> GaussianPulse:
            # Split a complex amplitude into magnitude and phase for a GaussianPulse.
            amp = abs(amp_complex)
            phase = np.angle(1j * amp_complex)
            return GaussianPulse(freq0=freq, fwidth=fwidth, amplitude=amp, phase=phase)

        def flip_direction(direction: str) -> str:
            # Adjoint sources propagate opposite to the recorded direction.
            direction = str(direction)
            if direction == '+':
                return '-'
            if direction == '-':
                return '+'
            raise AdjointError(f"Given a direction of '{direction}', expected '+' or '-'.")
class CustomAudienceDocsTestCase(DocsTestCase):
    """Docs-generation tests for CustomAudience: each test stores the API
    response so it can be embedded into the generated documentation."""

    def setUp(self):
        # Create a fresh custom audience and stash its id for the tests below.
        ca = self.create_custom_audience()
        DocsDataStore.set('ca_id', ca.get_id_assured())

    def test_add_users(self):
        # NOTE(review): users=[''] sends a single empty entry — presumably just
        # enough to exercise the endpoint for the docs; confirm intent.
        custom_audience = CustomAudience(DocsDataStore.get('ca_id'))
        response = custom_audience.add_users(schema=CustomAudience.Schema.email_hash, users=[''])
        self.store_response(response)

    def test_remove_users(self):
        custom_audience = CustomAudience(DocsDataStore.get('ca_id'))
        response = custom_audience.remove_users(schema=CustomAudience.Schema.email_hash, users=[''])
        self.store_response(response)

    def test_format_params(self):
        # format_params is a request-building helper; no API call is made here.
        formatted_params = CustomAudience.format_params(schema=CustomAudience.Schema.email_hash, users=[''])
        self.store_response(formatted_params)
# The bare patch-target strings in the source were stripped mock.patch
# decorators (`mock` is used elsewhere in this module); restored below.
@mock.patch('aea.cli.utils.decorators.try_to_load_agent_config')
@mock.patch('aea.cli.utils.decorators._validate_config_consistency')
class AddContractCommandTestCase(TestCase):
    """Tests for the `aea add contract` CLI command."""

    def setUp(self):
        self.runner = CliRunner()

    @mock.patch('aea.cli.add.add_item')
    def test_add_contract_positive(self, *mocks):
        """`add contract` should exit 0 for both remote and --local fetches."""
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', 'contract', 'author/name:0.1.0'], standalone_mode=False)
        self.assertEqual(result.exit_code, 0)
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'contract', 'author/name:0.1.0'], standalone_mode=False)
        self.assertEqual(result.exit_code, 0)
class DEP001MissingDependenciesFinder(ViolationsFinder):
    """Finds DEP001 violations: imported modules that no (dev-)dependency provides."""

    def find(self) -> list[Violation]:
        """Return one DEP001 violation per import location of every missing module."""
        logging.debug('\nScanning for missing dependencies...')
        missing_dependencies: list[Violation] = []
        for module_with_locations in self.imported_modules_with_locations:
            module = module_with_locations.module
            logging.debug('Scanning module %s...', module.name)
            if self._is_missing(module):
                missing_dependencies.extend(
                    DEP001MissingDependencyViolation(module, location)
                    for location in module_with_locations.locations
                )
        return missing_dependencies

    def _is_missing(self, module: Module) -> bool:
        """True if *module* resolves to no package/dependency and is not ignored."""
        # Plain boolean short-circuiting instead of building a throwaway list
        # just to feed any().
        if (
            module.package is not None
            or module.is_provided_by_dependency
            or module.is_provided_by_dev_dependency
            or module.local_module
        ):
            return False
        if module.name in self.ignored_modules:
            logging.debug("Identified module '%s' as a missing dependency, but ignoring.", module.name)
            return False
        logging.debug("No package found to import module '%s' from. Marked as a missing dependency.", module.name)
        return True
class OptionPlotoptionsColumnSonificationPointgrouping(Options):
    """Option wrapper for `plotOptions.column.sonification.pointGrouping`.

    Restores the stripped `@property` / setter decorators — without them each
    setter-shaped second `def` shadowed its getter.
    """

    @property
    def algorithm(self):
        # Default grouping algorithm is 'minmax'.
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        # Point grouping is enabled by default.
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        # Default timespan per group, in milliseconds per the stored default of 15.
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        # Default property used for grouping is the 'y' value.
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
def test_utc_to_local_is_working_properly():
    """utc_to_local() should convert a naive UTC timestamp to local wall time."""
    from stalker.models import utc_to_local
    import datetime
    import pytz

    local_now = datetime.datetime.now()
    utc_now = datetime.datetime.now(pytz.utc)
    # Strip tzinfo and sub-minute parts: the function expects a naive UTC value.
    naive_utc = datetime.datetime(utc_now.year, utc_now.month, utc_now.day, utc_now.hour, utc_now.minute)
    converted = utc_to_local(naive_utc)
    # Compare down to minute precision against the local clock.
    for attr in ('year', 'month', 'day', 'hour', 'minute'):
        assert getattr(converted, attr) == getattr(local_now, attr)
def just_load_modules(pkgs: List[str]):
    """Import every package named in *pkgs* and, for real packages, all their submodules.

    Plain modules (no ``__path__``) are imported and then skipped; packages are
    walked recursively and each discovered module is imported for its side effects.
    """
    for pkg_name in pkgs:
        pkg = importlib.import_module(pkg_name)
        if not hasattr(pkg, '__path__'):
            # Not a package — nothing further to walk.
            continue
        for mod_info in pkgutil.walk_packages(pkg.__path__, prefix=f'{pkg_name}.'):
            importlib.import_module(mod_info.name)
# The bare `_ordering` line in the source was a stripped decorator; with
# __eq__/__lt__ defined below it most plausibly supplies the remaining rich
# comparisons (total_ordering style) — restored as `@_ordering`.
@_ordering
class RouteTargetMembershipNLRI(StringifyMixin):
    """Route Target Membership NLRI (RT constraint family, RFC 4684 style)."""

    ROUTE_FAMILY = RF_RTC_UC
    DEFAULT_AS = '0:0'
    DEFAULT_RT = '0:0'

    def __init__(self, origin_as, route_target):
        # Validate unless this is the default (wildcard) RT NLRI.
        # NOTE(review): the `is` identity checks against the '0:0' literals only
        # match when callers pass the class constants themselves; kept as-is to
        # preserve the original contract.
        if not ((origin_as is self.DEFAULT_AS) and (route_target is self.DEFAULT_RT)):
            if (not self._is_valid_asn(origin_as)) or (not self._is_valid_ext_comm_attr(route_target)):
                raise ValueError('Invalid params.')
        self.origin_as = origin_as
        self.route_target = route_target

    def _is_valid_asn(self, asn):
        """True if *asn* is an integer within the 4-octet AS number range."""
        # The upper bound was lost in the source (`0 <= asn <= )`); restored to
        # 0xFFFFFFFF to match the '!I' (unsigned 32-bit) wire encoding below.
        return isinstance(asn, six.integer_types) and 0 <= asn <= 0xFFFFFFFF

    def _is_valid_ext_comm_attr(self, attr):
        """Validate *attr* as an 'x:y' extended-community string (x may be dotted IPv4)."""
        if not isinstance(attr, str):
            return False
        try:
            # A malformed split (missing or extra ':') now also yields False
            # instead of raising an uncaught ValueError.
            (first, second) = attr.split(':')
            if '.' in first:
                socket.inet_aton(first)
            else:
                int(first)
            int(second)
        except (ValueError, socket.error):
            return False
        return True

    def formatted_nlri_str(self):
        """Human-readable 'origin_as:route_target' form."""
        return '%s:%s' % (self.origin_as, self.route_target)

    def is_default_rtnlri(self):
        # Fixed: the source read self._origin_as / self._route_target, which
        # are never assigned (__init__ stores the un-prefixed names) and would
        # always raise AttributeError.
        return (self.origin_as is self.DEFAULT_AS) and (self.route_target is self.DEFAULT_RT)

    def __lt__(self, other):
        return (self.origin_as, self.route_target) < (other.origin_as, other.route_target)

    def __eq__(self, other):
        return (self.origin_as, self.route_target) == (other.origin_as, other.route_target)

    def __hash__(self):
        return hash((self.origin_as, self.route_target))

    @classmethod
    def parser(cls, buf):
        """Parse wire bytes: a 4-byte origin AS followed by an extended community.

        Restored @classmethod — the body constructs the instance via ``cls``.
        """
        idx = 0
        (origin_as,) = struct.unpack_from('!I', buf, idx)
        idx += 4
        route_target = _ExtendedCommunity(buf[idx:])
        return cls(origin_as, route_target)

    def serialize(self):
        """Serialize to wire format; the default NLRI carries only the length prefix."""
        rt_nlri = b''
        if not self.is_default_rtnlri():
            rt_nlri += struct.pack('!I', self.origin_as)
            rt_nlri += self.route_target.serialize()
        # Length prefix is expressed in bits: 12 octets * 8 = 96.
        return struct.pack('B', 8 * 12) + rt_nlri
class PeerCountReporterComponent(AsyncioIsolatedComponent):
    """Component that reports the peer count when enabled via a CLI flag."""

    name = 'Peer Count Reporter'

    @classmethod
    def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
        """Register the --report-peer-count flag (restored @classmethod: first arg is cls)."""
        arg_parser.add_argument('--report-peer-count', action='store_true', help='Report peer count to console')

    def is_enabled(self) -> bool:
        """The component only runs when --report-peer-count was passed."""
        return bool(self._boot_info.args.report_peer_count)

    @classmethod
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        """Run the PeerCountReporter service until it finishes (restored @classmethod)."""
        service = PeerCountReporter(event_bus)
        async with background_asyncio_service(service) as manager:
            await manager.wait_finished()
# NOTE(review): the bare `.future_test_attributes` below is almost certainly
# the residue of a stripped decorator (a pytest marker such as
# `@pytest.mark.future_test_attributes`); as written it is a syntax error —
# confirm against the original test module.
.future_test_attributes
def test_count0_value_bit(tmpdir, merge_files_oneLR):
    """An attribute recorded with count=0 should load with a value of None."""
    path = os.path.join(str(tmpdir), 'count0-value-bit.dlis')
    # Assemble a .dlis file from the chapter-3 fixture parts.
    content = ['data/chap3/start.dlis.part', 'data/chap3/template/default.dlis.part', 'data/chap3/object/object.dlis.part', 'data/chap3/objattr/count0-value-bit.dlis.part']
    merge_files_oneLR(path, content)
    with dlis.load(path) as (f, *tail):
        obj = f.object('VERY_MUCH_TESTY_SET', 'OBJECT', 1, 1)
        attr = obj.attic['DEFAULT_ATTRIBUTE']
        # count=0 means no value is recorded; the attic exposes it as None.
        assert (attr.value == None)
def test_rlp_encode_20_elem_byte_uint_combo() -> None:
    """rlp.encode_sequence of 10 Uints + 10 byte strings uses a long-list header."""
    raw_data = ([Uint(35)] * 10) + ([b'hello'] * 10)
    # Uint(35) encodes to the single byte 0x23 (b'#'); b'hello' to b'\x85hello'.
    # Payload = 10*1 + 10*6 = 70 (0x46 = b'F') bytes, hence the 0xf8 0x46 header.
    # Fixed: the source's expected value declared a 70-byte payload but omitted
    # the ten b'#' bytes for the Uint(35) items.
    expected = bytearray([248]) + b'F' + (b'#' * 10) + (b'\x85hello' * 10)
    assert rlp.encode_sequence(raw_data) == expected
def fence(node: RenderTreeNode, context: RenderContext) -> str:
    """Render a fenced code block, optionally running a registered code formatter on it."""
    info_str = node.info.strip()
    lang = info_str.split(maxsplit=1)[0] if info_str else ''
    code_block = node.content
    # A backtick inside the info string forces tilde fences.
    fence_char = '~' if '`' in info_str else '`'
    if lang in context.options.get('codeformatters', {}):
        fmt_func = context.options['codeformatters'][lang]
        try:
            code_block = fmt_func(code_block, info_str)
        except Exception:
            # Formatter plugins may raise anything: warn and keep the block unformatted.
            assert node.map is not None, 'A fence token must have `map` attribute set'
            filename = context.options.get('mdformat', {}).get('filename', '')
            warn_msg = f'Failed formatting content of a {lang} code block (line {node.map[0] + 1} before formatting)'
            if filename:
                # Fixed: the f-string previously contained no placeholder,
                # leaving the `filename` variable unused.
                warn_msg += f'. Filename: {filename}'
            LOGGER.warning(warn_msg)
    # The fence must be longer than the longest run of fence chars in the content.
    fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
    fence_str = fence_char * fence_len
    return f'''{fence_str}{info_str}
{code_block}{fence_str}'''
class CloudAssetCrawlerTest(CrawlerBase):
def setUp(self):
CrawlerBase.setUp(self)
self.inventory_config = InventoryConfig(gcp_api_mocks.ORGANIZATION_ID, '', {}, 0, {'enabled': True, 'gcs_path': 'gs://test-bucket'})
self.inventory_config.set_service_config(FakeServerConfig('mock_engine'))
self.maxDiff = None
def _run_crawler(self, config):
def _fake_download(full_bucket_path, output_file):
if ('resource' in full_bucket_path):
fake_file = os.path.join(TEST_RESOURCE_DIR_PATH, 'mock_cai_resources.dump')
elif ('iam_policy' in full_bucket_path):
fake_file = os.path.join(TEST_RESOURCE_DIR_PATH, 'mock_cai_iam_policies.dump')
with open(fake_file, 'rb') as f:
output_file.write(f.read())
with MemoryStorage() as storage:
progresser = NullProgresser()
with gcp_api_mocks.mock_gcp() as gcp_mocks:
gcp_mocks.mock_storage.download.side_effect = _fake_download
run_crawler(storage, progresser, config, parallel=False, threads=1)
self.assertEqual(0, progresser.errors, 'No errors should have occurred')
return self._get_resource_counts_from_storage(storage)
def tearDown(self):
CrawlerBase.tearDown(self)
mock.patch.stopall()
def test_cai_crawl_to_memory(self):
result_counts = self._run_crawler(self.inventory_config)
expected_counts = copy.deepcopy(GCP_API_RESOURCES)
expected_counts.update({'backendservice': {'resource': 2}, 'bigquery_table': {'resource': 1}, 'bigtable_cluster': {'resource': 1}, 'bigtable_instance': {'resource': 1}, 'bigtable_table': {'resource': 1}, 'cloudsqlinstance': {'resource': 2}, 'compute_address': {'resource': 2}, 'compute_autoscaler': {'resource': 1}, 'compute_backendbucket': {'resource': 1}, 'compute_healthcheck': {'resource': 1}, 'compute_ {'resource': 1}, 'compute_ {'resource': 1}, 'compute_interconnect': {'resource': 1}, 'compute_interconnect_attachment': {'resource': 1}, 'compute_license': {'resource': 1}, 'compute_router': {'resource': 1}, 'compute_securitypolicy': {'resource': 1}, 'compute_sslcertificate': {'resource': 1}, 'compute_target {'resource': 1}, 'compute_target {'resource': 1}, 'compute_targetinstance': {'resource': 1}, 'compute_targetpool': {'resource': 1}, 'compute_targetsslproxy': {'resource': 1}, 'compute_targettcpproxy': {'resource': 1}, 'compute_targetvpngateway': {'resource': 1}, 'compute_urlmap': {'resource': 1}, 'compute_vpntunnel': {'resource': 1}, 'dataproc_cluster': {'resource': 2, 'iam_policy': 1}, 'dataset': {'dataset_policy': 2, 'iam_policy': 2, 'resource': 3}, 'disk': {'resource': 5}, 'dns_managedzone': {'resource': 1}, 'dns_policy': {'resource': 1}, 'forwardingrule': {'resource': 2}, 'kms_cryptokey': {'iam_policy': 1, 'resource': 1}, 'kms_cryptokeyversion': {'resource': 1}, 'kms_keyring': {'iam_policy': 1, 'resource': 1}, 'kubernetes_cluster': {'resource': 1, 'service_config': 1}, 'kubernetes_clusterrole': {'resource': 1}, 'kubernetes_clusterrolebinding': {'resource': 1}, 'kubernetes_namespace': {'resource': 1}, 'kubernetes_node': {'resource': 1}, 'kubernetes_pod': {'resource': 1}, 'kubernetes_role': {'resource': 1}, 'kubernetes_rolebinding': {'resource': 1}, 'kubernetes_service': {'resource': 1}, 'pubsub_subscription': {'iam_policy': 1, 'resource': 1}, 'pubsub_topic': {'iam_policy': 1, 'resource': 1}, 'service': {'resource': 1}, 'serviceaccount': {'iam_policy': 2, 
'resource': 3}, 'serviceaccount_key': {'resource': 1}, 'spanner_database': {'resource': 1}, 'spanner_instance': {'resource': 1}})
del expected_counts['crm_org_policy']
self.assertEqual(expected_counts, result_counts)
def test_crawl_cai_api_polling_disabled(self):
self.inventory_config.api_quota_configs = {'admin': {'disable_polling': True}, 'appengine': {'disable_polling': True}, 'bigquery': {'disable_polling': True}, 'cloudbilling': {'disable_polling': True}, 'compute': {'disable_polling': True}, 'container': {'disable_polling': True}, 'crm': {'disable_polling': True}, 'iam': {'disable_polling': True}, 'logging': {'disable_polling': True}, 'servicemanagement': {'disable_polling': True}, 'serviceusage': {'disable_polling': True}, 'sqladmin': {'disable_polling': True}, 'storage': {'disable_polling': True}}
result_counts = self._run_crawler(self.inventory_config)
expected_counts = {'appengine_app': {'resource': 2}, 'appengine_service': {'resource': 1}, 'appengine_version': {'resource': 1}, 'backendservice': {'resource': 2}, 'bigtable_cluster': {'resource': 1}, 'bigtable_instance': {'resource': 1}, 'bigtable_table': {'resource': 1}, 'billing_account': {'iam_policy': 2, 'resource': 2}, 'bigquery_table': {'resource': 1}, 'bucket': {'gcs_policy': 2, 'iam_policy': 2, 'resource': 2}, 'cloudsqlinstance': {'resource': 2}, 'compute_address': {'resource': 2}, 'compute_autoscaler': {'resource': 1}, 'compute_backendbucket': {'resource': 1}, 'compute_healthcheck': {'resource': 1}, 'compute_ {'resource': 1}, 'compute_ {'resource': 1}, 'compute_interconnect': {'resource': 1}, 'compute_interconnect_attachment': {'resource': 1}, 'compute_license': {'resource': 1}, 'compute_project': {'resource': 2}, 'compute_router': {'resource': 1}, 'compute_securitypolicy': {'resource': 1}, 'compute_sslcertificate': {'resource': 1}, 'compute_target {'resource': 1}, 'compute_target {'resource': 1}, 'compute_targetinstance': {'resource': 1}, 'compute_targetpool': {'resource': 1}, 'compute_targetsslproxy': {'resource': 1}, 'compute_targettcpproxy': {'resource': 1}, 'compute_targetvpngateway': {'resource': 1}, 'compute_urlmap': {'resource': 1}, 'compute_vpntunnel': {'resource': 1}, 'dataproc_cluster': {'resource': 2, 'iam_policy': 1}, 'dataset': {'dataset_policy': 2, 'iam_policy': 2, 'resource': 3}, 'disk': {'resource': 5}, 'dns_managedzone': {'resource': 1}, 'dns_policy': {'resource': 1}, 'firewall': {'resource': 7}, 'folder': {'iam_policy': 3, 'resource': 3}, 'forwardingrule': {'resource': 2}, 'image': {'resource': 2}, 'instance': {'resource': 4}, 'instancegroup': {'resource': 2}, 'instancegroupmanager': {'resource': 2}, 'instancetemplate': {'resource': 2}, 'kms_cryptokey': {'iam_policy': 1, 'resource': 1}, 'kms_cryptokeyversion': {'resource': 1}, 'kms_keyring': {'iam_policy': 1, 'resource': 1}, 'kubernetes_cluster': {'resource': 1}, 
'kubernetes_clusterrole': {'resource': 1}, 'kubernetes_clusterrolebinding': {'resource': 1}, 'kubernetes_namespace': {'resource': 1}, 'kubernetes_node': {'resource': 1}, 'kubernetes_pod': {'resource': 1}, 'kubernetes_role': {'resource': 1}, 'kubernetes_rolebinding': {'resource': 1}, 'kubernetes_service': {'resource': 1}, 'network': {'resource': 2}, 'organization': {'iam_policy': 1, 'resource': 1}, 'project': {'iam_policy': 4, 'resource': 4}, 'pubsub_subscription': {'iam_policy': 1, 'resource': 1}, 'pubsub_topic': {'iam_policy': 1, 'resource': 1}, 'role': {'resource': 2}, 'service': {'resource': 1}, 'serviceaccount': {'iam_policy': 2, 'resource': 3}, 'serviceaccount_key': {'resource': 1}, 'snapshot': {'resource': 3}, 'spanner_database': {'resource': 1}, 'spanner_instance': {'resource': 1}, 'subnetwork': {'resource': 24}}
self.assertEqual(expected_counts, result_counts)
def test_crawl_cai_data_with_asset_types(self):
    """End-to-end crawl restricted to a CAI asset-type allowlist.

    Builds an InventoryConfig whose CAI settings only allow Folder,
    Organization and Project asset types, feeds the crawler mock CAI dump
    files pre-filtered to those types, then verifies that
    (a) export_assets was called with the ``asset_types`` argument for both
    RESOURCE and IAM_POLICY content types, and
    (b) the resource counts persisted in storage match expectations.
    """
    asset_types = ['cloudresourcemanager.googleapis.com/Folder', 'cloudresourcemanager.googleapis.com/Organization', 'cloudresourcemanager.googleapis.com/Project']
    inventory_config = InventoryConfig(gcp_api_mocks.ORGANIZATION_ID, '', {}, 0, {'enabled': True, 'gcs_path': 'gs://test-bucket', 'asset_types': asset_types})
    inventory_config.set_service_config(FakeServerConfig('fake_engine'))
    # Keep only dump lines mentioning an allowed asset type; types appear as
    # quoted strings inside each JSON line, hence the '"%s"' substring test.
    filtered_assets = []
    with open(os.path.join(TEST_RESOURCE_DIR_PATH, 'mock_cai_resources.dump'), 'r') as f:
        for line in f:
            if any(((('"%s"' % asset_type) in line) for asset_type in asset_types)):
                filtered_assets.append(line)
    filtered_assets = ''.join(filtered_assets)
    # Same filtering for the IAM policy dump.
    filtered_iam = []
    with open(os.path.join(TEST_RESOURCE_DIR_PATH, 'mock_cai_iam_policies.dump'), 'r') as f:
        for line in f:
            if any(((('"%s"' % asset_type) in line) for asset_type in asset_types)):
                filtered_iam.append(line)
    filtered_iam = ''.join(filtered_iam)
    # Serve the filtered data from temp files; the crawler "downloads" them
    # through the mocked GCS client below.
    with unittest_utils.create_temp_file(filtered_assets) as resources:
        with unittest_utils.create_temp_file(filtered_iam) as iam_policies:
            def _fake_download(full_bucket_path, output_file):
                # Dispatch on the requested object path: resource dump vs
                # IAM policy dump (paths are produced by the crawler).
                if ('resource' in full_bucket_path):
                    fake_file = resources
                elif ('iam_policy' in full_bucket_path):
                    fake_file = iam_policies
                with open(fake_file, 'rb') as f:
                    output_file.write(f.read())
            with MemoryStorage() as storage:
                progresser = NullProgresser()
                with gcp_api_mocks.mock_gcp() as gcp_mocks:
                    gcp_mocks.mock_storage.download.side_effect = _fake_download
                    run_crawler(storage, progresser, inventory_config)
                    # The CAI export must have been requested once per
                    # content type, each carrying the asset-type allowlist.
                    expected_calls = [mock.call(gcp_api_mocks.ORGANIZATION_ID, output_config=mock.ANY, content_type='RESOURCE', asset_types=asset_types, blocking=mock.ANY, timeout=mock.ANY), mock.call(gcp_api_mocks.ORGANIZATION_ID, output_config=mock.ANY, content_type='IAM_POLICY', asset_types=asset_types, blocking=mock.ANY, timeout=mock.ANY)]
                    gcp_mocks.mock_cloudasset.export_assets.assert_has_calls(expected_calls, any_order=True)
                self.assertEqual(0, progresser.errors, 'No errors should have occurred')
                result_counts = self._get_resource_counts_from_storage(storage)
                # Note: gsuite/lien/role/sink resources come from the non-CAI
                # crawl path, so they appear despite the asset-type filter.
                expected_counts = {'folder': {'iam_policy': 3, 'resource': 3}, 'gsuite_group': {'resource': 4}, 'gsuite_group_member': {'resource': 1}, 'gsuite_groups_settings': {'resource': 4}, 'gsuite_user': {'resource': 4}, 'gsuite_user_member': {'resource': 3}, 'lien': {'resource': 1}, 'organization': {'iam_policy': 1, 'resource': 1}, 'project': {'billing_info': 4, 'enabled_apis': 4, 'iam_policy': 4, 'resource': 4}, 'role': {'resource': 18}, 'sink': {'resource': 6}}
                self.assertEqual(expected_counts, result_counts)
def test_import_export_rmsasc(tmp_path, simple_well):
    """Round-trip a well through RMS ASCII format and verify the data survives.

    Saves ``simple_well`` to a ``.rmsasc`` file under ``tmp_path``, reads it
    back, and checks the reloaded dataframe matches the source well. Save and
    load timings are printed for informal performance tracking.
    """
    t0 = xtg.timer()
    # NOTE: '$random' is a literal file name component here, not shell expansion.
    wname = (tmp_path / '$random').with_suffix('.rmsasc')
    wuse = simple_well.to_file(wname)
    print('Time for save RMSASC: ', xtg.timer(t0))
    t0 = xtg.timer()
    result = xtgeo.well_from_file(wuse)
    # BUG FIX: the original asserted result.dataframe.equals(result.dataframe),
    # which is trivially True; compare the reloaded data to the source instead.
    assert simple_well.dataframe.equals(result.dataframe)
    print('Time for load RMSASC: ', xtg.timer(t0))
def run_queries(name: str, queries: Sequence[Query]) -> Sequence[Result]:
    """Execute *queries* via the in-cluster KAT client and collect results.

    Query payloads are serialized to a temp JSON file, piped through the
    ``kat_client`` binary running in the ``kat`` pod, and the JSON output is
    mapped back onto the originating Query objects by ``id()``. On command
    failure or unparseable output, every query receives an error Result.
    """
    serialized = [q.as_json() for q in queries]
    queries_by_id = {id(q): q for q in queries}

    path_urls = f'/tmp/kat-client-{name}-urls.json'
    path_results = f'/tmp/kat-client-{name}-results.json'
    path_log = f'/tmp/kat-client-{name}.log'

    with open(path_urls, 'w') as f:
        json.dump(serialized, f)

    res = ShellCommand.run('Running queries', f"tools/bin/kubectl exec -n default -i kat -- /work/kat_client < '{path_urls}' > '{path_results}' 2> '{path_log}'", shell=True)
    if not res:
        return [Result(q, {'error': 'Command execution error'}) for q in queries]

    with open(path_results, 'r') as f:
        raw_output = f.read()

    try:
        parsed = json.loads(raw_output)
    except Exception:
        return [Result(q, {'error': 'Could not parse JSON content after running KAT queries'}) for q in queries]

    collected = []
    for entry in parsed:
        payload = entry['result']
        collected.append(Result(queries_by_id[entry['id']], payload))
    return collected
class CommonCrawlExtractor():
    """Downloads one CommonCrawl WARC file and extracts news articles from it.

    The WARC file is fetched via S3 when credentials permit, otherwise over
    HTTP. Each 'response' record can be filtered by host and publishing date;
    every surviving article is passed to a caller-supplied callback. The
    public entry point is :meth:`extract_from_commoncrawl`.
    """

    # Class-level defaults; overwritten per call in extract_from_commoncrawl().
    __warc_path = None  # path of the WARC file currently being processed
    __local_download_dir_warc = './cc_download_warc/'
    __filter_valid_hosts = []  # substrings matched against the target URI
    __filter_start_date = None
    __filter_end_date = None
    __filter_strict_date = True  # discard articles lacking a publishing date
    __reuse_previously_downloaded_files = True
    __continue_after_error = False
    __ignore_unicode_errors = False
    __fetch_images = False
    __log_level = logging.INFO
    __delete_warc_after_extraction = True
    __log_pathname_fully_extracted_warcs = None
    # Base URL for HTTP fallback downloads. NOTE(review): restored from a
    # truncated literal in the original source -- confirm against the
    # CommonCrawl data endpoint in use.
    __cc_base_url = 'https://data.commoncrawl.org/'
    __cc_bucket = 'commoncrawl'
    __cc_news_crawl_names = None
    __callback_on_article_extracted = None
    __callback_on_warc_completed = None
    __show_download_progress = False
    logging.basicConfig(level=__log_level)
    __logger = logging.getLogger(__name__)

    def __setup(self):
        """Create the download directory and quiet noisy third-party loggers."""
        os.makedirs(self.__local_download_dir_warc, exist_ok=True)
        configure_logging({'LOG_LEVEL': 'ERROR'})
        logging.getLogger('requests').setLevel(logging.CRITICAL)
        logging.getLogger('readability').setLevel(logging.CRITICAL)
        logging.getLogger('PIL').setLevel(logging.CRITICAL)
        logging.getLogger('newspaper').setLevel(logging.CRITICAL)
        logging.getLogger('newsplease').setLevel(logging.CRITICAL)
        logging.getLogger('urllib3').setLevel(logging.CRITICAL)
        boto3.set_stream_logger('botocore', self.__log_level)
        boto3.set_stream_logger('boto3', self.__log_level)
        boto3.set_stream_logger('s3transfer', self.__log_level)
        logging.basicConfig(level=self.__log_level)
        self.__logger = logging.getLogger(__name__)
        self.__logger.setLevel(self.__log_level)

    def __register_fully_extracted_warc_file(self, warc_path):
        """Append *warc_path* to the done-log file, if one was configured."""
        if (self.__log_pathname_fully_extracted_warcs is not None):
            with open(self.__log_pathname_fully_extracted_warcs, 'a') as log_file:
                log_file.write((warc_path + '\n'))

    def filter_record(self, warc_record, article=None):
        """Apply host and date filters to a WARC record.

        Returns a ``(passed, article)`` tuple; the article is only extracted
        when a date filter makes it necessary, and is returned so callers can
        reuse it instead of re-extracting.
        """
        if self.__filter_valid_hosts:
            url = warc_record.rec_headers.get_header('WARC-Target-URI')
            for valid_host in self.__filter_valid_hosts:
                if (valid_host in url):
                    break
            else:
                # No configured host matched the target URI.
                return (False, article)
        if (self.__filter_start_date or self.__filter_end_date):
            if (not article):
                article = self._from_warc(warc_record)
            publishing_date = self.__get_publishing_date(warc_record, article)
            if (not publishing_date):
                # Unknown date: only discard when strict date filtering is on.
                if self.__filter_strict_date:
                    return (False, article)
            else:
                if (self.__filter_start_date and (publishing_date < self.__filter_start_date)):
                    return (False, article)
                if (self.__filter_end_date and (publishing_date > self.__filter_end_date)):
                    return (False, article)
        return (True, article)

    def __get_publishing_date(self, warc_record, article):
        """Return the article's publishing date (parsed if given as str), or None."""
        if hasattr(article, 'date_publish'):
            return (parser.parse(article.date_publish) if isinstance(article.date_publish, str) else article.date_publish)
        else:
            return None

    def __get_remote_index(self):
        """Return the list of CC-NEWS WARC paths from the crawler module.

        BUG FIX: inside a class body, ``commoncrawl_crawler.__get_remote_index``
        is name-mangled to ``commoncrawl_crawler._CommonCrawlExtractor__get_remote_index``
        and raises AttributeError; getattr with the literal name avoids mangling.
        """
        return getattr(commoncrawl_crawler, '__get_remote_index')()

    def __on_download_progress_update(self, blocknum, blocksize, totalsize):
        """urlretrieve reporthook: print a single-line progress indicator."""
        if (not self.__show_download_progress):
            return
        readsofar = (blocknum * blocksize)
        if (totalsize > 0):
            s = ('\r%s / %s' % (size(readsofar), size(totalsize)))
            sys.stdout.write(s)
            if (readsofar >= totalsize):
                sys.stderr.write('\r')
        else:
            # Server did not report a total size; show bytes read so far.
            sys.stdout.write(('\rread %s' % size(readsofar)))

    def __download(self, path):
        """Download the WARC at *path* into the local dir; return the local path.

        Reuses a previously downloaded file when configured to; otherwise
        fetches via the S3 client when available, falling back to HTTP.
        """
        local_filename = urllib.parse.quote_plus(path)
        local_filepath = os.path.join(self.__local_download_dir_warc, local_filename)
        if (os.path.isfile(local_filepath) and self.__reuse_previously_downloaded_files):
            self.__logger.info('found local file %s, not downloading again due to configuration', local_filepath)
            return local_filepath
        else:
            # Remove any stale partial file before re-downloading.
            try:
                os.remove(local_filepath)
            except OSError:
                pass
            if self.__s3_client:
                with open(local_filepath, 'wb') as file_obj:
                    self.__s3_client.download_fileobj(self.__cc_bucket, path, file_obj)
                return local_filepath
            else:
                url = (self.__cc_base_url + path)
                self.__logger.info('downloading %s (local: %s)', url, local_filepath)
                urllib.request.urlretrieve(url, local_filepath, reporthook=self.__on_download_progress_update)
                self.__logger.info('download completed, local file: %s', local_filepath)
                return local_filepath

    def _from_warc(self, record):
        """Extract a NewsPlease article from a single WARC record."""
        return NewsPlease.from_warc(record, decode_errors=('replace' if self.__ignore_unicode_errors else 'strict'), fetch_images=self.__fetch_images)

    def __process_warc_gz_file(self, path_name):
        """Iterate all response records in the WARC file, extract and dispatch articles.

        Keeps pass/discard/error counters, logs periodic throughput stats, and
        finally notifies the completion callback (and optionally deletes the
        local file / records it as fully extracted).
        """
        counter_article_total = 0
        counter_article_passed = 0
        counter_article_discarded = 0
        counter_article_error = 0
        start_time = time.time()
        with open(path_name, 'rb') as stream:
            for record in ArchiveIterator(stream):
                try:
                    if (record.rec_type == 'response'):
                        counter_article_total += 1
                        try:
                            (filter_pass, article) = self.filter_record(record)
                        except (UnicodeDecodeError, EmptyResponseError):
                            filter_pass = False
                        if filter_pass:
                            try:
                                if (not article):
                                    article = self._from_warc(record)
                            except (UnicodeDecodeError, EmptyResponseError):
                                filter_pass = False
                        if filter_pass:
                            counter_article_passed += 1
                            self.__logger.info('article pass (%s; %s; %s)', article.source_domain, article.date_publish, article.title)
                            self.__callback_on_article_extracted(article)
                        else:
                            counter_article_discarded += 1
                            if article:
                                self.__logger.info('article discard (%s; %s; %s)', article.source_domain, article.date_publish, article.title)
                            else:
                                self.__logger.info('article discard (%s)', record.rec_headers.get_header('WARC-Target-URI'))
                        if ((counter_article_total % 10) == 0):
                            elapsed_secs = (time.time() - start_time)
                            secs_per_article = (elapsed_secs / counter_article_total)
                            self.__logger.info('statistics')
                            self.__logger.info('pass = %i, discard = %i, error = %i, total = %i', counter_article_passed, counter_article_discarded, counter_article_error, counter_article_total)
                            self.__logger.info('extraction from current WARC file started %s; %f s/article', human(start_time), secs_per_article)
                except Exception:
                    # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                    # SystemExit still propagate instead of being counted as
                    # per-article errors.
                    if self.__continue_after_error:
                        self.__logger.error('Unexpected error: %s (%s)', *sys.exc_info()[0:2])
                        self.__logger.error(sys.exc_info()[2], exc_info=True)
                        counter_article_error += 1
                    else:
                        raise
        if self.__delete_warc_after_extraction:
            os.remove(path_name)
        self.__register_fully_extracted_warc_file(self.__warc_path)
        self.__callback_on_warc_completed(self.__warc_path, counter_article_passed, counter_article_discarded, counter_article_error, counter_article_total)

    def __run(self):
        """Download the configured WARC file and process it."""
        self.__setup()
        local_path_name = self.__download(self.__warc_path)
        self.__process_warc_gz_file(local_path_name)

    def extract_from_commoncrawl(self, warc_path, callback_on_article_extracted, callback_on_warc_completed=None, valid_hosts=None, start_date=None, end_date=None, strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None, continue_after_error=True, ignore_unicode_errors=False, show_download_progress=False, log_level=logging.ERROR, delete_warc_after_extraction=True, log_pathname_fully_extracted_warcs=None, fetch_images=False):
        """Configure this extractor from the arguments and run the extraction.

        Prefers S3 access to the CommonCrawl bucket; when a HEAD on the bucket
        fails (no credentials / denied), falls back to HTTP downloads.
        """
        self.__warc_path = warc_path
        self.__filter_valid_hosts = valid_hosts
        self.__filter_start_date = start_date
        self.__filter_end_date = end_date
        self.__filter_strict_date = strict_date
        if local_download_dir_warc:
            self.__local_download_dir_warc = local_download_dir_warc
        self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
        self.__continue_after_error = continue_after_error
        self.__ignore_unicode_errors = ignore_unicode_errors
        self.__fetch_images = fetch_images
        self.__callback_on_article_extracted = callback_on_article_extracted
        self.__callback_on_warc_completed = callback_on_warc_completed
        self.__show_download_progress = show_download_progress
        self.__log_level = log_level
        self.__delete_warc_after_extraction = delete_warc_after_extraction
        self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
        self.__s3_client = None
        try:
            s3_client = boto3.client('s3')
            s3_client.head_bucket(Bucket=self.__cc_bucket)
            self.__s3_client = s3_client
        except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError):
            self.__logger.info('Failed to read %s bucket, using monthly WARC file listings', self.__cc_bucket)
        self.__run()
def extractTaholtorfWordpressCom(item):
    """Parser for 'taholtorf.wordpress.com' feed items.

    Returns a release message for known tags, None for previews or items
    without chapter/volume info, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_series = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_series:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_gini_gain():
    """gini_gain on a balanced 8-label set: perfect, mixed, and useless splits."""
    labels = [1, 1, 1, 1, 0, 0, 0, 0]
    cases = [
        ([[1, 1, 1, 1], [0, 0, 0, 0]], 0.5),    # perfect separation
        ([[1, 1, 1, 0], [0, 0, 0, 1]], 0.125),  # one swap each way
        ([[1, 0, 0, 0], [0, 1, 1, 1]], 0.125),  # symmetric of the above
        ([[1, 1, 0, 0], [0, 0, 1, 1]], 0.0),    # no information gained
    ]
    for split, expected in cases:
        assert round(gini_gain(labels, split), 3) == expected
class OptionSeriesPackedbubbleSonification(Options):
    """Sonification options for a packed-bubble series.

    BUG FIX: ``enabled`` was defined twice as plain methods, so the setter
    silently shadowed the getter; restored the @property/@setter pattern used
    by this options framework so both accessors are reachable.
    """

    @property
    def contextTracks(self) -> 'OptionSeriesPackedbubbleSonificationContexttracks':
        """Context tracks sub-configuration."""
        return self._config_sub_data('contextTracks', OptionSeriesPackedbubbleSonificationContexttracks)

    @property
    def defaultInstrumentOptions(self) -> 'OptionSeriesPackedbubbleSonificationDefaultinstrumentoptions':
        """Default instrument options sub-configuration."""
        return self._config_sub_data('defaultInstrumentOptions', OptionSeriesPackedbubbleSonificationDefaultinstrumentoptions)

    @property
    def defaultSpeechOptions(self) -> 'OptionSeriesPackedbubbleSonificationDefaultspeechoptions':
        """Default speech options sub-configuration."""
        return self._config_sub_data('defaultSpeechOptions', OptionSeriesPackedbubbleSonificationDefaultspeechoptions)

    @property
    def enabled(self):
        """Whether sonification is enabled for this series (default True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesPackedbubbleSonificationPointgrouping':
        """Point-grouping sub-configuration."""
        return self._config_sub_data('pointGrouping', OptionSeriesPackedbubbleSonificationPointgrouping)

    @property
    def tracks(self) -> 'OptionSeriesPackedbubbleSonificationTracks':
        """Tracks sub-configuration."""
        return self._config_sub_data('tracks', OptionSeriesPackedbubbleSonificationTracks)
def test_EIP155_transaction_sender_extraction(txn_fixture):
    """The recovered sender must match the tx's own sender and the signing key's address."""
    private_key = keys.PrivateKey(decode_hex(txn_fixture['key']))
    txn = rlp.decode(decode_hex(txn_fixture['signed']), sedes=SpuriousDragonTransaction)
    recovered = extract_transaction_sender(txn)
    assert is_same_address(recovered, txn.sender)
    assert is_same_address(recovered, private_key.public_key.to_canonical_address())
def process_twitter(args):
    """Run the Twitter pipeline: read -> dedup -> score -> filter -> rank -> push.

    Returns the stats object summarizing every stage of the pipeline.
    """
    print('')
    print('# Process Twitter')
    print('')
    op = OperatorTwitter()
    raw = op.readFromJson(args.data_folder, args.run_id, 'twitter.json')
    deduped = op.dedup(raw, target='toread')
    scored = op.score(deduped, start_date=args.start, max_distance=args.max_distance)
    # Drop everything below the fixed quality floor before ranking.
    filtered = op.filter(scored, min_score=4)
    ranked = op.rank(filtered, min_score=args.min_score_to_rank)
    destinations = args.targets.split(',')
    pushed_stats = op.push(ranked, destinations, args.topics_top_k, args.categories_top_k)
    op.printStats('Twitter', raw, deduped, ranked)
    return op.createStats(raw, deduped, scored, filtered, ranked, pushed_stats)
class TestFuseDuplicateFusedElementwise(unittest.TestCase):
    """Tests for the pass that de-duplicates identical fused_elementwise ops."""

    SHAPE = [32, 64, 100]

    @staticmethod
    def _count_fused_elementwise_ops(graph: List[Tensor], target_elementwise_ops: List[FuncEnum]) -> int:
        """Count fused_elementwise ops in *graph* whose elementwise op list matches exactly.

        BUG FIX: marked @staticmethod -- the function takes no ``self`` but is
        invoked as ``self._count_fused_elementwise_ops(...)``; without the
        decorator the bound call would pass ``self`` as ``graph`` and raise
        TypeError.
        """
        fused_elementwise_ops = filter((lambda op: (op._attrs['op'] == 'fused_elementwise')), get_sorted_ops(graph))
        count = 0
        for op in fused_elementwise_ops:
            elementwise_ops = op._attrs['elementwise_ops']
            if (len(target_elementwise_ops) != len(elementwise_ops)):
                continue
            if all((is_elementwise_type(op, target) for (op, target) in zip(elementwise_ops, target_elementwise_ops))):
                count += 1
        return count

    def test_fuse_duplicates(self):
        """Two identical sigmoid branches on the same input fuse into one op."""
        x = gen_input_tensor(shape=self.SHAPE, name='input_x')
        sigmoid1 = ops.elementwise(FuncEnum.SIGMOID)(x)
        sigmoid2 = ops.elementwise(FuncEnum.SIGMOID)(x)
        softmax1 = ops.softmax()(sigmoid1, dim=0)
        softmax2 = ops.softmax()(sigmoid2, dim=0)
        model_output = (softmax1 + softmax2)
        model_output._attrs['is_output'] = True
        model_output._attrs['name'] = 'output'
        x_pt = get_random_torch_tensor(self.SHAPE)
        sigmoid1_pt = torch.sigmoid(x_pt)
        sigmoid2_pt = torch.sigmoid(x_pt)
        softmax1_pt = torch.nn.functional.softmax(sigmoid1_pt, dim=0)
        softmax2_pt = torch.nn.functional.softmax(sigmoid2_pt, dim=0)
        y_pt = (softmax1_pt + softmax2_pt)
        y_ait = torch.empty_like(y_pt)
        with compile_model(model_output, detect_target(), '/tmp', 'fuse_duplicate_fused_elementwise_dups') as module:
            module.run_with_tensors({'input_x': x_pt}, {'output': y_ait})
            nsigmoid = self._count_fused_elementwise_ops(module.debug_sorted_graph, [FuncEnum.SIGMOID])
            self.assertEqual(nsigmoid, 1)
            self.assertTrue(torch.allclose(y_pt, y_ait, atol=0.01, rtol=0.01))

    def test_fuse_duplicates_with_concat_output_accessor(self):
        """Duplicate sigmoids still fuse when outputs feed a concat (output accessors)."""
        x = gen_input_tensor(shape=self.SHAPE, name='input_x')
        sigmoid1 = ops.elementwise(FuncEnum.SIGMOID)(x)
        sigmoid2 = ops.elementwise(FuncEnum.SIGMOID)(x)
        model_output = ops.concatenate()([sigmoid1, sigmoid2])
        model_output._attrs['is_output'] = True
        model_output._attrs['name'] = 'output'
        x_pt = get_random_torch_tensor(self.SHAPE)
        sigmoid1_pt = torch.sigmoid(x_pt)
        sigmoid2_pt = torch.sigmoid(x_pt)
        y_pt = torch.concat([sigmoid1_pt, sigmoid2_pt])
        y_ait = torch.empty_like(y_pt)
        with compile_model(model_output, detect_target(), '/tmp', 'fuse_duplicate_fused_elementwise_dups_with_accessors') as module:
            module.run_with_tensors({'input_x': x_pt}, {'output': y_ait})
            nsigmoid = self._count_fused_elementwise_ops(module.debug_sorted_graph, [FuncEnum.SIGMOID])
            self.assertEqual(nsigmoid, 1)
            self.assertTrue(torch.allclose(y_pt, y_ait, atol=0.01, rtol=0.01))

    def test_dont_fuse_non_duplicates(self):
        """Ops with different functions or different inputs must stay separate."""
        x = gen_input_tensor(shape=self.SHAPE, name='input_x')
        z = gen_input_tensor(shape=self.SHAPE, name='input_z')
        relu_x = ops.elementwise(FuncEnum.RELU)(x)
        gelu_x = ops.elementwise(FuncEnum.GELU)(x)
        gelu_z = ops.elementwise(FuncEnum.GELU)(z)
        softmax1 = ops.softmax()(relu_x, dim=0)
        softmax2 = ops.softmax()(gelu_x, dim=0)
        softmax3 = ops.softmax()(gelu_z, dim=0)
        model_output = ((softmax1 + softmax2) + softmax3)
        model_output._attrs['is_output'] = True
        model_output._attrs['name'] = 'output'
        x_pt = get_random_torch_tensor(self.SHAPE)
        z_pt = get_random_torch_tensor(self.SHAPE)
        relu_x_pt = torch.nn.functional.relu(x_pt)
        gelu_x_pt = torch.nn.functional.gelu(x_pt)
        gelu_z_pt = torch.nn.functional.gelu(z_pt)
        softmax1_pt = torch.nn.functional.softmax(relu_x_pt, dim=0)
        softmax2_pt = torch.nn.functional.softmax(gelu_x_pt, dim=0)
        softmax3_pt = torch.nn.functional.softmax(gelu_z_pt, dim=0)
        y_pt = ((softmax1_pt + softmax2_pt) + softmax3_pt)
        y_ait = torch.empty_like(y_pt)
        with compile_model(model_output, detect_target(), '/tmp', 'fuse_duplicate_fused_elementwise_non_dups') as module:
            module.run_with_tensors({'input_x': x_pt, 'input_z': z_pt}, {'output': y_ait})
            graph = module.debug_sorted_graph
            nrelu = self._count_fused_elementwise_ops(graph, [FuncEnum.RELU])
            ngelu = self._count_fused_elementwise_ops(graph, [FuncEnum.GELU])
            self.assertEqual(nrelu, 1)
            # gelu(x) and gelu(z) read different inputs, so both remain.
            self.assertEqual(ngelu, 2)
            self.assertTrue(torch.allclose(y_pt, y_ait, atol=0.01, rtol=0.01))

    def test_all_interactions(self):
        """Mixed graph: duplicate relus fuse where legal, distinct ops stay put."""
        x = gen_input_tensor(shape=self.SHAPE, name='input_x')
        z = gen_input_tensor(shape=self.SHAPE, name='input_z')
        p = gen_input_tensor(shape=self.SHAPE, name='input_p')
        relu1 = ops.elementwise(FuncEnum.RELU)(x)
        tanh = ops.elementwise(FuncEnum.TANH)(relu1)
        concat1 = ops.concatenate()([relu1, tanh])
        relu2 = ops.elementwise(FuncEnum.RELU)(x)
        concat2 = ops.concatenate()([relu2, p])
        relu3 = ops.elementwise(FuncEnum.RELU)(x)
        softmax = ops.softmax()(relu3, dim=0)
        concat3 = ops.concatenate()([softmax, softmax])
        gelu = ops.elementwise(FuncEnum.GELU)(x)
        relu4 = ops.elementwise(FuncEnum.RELU)(z)
        concat4 = ops.concatenate()([relu4, gelu])
        model_output = (((concat1 + concat2) + concat3) + concat4)
        model_output._attrs['is_output'] = True
        model_output._attrs['name'] = 'output'
        x_pt = get_random_torch_tensor(self.SHAPE)
        z_pt = get_random_torch_tensor(self.SHAPE)
        p_pt = get_random_torch_tensor(self.SHAPE)
        relu1_pt = torch.nn.functional.relu(x_pt)
        tanh_pt = torch.nn.functional.tanh(relu1_pt)
        concat1_pt = torch.concat([relu1_pt, tanh_pt])
        relu2_pt = torch.nn.functional.relu(x_pt)
        concat2_pt = torch.concat([relu2_pt, p_pt])
        relu3_pt = torch.nn.functional.relu(x_pt)
        softmax_pt = torch.nn.functional.softmax(relu3_pt, dim=0)
        concat3_pt = torch.concat([softmax_pt, softmax_pt])
        relu4_pt = torch.nn.functional.relu(z_pt)
        gelu_pt = torch.nn.functional.gelu(x_pt)
        concat4_pt = torch.concat([relu4_pt, gelu_pt])
        y_pt = (((concat1_pt + concat2_pt) + concat3_pt) + concat4_pt)
        y_ait = torch.empty_like(y_pt)
        with compile_model(model_output, detect_target(), '/tmp', 'fuse_duplicate_fused_elementwise_all_interactions') as module:
            module.run_with_tensors(inputs={'input_x': x_pt, 'input_z': z_pt, 'input_p': p_pt}, outputs={'output': y_ait})
            graph = module.debug_sorted_graph
            nrelu = self._count_fused_elementwise_ops(graph, [FuncEnum.RELU])
            ngelu = self._count_fused_elementwise_ops(graph, [FuncEnum.GELU])
            self.assertEqual(nrelu, 2)
            self.assertEqual(ngelu, 1)
            self.assertTrue(torch.allclose(y_pt, y_ait, atol=0.01, rtol=0.01))

    def test_same_and_different_input_accessors(self):
        """Duplicates fuse only when their input accessors (slices) are identical."""
        self._test_input_accessors_impl(slice1_start=[0, 0, 0], slice1_end=[32, 64, 50], slice2_start=[0, 0, 0], slice2_end=[32, 64, 50], should_fuse=True)
        self._test_input_accessors_impl(slice1_start=[0, 0, 0], slice1_end=[32, 64, 50], slice2_start=[0, 0, 50], slice2_end=[32, 64, 100], should_fuse=False)

    def _test_input_accessors_impl(self, slice1_start: List[IntVar], slice1_end: List[IntVar], slice2_start: List[IntVar], slice2_end: List[IntVar], should_fuse: bool):
        """Build two sigmoid branches over slices of x and assert fuse behaviour."""
        x = gen_input_tensor(shape=self.SHAPE, name='input_x')
        x_sliced_1 = ops.dynamic_slice()(x, slice1_start, slice1_end)
        x_sliced_2 = ops.dynamic_slice()(x, slice2_start, slice2_end)
        sigmoid1 = ops.elementwise(FuncEnum.SIGMOID)(x_sliced_1)
        sigmoid2 = ops.elementwise(FuncEnum.SIGMOID)(x_sliced_2)
        softmax1 = ops.softmax()(sigmoid1, dim=0)
        softmax2 = ops.softmax()(sigmoid2, dim=0)
        model_output = (softmax1 + softmax2)
        model_output._attrs['is_output'] = True
        model_output._attrs['name'] = 'output'
        x_pt = get_random_torch_tensor(self.SHAPE)
        x_sliced_1_pt = x_pt[[slice(s, e) for (s, e) in zip(slice1_start, slice1_end)]]
        x_sliced_2_pt = x_pt[[slice(s, e) for (s, e) in zip(slice2_start, slice2_end)]]
        sigmoid1_pt = torch.sigmoid(x_sliced_1_pt)
        sigmoid2_pt = torch.sigmoid(x_sliced_2_pt)
        softmax1_pt = torch.nn.functional.softmax(sigmoid1_pt, dim=0)
        softmax2_pt = torch.nn.functional.softmax(sigmoid2_pt, dim=0)
        y_pt = (softmax1_pt + softmax2_pt)
        y_ait = torch.empty_like(y_pt)
        with compile_model(model_output, detect_target(), '/tmp', 'fuse_duplicate_fused_elementwise_same_input_different_input_accessors') as module:
            module.run_with_tensors({'input_x': x_pt}, {'output': y_ait})
            nsigmoid = self._count_fused_elementwise_ops(module.debug_sorted_graph, [FuncEnum.SIGMOID])
            self.assertEqual(nsigmoid, (1 if should_fuse else 2))
            self.assertTrue(torch.allclose(y_pt, y_ait, atol=0.01, rtol=0.01))
class MyApp(App):
    """Demo Textual app: header, label, input, a row of three buttons, footer."""

    def compose(self):
        yield Header()
        yield Label('[b]Sera que clicou?[/]')
        yield Input('Digite algo!')
        # The three buttons share one horizontal row.
        with Horizontal():
            yield Button('Vermelho!', variant='error')
            yield Button('Verde!', variant='success')
            yield Button('Amarelo!', variant='warning')
        yield Footer()
def extractCyptranslationsWordpressCom(item):
    """Parser for 'cyptranslations.wordpress.com' feed items.

    Returns a release message for known tags, None for previews or items
    without chapter/volume info, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_series = [
        ('sealed lips', 'Sealed Lips', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in known_series:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionSeriesParetoSonificationTracksMappingRate(Options):
    """Rate-mapping options for Pareto-series sonification tracks.

    BUG FIX: every accessor here was defined twice as plain methods, so each
    setter silently shadowed its getter; restored the @property/@setter
    pattern used by this options framework so both are reachable.
    """

    @property
    def mapFunction(self):
        """Mapping function for the rate value."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property the rate is mapped to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped rate."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped rate."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Scope the mapping is computed within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def load_extensions(force=False):
    """Import every installed module whose name starts with PLUGIN_PREFIX.

    Runs at most once per process; pass ``force=True`` to re-scan anyway.
    """
    global _loaded_plugins
    if not force and _loaded_plugins:
        return
    import pkgutil
    import importlib
    _loaded_plugins = True
    for _finder, mod_name, _is_pkg in pkgutil.iter_modules():
        if mod_name.startswith(PLUGIN_PREFIX):
            importlib.import_module(mod_name)
def random_box():
    """Draw one box at a random position (±55 per axis) with random size (0-20) and colour."""
    # RNG calls are kept in the original order: position, size, then colour.
    pos_x = randrange(-55, 54)
    pos_y = randrange(-55, 54)
    pos_z = randrange(-55, 54)
    length = randrange(0, 21)
    height = randrange(0, 21)
    width = randrange(0, 21)
    r = random()
    g = random()
    b = random()
    box(pos=(pos_x, pos_y, pos_z), length=length, height=height, width=width, color=(r, g, b))
class TestStageFlow(TestCase):
    """Exercise DummyStageFlow ordering, status classification, and lookups."""

    def test_get_first_stage(self) -> None:
        self.assertEqual(DummyStageFlow.STAGE_1, DummyStageFlow.get_first_stage())

    def test_get_last_stage(self) -> None:
        self.assertEqual(DummyStageFlow.STAGE_3, DummyStageFlow.get_last_stage())

    def test_move_forward(self) -> None:
        # Walk the chain forward until it terminates in None.
        first = DummyStageFlow.get_first_stage()
        self.assertEqual(DummyStageFlow.STAGE_1, first)
        second = first.next_stage
        self.assertEqual(DummyStageFlow.STAGE_2, second)
        third = second.next_stage
        self.assertEqual(DummyStageFlow.STAGE_3, third)
        self.assertEqual(None, third.next_stage)

    def test_move_backwards(self) -> None:
        # Walk the chain backwards until it terminates in None.
        last = DummyStageFlow.get_last_stage()
        self.assertEqual(DummyStageFlow.STAGE_3, last)
        middle = last.previous_stage
        self.assertEqual(DummyStageFlow.STAGE_2, middle)
        first = middle.previous_stage
        self.assertEqual(DummyStageFlow.STAGE_1, first)
        self.assertEqual(None, first.previous_stage)

    def test_is_started_status(self) -> None:
        started = (DummyStageFlowStatus.STAGE_1_STARTED, DummyStageFlowStatus.STAGE_2_STARTED, DummyStageFlowStatus.STAGE_3_STARTED)
        not_started = (DummyStageFlowStatus.STAGE_1_FAILED, DummyStageFlowStatus.STAGE_1_COMPLETED, DummyStageFlowStatus.STAGE_2_FAILED, DummyStageFlowStatus.STAGE_2_COMPLETED, DummyStageFlowStatus.STAGE_3_FAILED, DummyStageFlowStatus.STAGE_3_COMPLETED)
        for status in started:
            self.assertTrue(DummyStageFlow.is_started_status(status))
        for status in not_started:
            self.assertFalse(DummyStageFlow.is_started_status(status))

    def test_is_completed_status(self) -> None:
        completed = (DummyStageFlowStatus.STAGE_1_COMPLETED, DummyStageFlowStatus.STAGE_2_COMPLETED, DummyStageFlowStatus.STAGE_3_COMPLETED)
        not_completed = (DummyStageFlowStatus.STAGE_1_FAILED, DummyStageFlowStatus.STAGE_1_STARTED, DummyStageFlowStatus.STAGE_2_FAILED, DummyStageFlowStatus.STAGE_2_STARTED, DummyStageFlowStatus.STAGE_3_FAILED, DummyStageFlowStatus.STAGE_3_STARTED)
        for status in completed:
            self.assertTrue(DummyStageFlow.is_completed_status(status))
        for status in not_completed:
            self.assertFalse(DummyStageFlow.is_completed_status(status))

    def test_is_failed_status(self) -> None:
        failed = (DummyStageFlowStatus.STAGE_1_FAILED, DummyStageFlowStatus.STAGE_2_FAILED, DummyStageFlowStatus.STAGE_3_FAILED)
        not_failed = (DummyStageFlowStatus.STAGE_1_COMPLETED, DummyStageFlowStatus.STAGE_1_STARTED, DummyStageFlowStatus.STAGE_2_COMPLETED, DummyStageFlowStatus.STAGE_2_STARTED, DummyStageFlowStatus.STAGE_3_COMPLETED, DummyStageFlowStatus.STAGE_3_STARTED)
        for status in failed:
            self.assertTrue(DummyStageFlow.is_failed_status(status))
        for status in not_failed:
            self.assertFalse(DummyStageFlow.is_failed_status(status))

    def test_get_stage_from_status(self) -> None:
        # Every status variant maps back to its owning stage.
        expectations = {
            DummyStageFlow.STAGE_1: (DummyStageFlowStatus.STAGE_1_COMPLETED, DummyStageFlowStatus.STAGE_1_FAILED, DummyStageFlowStatus.STAGE_1_STARTED),
            DummyStageFlow.STAGE_2: (DummyStageFlowStatus.STAGE_2_COMPLETED, DummyStageFlowStatus.STAGE_2_FAILED, DummyStageFlowStatus.STAGE_2_STARTED),
            DummyStageFlow.STAGE_3: (DummyStageFlowStatus.STAGE_3_COMPLETED, DummyStageFlowStatus.STAGE_3_FAILED, DummyStageFlowStatus.STAGE_3_STARTED),
        }
        for expected_stage, statuses in expectations.items():
            for status in statuses:
                self.assertIs(expected_stage, DummyStageFlow.get_stage_from_status(status))

    def test_get_next_runnable_stage_from_status(self) -> None:
        # Failed -> retry the same stage; completed -> advance; started or
        # final completion -> nothing runnable.
        expectations = {
            DummyStageFlow.STAGE_1: (DummyStageFlowStatus.STAGE_1_FAILED,),
            DummyStageFlow.STAGE_2: (DummyStageFlowStatus.STAGE_1_COMPLETED, DummyStageFlowStatus.STAGE_2_FAILED),
            DummyStageFlow.STAGE_3: (DummyStageFlowStatus.STAGE_2_COMPLETED, DummyStageFlowStatus.STAGE_3_FAILED),
            None: (DummyStageFlowStatus.STAGE_1_STARTED, DummyStageFlowStatus.STAGE_2_STARTED, DummyStageFlowStatus.STAGE_3_STARTED, DummyStageFlowStatus.STAGE_3_COMPLETED),
        }
        for expected_stage, statuses in expectations.items():
            for status in statuses:
                self.assertIs(expected_stage, DummyStageFlow.get_next_runnable_stage_from_status(status))

    def test_get_stage_from_name(self) -> None:
        # Lookup is case-insensitive; unknown names raise.
        expected_stage = DummyStageFlow.get_first_stage()
        self.assertEqual(expected_stage, DummyStageFlow.get_stage_from_str(expected_stage.name.lower()))
        self.assertEqual(expected_stage, DummyStageFlow.get_stage_from_str(expected_stage.name.upper()))
        with self.assertRaises(StageFlowStageNotFoundError):
            DummyStageFlow.get_stage_from_str('do not name your stage this or you will be fired')

    def test_duplicate_status_stage_flow(self) -> None:
        # Re-using a status across two stages must be rejected at class creation.
        with self.assertRaises(StageFlowDuplicateStatusError):
            class DoNotCreateAStageFlowLikeThisOrYouWillBeFired(StageFlow):
                STAGE_1 = DummyStageFlowData(initialized_status=DummyStageFlowStatus.STAGE_1_INITIALIZED, started_status=DummyStageFlowStatus.STAGE_1_STARTED, completed_status=DummyStageFlowStatus.STAGE_1_COMPLETED, failed_status=DummyStageFlowStatus.STAGE_1_FAILED)
                CLOWNY_STAGE = DummyStageFlowData(initialized_status=DummyStageFlowStatus.STAGE_1_INITIALIZED, started_status=DummyStageFlowStatus.STAGE_2_STARTED, completed_status=DummyStageFlowStatus.STAGE_2_COMPLETED, failed_status=DummyStageFlowStatus.STAGE_2_FAILED)
def motion_notify(win, ev, c):
    """Pan the canvas *c* by the pointer movement since the last event.

    Converts the pixel delta from the stored drag anchor into canvas units
    (divided by the zoom scale and then by 3.79 -- presumably a px/mm
    conversion factor, TODO confirm), shifts the canvas bounds accordingly,
    and updates the anchor so the next event is measured incrementally.
    """
    try:
        scale = c.get_scale()
        dx = (ev.x - win.click_x) / scale / 3.79
        dy = (ev.y - win.click_y) / scale / 3.79
        win.click_x = ev.x
        win.click_y = ev.y
        x1, y1, x2, y2 = c.get_bounds()
        c.set_bounds(x1 - dx, y1 - dy, x2 - dx, y2 - dy)
    except Exception:
        # Best-effort: a motion event arriving before any click leaves
        # win.click_x/click_y unset (AttributeError); ignore rather than
        # crash the UI loop. Narrowed from a bare ``except:`` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
def test_get_ops():
    """get_ops resolves backend names to Ops classes; unknown names raise ValueError."""
    assert isinstance(get_ops('numpy'), NumpyOps)
    assert isinstance(get_ops('cupy'), CupyOps)
    # 'cpu' resolves to AppleOps when thinc_apple_ops is installed,
    # otherwise to plain NumpyOps.
    try:
        from thinc_apple_ops import AppleOps
    except ImportError:
        assert isinstance(get_ops('cpu'), NumpyOps)
    else:
        assert isinstance(get_ops('cpu'), AppleOps)
    # Likewise for big-endian platforms via thinc_bigendian_ops.
    try:
        from thinc_bigendian_ops import BigEndianOps
    except ImportError:
        assert isinstance(get_ops('cpu'), NumpyOps)
    else:
        assert isinstance(get_ops('cpu'), BigEndianOps)
    with pytest.raises(ValueError):
        get_ops('blah')
    base_ops = Ops(numpy)
    assert (base_ops.xp == numpy)
def output_registers(bit_offset, in_use):
    """Print fuzzer feature lines for the MMCME2_ADV register bitmap.

    Walks REGISTER_MAP (16 configuration bits per register), emitting one
    ``CMT_LOWER_B.MMCME2_ADV...`` feature line per used bit, then re-emits the
    *in_use* feature line minus any bits already claimed above.

    Note: a dead ``if True:`` debug scaffold (with an unreachable else branch)
    was removed from the original; behavior is unchanged.
    """
    reg = RegisterAddress(frame_offsets=[29, 28], bit_offset=bit_offset, reverse=True)
    for register in REGISTER_MAP:
        if register is None:
            # Unmapped register: consume its 16 bits without emitting features.
            for _ in range(16):
                reg.next_bit(used=False)
            continue
        (layout, register_name) = register
        layout_bits = REGISTER_LAYOUT[layout]
        # Simple layouts are (field, width) pairs; full layouts carry an
        # explicit start bit as a third element.
        simple_layout = (len(layout_bits[0]) == 2)
        bit_count = 0
        if simple_layout:
            for (field, width) in layout_bits:
                for bit in range(width):
                    bit_count += 1
                    if field is None:
                        reg.next_bit(used=False)
                        continue
                    print('CMT_LOWER_B.MMCME2_ADV.{}_{}_{}[{}] {}'.format(register_name, layout, field, bit, reg.next_bit()))
        else:
            for (field, width, start_bit) in layout_bits:
                for bit in range(width):
                    bit_count += 1
                    if field is None:
                        reg.next_bit(used=False)
                        continue
                    print('CMT_LOWER_B.MMCME2_ADV.{}[{}] {}'.format(field, (start_bit + bit), reg.next_bit()))
        # Each register row must account for exactly 16 configuration bits.
        assert (bit_count == 16)
    parts = in_use.split()
    feature = parts[0]
    # Keep only the bits not consumed by the register features above.
    bits = [p for p in parts[1:] if (p not in reg.bits_used)]
    print('{} {}'.format(feature, ' '.join(bits)))
def mul_polys(a, b, modulus, root_of_unity):
    """Multiply two polynomials modulo `modulus` using an FFT over the
    multiplicative subgroup generated by `root_of_unity`."""
    # Collect successive powers of the root until the cycle closes at 1;
    # the list then holds the full evaluation domain plus a trailing 1.
    roots = [1, root_of_unity]
    while roots[-1] != 1:
        roots.append((roots[-1] * root_of_unity) % modulus)
    # Zero-pad both inputs up to the domain size.
    size = len(roots)
    if size > len(a) + 1:
        a = a + [0] * (size - len(a) - 1)
    if size > len(b) + 1:
        b = b + [0] * (size - len(b) - 1)
    # Forward transforms, pointwise product, then the inverse transform
    # (evaluated over the reversed root order, skipping the leading 1).
    fa = _fft(a, modulus, roots[:-1])
    fb = _fft(b, modulus, roots[:-1])
    pointwise = [(u * v) % modulus for (u, v) in zip(fa, fb)]
    return _fft(pointwise, modulus, roots[:0:-1])
class Command(mixins.ETLMixin, BaseCommand):
    """Management command that loads agency reference data from a CSV.

    Loads CGACs, FRECs, subtier agencies, toptier agencies, and agencies in a
    single database transaction — the load is all-or-nothing.  Afterwards the
    affected tables are vacuumed outside the transaction.
    """

    help = 'Loads CGACs, FRECs, Subtier Agencies, Toptier Agencies, and Agencies. Load is all or nothing. If anything fails, nothing gets saved.'
    # Path or URI of the raw agency CSV; set from command options in handle().
    agency_file = None
    # When True, bypasses the MAX_CHANGES safety threshold and forces
    # downstream foreign-key updates even with no detected agency changes.
    force = False
    # Hooks consumed by mixins.ETLMixin.
    etl_logger_function = logger.info
    etl_dml_sql_directory = (Path(__file__).resolve().parent / 'load_agencies_sql')
    etl_timer = Timer

    def add_arguments(self, parser):
        """Register the positional CSV path/URI and the --force flag."""
        parser.add_argument('agency_file', metavar='AGENCY_FILE', help='Path (for local files) or URI (for or S3 files) of the raw agency CSV file to be loaded.')
        parser.add_argument('--force', action='store_true', help=f'Reloads agencies even if the max change threshold of {MAX_CHANGES:,} is exceeded. This is a safety precaution to prevent accidentally updating every award, transaction, and subaward in the system as part of the nightly pipeline. Will also force foreign key table links to be examined even if it appears there were no agency changes.')

    def handle(self, *args, **options):
        """Entry point: log options, run the load atomically, then vacuum."""
        self.agency_file = options['agency_file']
        self.force = options['force']
        logger.info(f'AGENCY FILE: {self.agency_file}')
        logger.info(f'FORCE SWITCH: {self.force}')
        logger.info(f"MAX CHANGE LIMIT: {('unlimited' if self.force else f'{MAX_CHANGES:,}')}")
        with Timer('Load agencies'):
            try:
                with transaction.atomic():
                    self._perform_load()
                    t = Timer('Commit agency transaction')
                    t.log_starting_message()
                # The COMMIT itself happens when the atomic block exits, so the
                # success message below brackets it after the fact.
                t.log_success_message()
            except Exception:
                logger.error('ALL CHANGES ROLLED BACK DUE TO EXCEPTION')
                raise
        # Vacuums run outside the transaction; a failure here does NOT undo
        # the already-committed load.
        try:
            self._vacuum_tables()
        except Exception:
            logger.error('CHANGES WERE SUCCESSFULLY COMMITTED EVEN THOUGH VACUUMS FAILED')
            raise

    def _read_raw_agencies_csv(self):
        """Read the agency CSV into Agency records on self.agencies.

        Returns the number of rows read.  Raises RuntimeError on an empty file.
        """
        agencies = read_csv_file_as_list_of_dictionaries(self.agency_file)
        if (len(agencies) < 1):
            raise RuntimeError(f"Agency file '{self.agency_file}' appears to be empty")
        # Map each CSV row (1-indexed so validation errors can cite the row)
        # onto the Agency record, normalizing blanks via prep() and parsing
        # boolean columns with strtobool.
        self.agencies = [Agency(row_number=row_number, cgac_agency_code=prep(agency['CGAC AGENCY CODE']), agency_name=prep(agency['AGENCY NAME']), agency_abbreviation=prep(agency['AGENCY ABBREVIATION']), frec=prep(agency['FREC']), frec_entity_description=prep(agency['FREC Entity Description']), frec_abbreviation=prep(agency['FREC ABBREVIATION']), subtier_code=prep(agency['SUBTIER CODE']), subtier_name=prep(agency['SUBTIER NAME']), subtier_abbreviation=prep(agency['SUBTIER ABBREVIATION']), toptier_flag=bool(strtobool(prep(agency['TOPTIER_FLAG']))), is_frec=bool(strtobool(prep(agency['IS_FREC']))), frec_cgac_association=bool(strtobool(prep(agency['FREC CGAC ASSOCIATION']))), user_selectable=bool(strtobool(prep(agency['USER SELECTABLE ON USASPENDING.GOV']))), mission=prep(agency['MISSION']), about_agency_data=prep(agency['ABOUT AGENCY DATA']), website=prep(agency['WEBSITE']), congressional_justification=prep(agency['CONGRESSIONAL JUSTIFICATION']), icon_filename=prep(agency['ICON FILENAME'])) for (row_number, agency) in enumerate(agencies, start=1)]
        return len(self.agencies)

    def _perform_validations(self):
        """Run validations.sql against the temp table; raise listing all problems."""
        sql = (Path(self.etl_dml_sql_directory) / 'validations.sql').read_text().format(temp_table=TEMP_TABLE_NAME)
        messages = [result[0] for result in execute_sql(sql, read_only=False)]
        if messages:
            m = '\n'.join(messages)
            raise RuntimeError(f'''The following {len(messages):,} problem(s) have been found with the agency file:
{m}''')

    def _import_raw_agencies(self):
        """Bulk-insert self.agencies into the temp table; returns rows inserted."""
        sql = (Path(self.etl_dml_sql_directory) / 'insert_into.sql').read_text().format(temp_table=TEMP_TABLE_NAME)
        with get_connection(read_only=False).cursor() as cursor:
            # Single round trip: page_size equals the full row count.
            execute_values(cursor.cursor, sql, self.agencies, page_size=len(self.agencies))
            return cursor.rowcount

    def _perform_load(self):
        """Stage the CSV, validate it, then delete/update/insert each target table.

        Enforces the MAX_CHANGES threshold unless --force was supplied, and
        only runs the expensive downstream FK updates when something changed.
        """
        overrides = {'insert_overrides': {'create_date': SQL('now()'), 'update_date': SQL('now()')}, 'update_overrides': {'update_date': SQL('now()')}}
        agency_table = ETLTable('agency', key_overrides=['toptier_agency_id', 'subtier_agency_id'], **overrides)
        cgac_table = ETLTable('cgac', key_overrides=['cgac_code'])
        frec_table = ETLTable('frec', key_overrides=['frec_code'])
        subtier_agency_table = ETLTable('subtier_agency', key_overrides=['subtier_code'], **overrides)
        toptier_agency_table = ETLTable('toptier_agency', key_overrides=['toptier_code'], **overrides)
        agency_query = ETLQueryFile((self.etl_dml_sql_directory / 'agency_query.sql'), temp_table=TEMP_TABLE_NAME)
        cgac_query = ETLQueryFile((self.etl_dml_sql_directory / 'cgac_query.sql'), temp_table=TEMP_TABLE_NAME)
        frec_query = ETLQueryFile((self.etl_dml_sql_directory / 'frec_query.sql'), temp_table=TEMP_TABLE_NAME)
        subtier_agency_query = ETLQueryFile((self.etl_dml_sql_directory / 'subtier_agency_query.sql'), temp_table=TEMP_TABLE_NAME)
        toptier_agency_query = ETLQueryFile((self.etl_dml_sql_directory / 'toptier_agency_query.sql'), temp_table=TEMP_TABLE_NAME, dod_subsumed=DOD_SUBSUMED_AIDS)
        path = self._get_sql_directory_file_path('raw_agency_create_temp_table')
        sql = path.read_text().format(temp_table=TEMP_TABLE_NAME)
        self._execute_dml_sql(sql, 'Create raw agency temp table')
        self._execute_function_and_log(self._read_raw_agencies_csv, 'Read raw agencies csv')
        self._execute_function_and_log(self._import_raw_agencies, 'Import raw agencies')
        self._execute_function(self._perform_validations, 'Perform validations')
        # Load order matters: parents (CGAC/FREC/toptier/subtier) before agency.
        rows_affected = 0
        rows_affected += self._delete_update_insert_rows('CGACs', cgac_query, cgac_table)
        rows_affected += self._delete_update_insert_rows('FRECs', frec_query, frec_table)
        rows_affected += self._delete_update_insert_rows('toptier agencies', toptier_agency_query, toptier_agency_table)
        rows_affected += self._delete_update_insert_rows('subtier agencies', subtier_agency_query, subtier_agency_table)
        rows_affected += self._delete_update_insert_rows('agencies', agency_query, agency_table)
        if ((rows_affected > MAX_CHANGES) and (not self.force)):
            raise RuntimeError(f'Exceeded maximum number of allowed changes ({MAX_CHANGES:,}). Use --force switch if this was intentional.')
        elif ((rows_affected > 0) or self.force):
            self._execute_function_and_log(update_treasury_appropriation_account_agencies, 'Update treasury appropriation accounts')
            self._execute_function_and_log(update_federal_account_agency, 'Update federal accounts')
            self._execute_etl_dml_sql_directory_file('transaction_normalized_update', 'Update transactions')
            self._execute_etl_dml_sql_directory_file('award_update', 'Update awards')
            self._execute_etl_dml_sql_directory_file('subaward_update', 'Update subawards')
        else:
            logger.info('Skipping treasury_appropriation_account, transaction_normalized, awards, and subaward updates since there were no agency changes.')

    def _vacuum_tables(self):
        """VACUUM FULL ANALYZE every table touched by the load."""
        self._execute_dml_sql('vacuum (full, analyze) agency', 'Vacuum agency table')
        self._execute_dml_sql('vacuum (full, analyze) cgac', 'Vacuum cgac table')
        self._execute_dml_sql('vacuum (full, analyze) frec', 'Vacuum frec table')
        self._execute_dml_sql('vacuum (full, analyze) subtier_agency', 'Vacuum subtier_agency table')
        self._execute_dml_sql('vacuum (full, analyze) toptier_agency', 'Vacuum toptier_agency table')
class FaucetLink(Link):
    """Mininet link whose endpoints are both FaucetIntf interfaces."""

    def __init__(self, node1, node2, port1=None, port2=None, intf_name1=None, intf_name2=None, addr1=None, addr2=None, **params):
        # Delegate to the Mininet Link constructor, translating snake_case
        # keyword names to Mininet's camelCase ones and pinning both
        # interface classes to FaucetIntf; one params dict configures both ends.
        Link.__init__(
            self,
            node1,
            node2,
            port1=port1,
            port2=port2,
            intfName1=intf_name1,
            intfName2=intf_name2,
            cls1=FaucetIntf,
            cls2=FaucetIntf,
            addr1=addr1,
            addr2=addr2,
            params1=params,
            params2=params,
        )
def display_main_menu():
    """Render the add-on's root menu as a Kodi directory listing."""
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')
    base_url = 'plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type='
    # (label, menu type) pairs for each top-level entry, in display order.
    entries = [
        (string_load(30406), 'library'),
        (string_load(30407), 'show_global_types'),
        (string_load(30408), 'show_custom_widgets'),
        (string_load(30409), 'addon_items'),
        ('Custom Nodes', 'custom_nodes'),
    ]
    for label, menu_type in entries:
        add_menu_directory_item(label, base_url + menu_type)
    xbmcplugin.endOfDirectory(handle)
class DefaultDialogues(Dialogues, ABC):
    """Collection of dialogues for the default protocol."""

    # A default dialogue terminates either successfully or with a failure.
    END_STATES = frozenset({DefaultDialogue.EndState.SUCCESSFUL, DefaultDialogue.EndState.FAILED})
    # Completed dialogues are retained rather than discarded.
    _keep_terminal_state_dialogues = True

    def __init__(self, self_address: Address, role_from_first_message: Callable[([Message, Address], Dialogue.Role)], dialogue_class: Type[DefaultDialogue]=DefaultDialogue) -> None:
        """Initialize the collection, wiring the default message/dialogue classes."""
        Dialogues.__init__(
            self,
            self_address=self_address,
            end_states=cast(FrozenSet[Dialogue.EndState], self.END_STATES),
            message_class=DefaultMessage,
            dialogue_class=dialogue_class,
            role_from_first_message=role_from_first_message,
        )
class Interpreter():
    """Reference interpreter for LoopIR procedures.

    Binds the supplied `kwargs` to the procedure's declared arguments
    (validating sizes, indices, bools and numeric buffers), then eagerly
    executes the procedure body.  Numeric arguments are numpy buffers and
    are mutated in place.
    """

    def __init__(self, proc, kwargs, use_randomization=False):
        assert isinstance(proc, LoopIR.proc)
        self.proc = proc
        # ChainMap models lexical scoping: new_child() pushes a scope and
        # .parents pops it.  Both return *new* ChainMaps without mutating,
        # so the result must be assigned back (see fixes below).
        self.env = ChainMap()
        self.use_randomization = use_randomization
        for a in proc.args:
            if (not (str(a.name) in kwargs)):
                raise TypeError(f"expected argument '{a.name}' to be supplied")
            if (a.type is T.size):
                if (not is_pos_int(kwargs[str(a.name)])):
                    raise TypeError(f"expected size '{a.name}' to have positive integer value")
                self.env[a.name] = kwargs[str(a.name)]
            elif (a.type is T.index):
                # FIX: the original tested `type(value) is not T.index`.
                # T.index is a LoopIR type object, never the Python type of a
                # runtime value, so every index argument was rejected; the
                # error message shows a plain integer was intended.
                if (not isinstance(kwargs[str(a.name)], int)):
                    raise TypeError(f"expected index variable '{a.name}' to be an integer")
                self.env[a.name] = kwargs[str(a.name)]
            elif (a.type is T.bool):
                if (type(kwargs[str(a.name)]) is not bool):
                    raise TypeError(f"expected bool variable '{a.name}' to be a bool")
                self.env[a.name] = kwargs[str(a.name)]
            else:
                assert a.type.is_numeric()
                self.simple_typecheck_buffer(a, kwargs)
                self.env[a.name] = kwargs[str(a.name)]
        # FIX: `self.env.new_child()` / `self.env.parents` were bare expression
        # statements (no-ops); rebind so the body really runs in a child scope.
        self.env = self.env.new_child()
        self.eval_stmts(proc.body)
        self.env = self.env.parents

    def simple_typecheck_buffer(self, fnarg, kwargs):
        """Validate that a numeric argument is a float ndarray of the declared shape."""
        typ = fnarg.type
        buf = kwargs[str(fnarg.name)]
        nm = fnarg.name
        pre = f"bad argument '{nm}'"
        if (not isinstance(buf, np.ndarray)):
            raise TypeError(f'{pre}: expected numpy.ndarray')
        elif ((buf.dtype != float) and (buf.dtype != np.float32) and (buf.dtype != np.float16)):
            raise TypeError(f"{pre}: expected buffer of floating-point values; had '{buf.dtype}' values")
        if typ.is_real_scalar():
            # Scalars are modelled as shape-(1,) buffers.
            if (tuple(buf.shape) != (1,)):
                raise TypeError(f'{pre}: expected buffer of shape (1,), but got shape {tuple(buf.shape)}')
        else:
            shape = self.eval_shape(typ)
            if (shape != tuple(buf.shape)):
                raise TypeError(f'{pre}: expected buffer of shape {shape}, but got shape {tuple(buf.shape)}')

    def eval_stmts(self, stmts):
        """Execute a statement list in order."""
        for s in stmts:
            self.eval_s(s)

    def eval_s(self, s):
        """Execute a single LoopIR statement."""
        styp = type(s)
        if (styp is LoopIR.Pass):
            pass
        elif ((styp is LoopIR.Assign) or (styp is LoopIR.Reduce)):
            lbuf = self.env[s.name]
            if (len(s.idx) == 0):
                # Scalars live in shape-(1,) buffers; address element 0.
                idx = (0,)
            else:
                idx = tuple((self.eval_e(a) for a in s.idx))
            rhs = self.eval_e(s.rhs)
            if (styp is LoopIR.Assign):
                lbuf[idx] = rhs
            else:
                lbuf[idx] += rhs
        elif (styp is LoopIR.If):
            cond = self.eval_e(s.cond)
            if cond:
                # FIX: scope push/pop must rebind self.env (was a no-op before).
                self.env = self.env.new_child()
                self.eval_stmts(s.body)
                self.env = self.env.parents
            if (s.orelse and (not cond)):
                self.env = self.env.new_child()
                self.eval_stmts(s.orelse)
                self.env = self.env.parents
        elif (styp is LoopIR.For):
            lo = self.eval_e(s.lo)
            hi = self.eval_e(s.hi)
            assert (self.use_randomization is False), 'TODO: Implement Rand'
            self.env = self.env.new_child()
            for itr in range(lo, hi):
                self.env[s.iter] = itr
                self.eval_stmts(s.body)
            self.env = self.env.parents
        elif (styp is LoopIR.Alloc):
            if s.type.is_real_scalar():
                self.env[s.name] = np.empty([1])
            else:
                size = self.eval_shape(s.type)
                self.env[s.name] = np.empty(size)
        elif (styp is LoopIR.Call):
            # Evaluate arguments in the caller's scope, then run the callee
            # in a fresh Interpreter with its own environment.
            argvals = [self.eval_e(a, call_arg=True) for a in s.args]
            argnames = [str(a.name) for a in s.f.args]
            kwargs = {nm: val for (nm, val) in zip(argnames, argvals)}
            Interpreter(s.f, kwargs, use_randomization=self.use_randomization)
        else:
            assert False, 'bad case'

    def eval_e(self, e, call_arg=False):
        """Evaluate an expression; `call_arg=True` passes buffers through unindexed."""
        etyp = type(e)
        if (etyp is LoopIR.Read):
            buf = self.env[e.name]
            if (call_arg or isinstance(buf, (int, bool))):
                return buf
            else:
                idx = ((0,) if (len(e.idx) == 0) else tuple((self.eval_e(a) for a in e.idx)))
                return buf[idx]
        elif (etyp is LoopIR.Const):
            return e.val
        elif (etyp is LoopIR.USub):
            return (- self.eval_e(e.arg))
        elif (etyp is LoopIR.BinOp):
            (lhs, rhs) = (self.eval_e(e.lhs), self.eval_e(e.rhs))
            if (e.op == '+'):
                return (lhs + rhs)
            elif (e.op == '-'):
                return (lhs - rhs)
            elif (e.op == '*'):
                return (lhs * rhs)
            elif (e.op == '/'):
                if isinstance(lhs, int):
                    # Integer division in LoopIR rounds up (ceiling division).
                    return (((lhs + rhs) - 1) // rhs)
                else:
                    return (lhs / rhs)
            elif (e.op == '%'):
                return (lhs % rhs)
            elif (e.op == '=='):
                return (lhs == rhs)
            elif (e.op == '<'):
                return (lhs < rhs)
            elif (e.op == '>'):
                return (lhs > rhs)
            elif (e.op == '<='):
                return (lhs <= rhs)
            elif (e.op == '>='):
                return (lhs >= rhs)
            elif (e.op == 'and'):
                return (lhs and rhs)
            elif (e.op == 'or'):
                return (lhs or rhs)
        elif (etyp is LoopIR.BuiltIn):
            args = [self.eval_e(a) for a in e.args]
            return e.f.interpret(args)
        else:
            assert False, 'bad case'

    def eval_shape(self, typ):
        """Evaluate a type's symbolic shape to a concrete tuple of ints."""
        return tuple((self.eval_e(s) for s in typ.shape()))
def verify_json_dump(joblist, config, selected_jobs, run_id):
    """Assert that the dumped job config matches the selected source jobs."""
    expected_default_env = {'_ERT_ITERATION_NUMBER': '0', '_ERT_REALIZATION_NUMBER': '0', '_ERT_RUNPATH': './'}
    assert 'config_path' in config
    assert 'config_file' in config
    assert run_id == config['run_id']
    assert len(selected_jobs) == len(config['jobList'])
    for job_index, selected_job in enumerate(selected_jobs):
        job = joblist[selected_job]
        loaded_job = config['jobList'][job_index]
        # Normalise optional fields for comparison, remembering the originals
        # so the shared job definitions are restored afterwards.
        saved_arg_list = job['argList']
        saved_name = job['name']
        job['argList'] = empty_list_if_none(job['argList'])
        job['name'] = default_name_if_none(job['name'])
        for key in json_keywords:
            if key in ('stdout', 'stderr'):
                # Std streams are compared via their derived file names.
                assert create_std_file(job, std=key, job_index=job_index) == loaded_job[key]
            elif key == 'executable':
                # The dumped executable may be an absolute path containing ours.
                assert job[key] in loaded_job[key]
            elif key == 'environment':
                if job[key] is None:
                    assert loaded_job[key] == expected_default_env
                else:
                    for k in job[key]:
                        if k in ForwardModel.default_env:
                            # Defaulted variables come from ForwardModel and
                            # must be dumped with their expected values.
                            assert job[key][k] == ForwardModel.default_env[k]
                            assert loaded_job[key][k] == expected_default_env[k]
                        else:
                            assert job[key][k] == loaded_job[key][k]
            else:
                assert job[key] == loaded_job[key]
        job['argList'] = saved_arg_list
        job['name'] = saved_name
class OptionSeriesSplineSonificationDefaultinstrumentoptionsMappingLowpassResonance(Options):
    """Generated accessors for the spline sonification mapping `lowpassResonance` option.

    NOTE(review): each accessor pair below shares one name, so the later
    (setter) definition shadows the earlier (getter) at class-creation time.
    This matches a generated ``@property`` / ``@<name>.setter`` pattern with
    the decorators apparently stripped — confirm against the generator output.
    """

    def mapFunction(self):
        # Getter: read the configured value (shadowed by the setter below).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value without JS-expression wrapping.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class ClkReg1():
    """Decoded view of a 16-bit ClkReg1 register word.

    Fields (LSB first): low_time[5:0], high_time[11:6], reserved[12],
    phase_mux[15:13].
    """

    def __init__(self, value=0):
        self.unpack(value)

    def unpack(self, value):
        """Split a register word into its bit fields."""
        self.low_time = (value >> 0) & 0x3F
        self.high_time = (value >> 6) & 0x3F
        self.reserved = (value >> 12) & 0x1
        self.phase_mux = (value >> 13) & 0x7

    def pack(self):
        """Reassemble the register word from the current field values."""
        return ((self.low_time << 0)
                | (self.high_time << 6)
                | (self.reserved << 12)
                | (self.phase_mux << 13))

    def __repr__(self):
        lines = [
            'ClkReg1:',
            ' low_time: {:d}'.format(self.low_time),
            ' high_time: {:d}'.format(self.high_time),
            ' reserved: {:d}'.format(self.reserved),
            ' phase_mux: {:d}'.format(self.phase_mux),
        ]
        return '\n'.join(lines)
def test_save_image_in_tempfolder():
    """save_image_in_temp_folder writes a timestamped PNG into <tmpdir>/normcap."""
    utils.logger.setLevel('DEBUG')
    img = QtGui.QImage(20, 20, QtGui.QImage.Format.Format_RGB32)
    # Unique postfix so this run's file can be found among older leftovers.
    unique_suffix = f'_unittest_{uuid4()}'
    utils.save_image_in_temp_folder(img, postfix=unique_suffix)
    matches = list((Path(tempfile.gettempdir()) / 'normcap').glob(f'*{unique_suffix}*'))
    assert len(matches) == 1
    saved = matches[0]
    assert saved.suffix == '.png'
    # File stem starts with a YYYY-MM-DD_HH-MM-SS timestamp.
    assert re.fullmatch('^\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}-\\d{2}_.*', saved.stem)
class ImgFace(RectFace):
    """Rectangular face rendering an image embedded as a base64 data URI."""

    def __init__(self, img_path, width, height, name='', padding_x=0, padding_y=0):
        RectFace.__init__(self, width=width, height=height, name=name, padding_x=padding_x, padding_y=padding_y)
        with open(img_path, 'rb') as handle:
            encoded = base64.b64encode(handle.read()).decode('utf-8')
        # Extension without the leading dot, used as the MIME subtype.
        extension = pathlib.Path(img_path).suffix[1:]
        if extension not in ALLOWED_IMG_EXTENSIONS:
            # Warn but continue: the browser may still render the image.
            print('The image does not have an allowed format: ' + extension + ' not in ' + str(ALLOWED_IMG_EXTENSIONS))
        self.content = f'data:image/{extension};base64,{encoded}'
        self.stretch = False

    def __name__(self):
        return 'ImgFace'

    def draw(self, drawer):
        yield draw_img(self._box, self.content)
class Attachment():
    """A message attachment identified by id and/or carrying inline content."""

    def __init__(self, id: Optional[Union[(str, int, float)]]=None, kind: Optional[Union[(AttachmentKind, str)]]=None, content: Optional[Union[(AttachmentContent, Tuple[(str, Union[(str, bytes)])])]]=None, title: Optional[str]=None, raw: Any=None, get_file: Optional[Callable[(..., Awaitable)]]=None):
        # An attachment is unusable without at least one of id/content.
        if not (id or content):
            raise ValueError('Attachment at least should have id or content')
        self.id = id
        self.kind = kind
        self.content = content
        self.title = title
        self.raw: Any = raw
        self._get_file = get_file
        # Lazily-fetched payload cache for get_content().
        self._file = None

    async def get_content(self):
        """Return the attachment payload, fetching and caching it on first use."""
        if self._file is None:
            if self._get_file is None:
                raise ValueError("Can't get content for this file")
            self._file = (await self._get_file())
        return self._file
# Fix: the bare '.django_db' line was a garbled decorator (a syntax error as
# written); restore the standard pytest-django marker.
@pytest.mark.django_db
def test_category_awarding_subagency_subawards(agency_test_data):
    """Subaward spending grouped by awarding subagency returns the expected bucket."""
    test_payload = {'category': 'awarding_subagency', 'subawards': True, 'page': 1, 'limit': 50}
    spending_by_category_logic = AwardingSubagencyViewSet().perform_search(test_payload, {})
    expected_response = {'category': 'awarding_subagency', 'limit': 50, 'page_metadata': {'page': 1, 'next': None, 'previous': None, 'hasNext': False, 'hasPrevious': False}, 'results': [{'amount': 150, 'name': 'Awarding Subtier Agency 3', 'code': 'SA3', 'id': 1003}], 'messages': [get_time_period_message()]}
    assert expected_response == spending_by_category_logic
class SemanticDocument(SemanticMixedContentWrapper):
    """Top-level semantic document: front matter, body and back matter."""

    def __init__(self):
        self.front = SemanticFront()
        self.body_section = SemanticSection(section_type=SemanticSectionTypes.BODY)
        self.back_section = SemanticSection(section_type=SemanticSectionTypes.BACK)
        # Register the three sections, in document order, as mixed content.
        sections = [self.front, self.body_section, self.back_section]
        super().__init__(sections)
def generate_sample_data():
    """Yield a rewound MCAP file with ten /chatter Strings and ten /empty messages."""
    file = TemporaryFile('w+b')
    writer = Writer(file)
    writer.start(profile='ros2', library='test')

    def write_ten(msg_type, topic, make_msg):
        # Register the schema/channel pair, then log ten CDR-serialized
        # messages with log/publish timestamps spaced 1000 apart.
        schema_id = writer.register_schema(name=msg_type._type, encoding='ros2msg', data=msg_type._full_text.encode())
        channel_id = writer.register_channel(topic=topic, message_encoding='cdr', schema_id=schema_id)
        for i in range(10):
            buff = BytesIO()
            make_msg(i).serialize(buff)
            writer.add_message(channel_id=channel_id, log_time=(i * 1000), data=buff.getvalue(), publish_time=(i * 1000))

    write_ten(String, '/chatter', lambda i: String(data=f'string message {i}'))
    write_ten(Empty, '/empty', lambda i: Empty())
    writer.finish()
    file.seek(0)
    yield file
class ColumnInteractionPlot(Metric[ColumnInteractionPlotResults]):
    """Prepares render data describing the interaction of two columns.

    Dispatches on the (x, y) column-type pair — numerical, categorical or
    datetime — and returns the matching scatter/contour, heatmap, box-plot,
    or date-aggregated representation.
    """

    x_column: str
    y_column: str

    def __init__(self, x_column: str, y_column: str, options: AnyOptions=None):
        self.x_column = x_column
        self.y_column = y_column
        super().__init__(options=options)

    def calculate(self, data: InputData) -> ColumnInteractionPlotResults:
        """Build interaction-plot results for the configured column pair.

        Raises:
            ValueError: if either column is missing from the dataset, or the
                type combination is unsupported (e.g. datetime vs datetime).
        """
        for col in [self.x_column, self.y_column]:
            if not data.has_column(col):
                raise ValueError(f"Column '{col}' not found in dataset.")
        (x_type, x_curr, x_ref) = data.get_data(self.x_column)
        (y_type, y_curr, y_ref) = data.get_data(self.y_column)
        # Infinities would distort binning/aggregation; treat them as missing.
        for column in [x_curr, x_ref, y_curr, y_ref]:
            if column is not None:
                column.replace(to_replace=[np.inf, (- np.inf)], value=np.nan, inplace=True)
        if x_type == ColumnType.Categorical:
            (x_curr, x_ref) = relabel_data(x_curr, x_ref)
        if y_type == ColumnType.Categorical:
            (y_curr, y_ref) = relabel_data(y_curr, y_ref)
        # Aggregate unless raw data was explicitly requested (simplified from
        # the original set-then-conditionally-flip assignment).
        agg_data = not self.get_options().render_options.raw_data
        if (x_type == ColumnType.Numerical) and (y_type == ColumnType.Numerical):
            # Note: the original passed `x_ref if x_ref is not None else None`
            # (and likewise for y_ref) — an identity conditional, simplified.
            (raw_plot, agg_plot) = get_data_for_num_num_plot(agg_data, self.x_column, self.y_column, x_curr, y_curr, x_ref, y_ref)
            if raw_plot is not None:
                return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current_scatter=raw_plot['current'], reference_scatter=raw_plot.get('reference'))
            return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current_contour=agg_plot['current'], reference_contour=agg_plot.get('reference'))
        if (x_type == ColumnType.Categorical) and (y_type == ColumnType.Categorical):
            result = get_data_for_cat_cat_plot(self.x_column, self.y_column, x_curr, y_curr, x_ref, y_ref)
            return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current=result['current'], reference=result.get('reference'))
        if ((x_type == ColumnType.Categorical) and (y_type == ColumnType.Numerical)) or ((x_type == ColumnType.Numerical) and (y_type == ColumnType.Categorical)):
            # Mixed cat/num pair: box plots of the numeric column per category.
            curr_df = pd.DataFrame({self.x_column: x_curr, self.y_column: y_curr})
            ref_df = None
            if (x_ref is not None) and (y_ref is not None):
                ref_df = pd.DataFrame({self.x_column: x_ref, self.y_column: y_ref})
            if x_type == ColumnType.Categorical:
                (cat_name, num_name) = (self.x_column, self.y_column)
            else:
                (cat_name, num_name) = (self.y_column, self.x_column)
            result = prepare_box_data(curr_df, ref_df, cat_name, num_name)
            return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current_boxes=result['current'], reference_boxes=result.get('reference'))
        if ((x_type == ColumnType.Numerical) and (y_type == ColumnType.Datetime)) or ((x_type == ColumnType.Datetime) and (y_type == ColumnType.Numerical)):
            # Date/numeric pair: aggregate the numeric series over time.
            if x_type == ColumnType.Numerical:
                (date_name, date_curr, date_ref) = (self.y_column, y_curr, y_ref)
                (num_name, num_curr, num_ref) = (self.x_column, x_curr, x_ref)
            else:
                (date_name, date_curr, date_ref) = (self.x_column, x_curr, x_ref)
                (num_name, num_curr, num_ref) = (self.y_column, y_curr, y_ref)
            (curr_res, ref_res, prefix) = prepare_data_for_date_num(date_curr, date_ref, date_name, num_name, num_curr, num_ref)
            return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current=curr_res, reference=ref_res, prefix=prefix)
        if ((x_type == ColumnType.Categorical) and (y_type == ColumnType.Datetime)) or ((x_type == ColumnType.Datetime) and (y_type == ColumnType.Categorical)):
            # Date/categorical pair: category counts aggregated over time.
            if x_type == ColumnType.Categorical:
                (date_name, date_curr, date_ref) = (self.y_column, y_curr, y_ref)
                (cat_name, cat_curr, cat_ref) = (self.x_column, x_curr, x_ref)
            else:
                (date_name, date_curr, date_ref) = (self.x_column, x_curr, x_ref)
                (cat_name, cat_curr, cat_ref) = (self.y_column, y_curr, y_ref)
            (curr_res, ref_res, prefix) = prepare_data_for_date_cat(date_curr, date_ref, date_name, cat_name, cat_curr, cat_ref)
            return ColumnInteractionPlotResults(x_type=x_type, y_type=y_type, current=curr_res, reference=ref_res, prefix=prefix)
        raise ValueError(f'Combination of types {x_type} and {y_type} is not supported.')
def extractInnercitynovelBlogspotCom(item):
    """Parser for 'innercitynovel.blogspot.com' feed items.

    Returns a release message for recognised tags, None for previews or
    untitled chapters, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items without any chapter/volume information.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # (feed tag, series name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionSeriesScatter3dSonificationDefaultinstrumentoptionsMappingTremoloSpeed(Options):
    """Generated accessors for the scatter3d sonification mapping `tremolo.speed` option.

    NOTE(review): each accessor pair below shares one name, so the later
    (setter) definition shadows the earlier (getter) at class-creation time.
    This matches a generated ``@property`` / ``@<name>.setter`` pattern with
    the decorators apparently stripped — confirm against the generator output.
    """

    def mapFunction(self):
        # Getter: read the configured value (shadowed by the setter below).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value without JS-expression wrapping.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def test_feeder_list_devices(client: TestClient, with_registered_device: None):
    """The feeder listing endpoint returns exactly the one registered device."""
    from tests.test_database_models import SAMPLE_DEVICE_HID
    response = client.get('/api/v1/feeder')
    assert response.status_code == 200
    payload = response.json()
    assert len(payload) == 1
    assert payload[0]['hid'] == SAMPLE_DEVICE_HID
class OptionPlotoptionsBulletSonificationDefaultspeechoptionsMappingVolume(Options):
    """Generated accessors for the bullet sonification speech mapping `volume` option.

    NOTE(review): each accessor pair below shares one name, so the later
    (setter) definition shadows the earlier (getter) at class-creation time.
    This matches a generated ``@property`` / ``@<name>.setter`` pattern with
    the decorators apparently stripped — confirm against the generator output.
    """

    def mapFunction(self):
        # Getter: read the configured value (shadowed by the setter below).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value without JS-expression wrapping.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def temppass(request):
    """Issue a temporary session password to the admin user after verifying an
    RSA signature over the session pin.

    Fixes:
    - `request.POST.get('pubkey')` could return None, making the ':' membership
      test raise TypeError; default to ''.
    - A pubkey with extra colons or non-integer parts previously raised an
      uncaught ValueError (HTTP 500); it now returns the format-error page.
    - The temporary password is generated with `secrets.choice` (CSPRNG)
      instead of `random.choice`, which is not suitable for secrets.
    """
    import secrets
    handle = request.POST.get('handle', '')
    if handle != ADMIN_USER:
        return error_template(request, 'Could not find signature in feed of user {}'.format(handle))
    publickey_str = request.POST.get('pubkey', '')
    if ':' not in publickey_str:
        return error_template(request, 'Incorrect public key format, should be "e:n"')
    parts = publickey_str.split(':')
    if len(parts) != 2:
        return error_template(request, 'Incorrect public key format, should be "e:n"')
    try:
        (e, n) = [int(x) for x in parts]
    except ValueError:
        return error_template(request, 'Incorrect public key format, should be "e:n"')
    if e <= 3:
        return error_template(request, 'Invalid public key, e is too small.')
    pin = request.session.get('pin')
    if not pin:
        return error_template(request, 'No validation pin in your session, aborting.')
    if validate_sig(pin, n, e, SIGNATURE):
        letters = string.ascii_lowercase + string.digits + string.ascii_uppercase
        # 15 chars from a 62-symbol alphabet via a cryptographically secure RNG.
        password = ''.join(secrets.choice(letters) for _ in range(15))
        request.session['pass'] = password
        template = loader.get_template('temppass.html')
        context = {'password': password}
        return HttpResponse(template.render(context, request))
    return error_template(request, 'Signature could not be verified with that public key.')
class TestListPluggedDevice(object):
    """Tests for devices.list_plugged_devices."""

    def test_debug_devices(self, monkeypatch):
        """With RIVALCFG_PROFILE set, the debug device appears in the listing."""
        monkeypatch.setenv('RIVALCFG_PROFILE', '1038:1702')
        # The env var injects a fake Rival 100 (vendor 0x1038, product 0x1702).
        found = any(
            dev['vendor_id'] == 4152
            and dev['product_id'] == 5890
            and dev['name'] == 'SteelSeries Rival 100'
            for dev in devices.list_plugged_devices()
        )
        assert found
class FederalAccountCountViewSet(DisasterBase):
    """Counts federal accounts with non-zero disaster spending for the
    provided DEF codes."""

    endpoint_doc = 'usaspending_api/api_contracts/contracts/v2/disaster/federal_account/count.md'
    # NOTE(review): `_response()` below appears to be a truncated decorator for
    # the following method (the leading '@' and prefix look stripped) — confirm
    # the intended decorator name against the upstream source.
    _response()
    def post(self, request: Request) -> Response:
        # Count FederalAccounts having at least one file-B row that matches the
        # request's DEF codes, closed submission periods, and non-zero totals.
        filters = [Q(treasury_account__federal_account_id=OuterRef('pk')), self.is_in_provided_def_codes, self.all_closed_defc_submissions, self.is_non_zero_total_spending]
        count = FederalAccount.objects.annotate(include=Exists(FinancialAccountsByProgramActivityObjectClass.objects.filter(*filters).values('pk'))).filter(include=True).values('pk').count()
        return Response({'count': count})
def initialize_network_objects() -> List[Network]:
    """Build Network objects from the bundled eth_networks.json.

    Entries whose chain id is unknown to eth-typing's ChainId enum are
    skipped with a warning rather than raising.
    """
    networks: List[Network] = []
    json_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '__json'))
    with open(os.path.join(json_dir, 'eth_networks.json'), encoding='UTF-8') as fp:
        entries = json.load(fp)
    for entry in entries:
        try:
            networks.append(Network(chain_id=entry['chainId'], name=entry['name'], shortName=entry['shortName'], symbol=ChainId(entry['chainId'])))
        except ValueError:
            # ChainId(...) rejects ids eth-typing doesn't know about yet.
            warnings.warn(f"Network {entry['chainId']} with name '{entry['name']}' does not have a valid ChainId. eth-typing should be updated with the latest networks.", stacklevel=2)
    return networks
class NodeUpdateForm(UpdateForm):
    """Update form for compute nodes; errors are prefixed with the node hostname."""

    _api_call = system_node_update
    # Hostname of the node currently being updated (set per call).
    _node_hostname = None
    hostnames = ArrayField(required=True, widget=forms.HiddenInput(attrs={'class': 'hide'}))

    def _add_error(self, field_name, error):
        # When a node is in play, prefix each error message with its hostname
        # so multi-node failures stay attributable.
        hostname = self._node_hostname
        if hostname:
            if isinstance(error, (list, tuple)):
                error = [('%s: %s' % (hostname, err)) for err in error]
            else:
                error = ('%s: %s' % (hostname, error))
        return super(NodeUpdateForm, self)._add_error(field_name, error)

    def call_system_node_update(self, hostname):
        """Run the 'update' action against one node, tagging errors with its hostname."""
        self._node_hostname = hostname
        return self.save(action='update', args=(hostname,))
def extractPoppyscanlationsVideoBlog(item):
    """Parser for Poppy Scanlations feed items.

    Returns a release message for recognised tags, None for previews or
    untitled chapters, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items without any chapter/volume information.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # (feed tag, series name, translation type)
    tagmap = [
        ('A Seductive Gentleman', 'A Seductive Gentleman', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# Fix: the bare '.django_db' line was a garbled decorator (a syntax error as
# written); restore the standard pytest-django marker.
@pytest.mark.django_db
def test_just_fy(client, create_gtas_data):
    """Filtering total budgetary resources by fiscal year returns both periods,
    newest period first."""
    resp = client.get('/api/v2/references/total_budgetary_resources/?fiscal_year=2020')
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data == {'results': [{'fiscal_year': 2020, 'fiscal_period': 3, 'total_budgetary_resources': Decimal(4)}, {'fiscal_year': 2020, 'fiscal_period': 2, 'total_budgetary_resources': Decimal(3)}], 'messages': []}
def test_etm_chat_conversion_group(db, slave):
    """Converting a slave GroupChat yields an ETMGroupChat with equivalent membership."""
    source = slave.get_chat_by_criteria(chat_type='GroupChat')
    assert isinstance(source, GroupChat)
    converted = convert_chat(db, source)
    assert isinstance(converted, ETMGroupChat)
    # The converter must keep the 'self' member and wrap every member.
    assert isinstance(converted.self, ETMSelfChatMember)
    assert converted.self in converted.members
    assert all(isinstance(member, ETMChatMember) for member in converted.members)
    assert len(converted.members) == len(source.members)
def test_discard_thin_prisms():
    """_discard_thin_prisms drops prisms thinner than the threshold, keeping
    densities aligned with the surviving prisms."""
    # Four prisms with thicknesses 55.1, 55.01, 35.0 and 84.0.
    prism_boundaries = np.array([
        [-5000.0, 5000.0, -5000.0, 5000.0, 0.0, 55.1],
        [5000.0, 15000.0, -5000.0, 5000.0, 0.0, 55.01],
        [-5000.0, 5000.0, 5000.0, 15000.0, 0.0, 35.0],
        [5000.0, 15000.0, 5000.0, 15000.0, 0.0, 84.0],
    ])
    densities = np.array([2306, 2122, 2190, 2069])
    thick_prisms, thick_densities = _discard_thin_prisms(prism_boundaries, densities, thickness_threshold=55.05)
    # Only rows 0 and 3 meet the 55.05 thickness threshold.
    expected_prisms = np.array([
        [-5000.0, 5000.0, -5000.0, 5000.0, 0.0, 55.1],
        [5000.0, 15000.0, 5000.0, 15000.0, 0.0, 84.0],
    ])
    expected_densities = np.array([2306, 2069])
    npt.assert_allclose(expected_prisms, thick_prisms)
    npt.assert_allclose(expected_densities, thick_densities)
class LiteralType(_common.FlyteIdlEntity):
    """Python model of the flyteidl core ``LiteralType`` message.

    At most one of the concrete type fields (``simple``, ``schema``,
    ``collection_type``, ``map_value_type``, ``blob``, ``enum_type``,
    ``union_type``, ``structured_dataset_type``) is expected to be set;
    ``metadata``, ``structure`` and ``annotation`` carry auxiliary data.
    """

    def __init__(self, simple=None, schema=None, collection_type=None, map_value_type=None, blob=None, enum_type=None, union_type=None, structured_dataset_type=None, metadata=None, structure=None, annotation=None):
        self._simple = simple
        self._schema = schema
        self._collection_type = collection_type
        self._map_value_type = map_value_type
        self._blob = blob
        self._enum_type = enum_type
        self._union_type = union_type
        # Fixed: _structured_dataset_type and _metadata were each assigned
        # twice with identical values; the redundant assignments were removed.
        self._structured_dataset_type = structured_dataset_type
        self._metadata = metadata
        self._structure = structure
        self._annotation = annotation

    @property
    def simple(self) -> SimpleType:
        return self._simple

    @property
    def schema(self) -> SchemaType:
        return self._schema

    @property
    def collection_type(self) -> 'LiteralType':
        """Element type when this LiteralType describes a collection."""
        return self._collection_type

    @property
    def map_value_type(self) -> 'LiteralType':
        """Value type when this LiteralType describes a string-keyed map."""
        return self._map_value_type

    @property
    def blob(self) -> _core_types.BlobType:
        return self._blob

    @property
    def enum_type(self) -> _core_types.EnumType:
        return self._enum_type

    @property
    def union_type(self) -> UnionType:
        return self._union_type

    @property
    def structure(self) -> TypeStructure:
        return self._structure

    @property
    def structured_dataset_type(self) -> StructuredDatasetType:
        return self._structured_dataset_type

    @property
    def metadata(self):
        return self._metadata

    @property
    def annotation(self) -> TypeAnnotationModel:
        return self._annotation

    @metadata.setter
    def metadata(self, value):
        self._metadata = value

    @annotation.setter
    def annotation(self, value):
        # Fixed: the original assigned ``self.annotation = value``, which
        # re-enters this setter and recurses forever; write to the backing
        # attribute instead (mirrors the metadata setter above).
        self._annotation = value

    def to_flyte_idl(self):
        """Serialize this model to its flyteidl protobuf representation."""
        if (self.metadata is not None):
            # metadata is a plain dict; round-trip through JSON into a Struct.
            metadata = _json_format.Parse(_json.dumps(self.metadata), _struct.Struct())
        else:
            metadata = None
        t = _types_pb2.LiteralType(simple=(self.simple if (self.simple is not None) else None), schema=(self.schema.to_flyte_idl() if (self.schema is not None) else None), collection_type=(self.collection_type.to_flyte_idl() if (self.collection_type is not None) else None), map_value_type=(self.map_value_type.to_flyte_idl() if (self.map_value_type is not None) else None), blob=(self.blob.to_flyte_idl() if (self.blob is not None) else None), enum_type=(self.enum_type.to_flyte_idl() if self.enum_type else None), union_type=(self.union_type.to_flyte_idl() if self.union_type else None), structured_dataset_type=(self.structured_dataset_type.to_flyte_idl() if self.structured_dataset_type else None), metadata=metadata, annotation=(self.annotation.to_flyte_idl() if self.annotation else None), structure=(self.structure.to_flyte_idl() if self.structure else None))
        return t

    @classmethod
    def from_flyte_idl(cls, proto):
        """Build a LiteralType model from its flyteidl protobuf representation."""
        collection_type = None
        map_value_type = None
        if proto.HasField('collection_type'):
            collection_type = LiteralType.from_flyte_idl(proto.collection_type)
        if proto.HasField('map_value_type'):
            map_value_type = LiteralType.from_flyte_idl(proto.map_value_type)
        return cls(simple=(proto.simple if proto.HasField('simple') else None), schema=(SchemaType.from_flyte_idl(proto.schema) if proto.HasField('schema') else None), collection_type=collection_type, map_value_type=map_value_type, blob=(_core_types.BlobType.from_flyte_idl(proto.blob) if proto.HasField('blob') else None), enum_type=(_core_types.EnumType.from_flyte_idl(proto.enum_type) if proto.HasField('enum_type') else None), union_type=(UnionType.from_flyte_idl(proto.union_type) if proto.HasField('union_type') else None), structured_dataset_type=(StructuredDatasetType.from_flyte_idl(proto.structured_dataset_type) if proto.HasField('structured_dataset_type') else None), metadata=(_json_format.MessageToDict(proto.metadata) or None), structure=(TypeStructure.from_flyte_idl(proto.structure) if proto.HasField('structure') else None), annotation=(TypeAnnotationModel.from_flyte_idl(proto.annotation) if proto.HasField('annotation') else None))
class OptionSeriesTreegraphLabelStyle(Options):
    """Style options for treegraph link labels (Highcharts ``label.style``).

    NOTE(review): each getter/setter pair below shares a name; upstream these
    are presumably @property / setter pairs — decorators are not visible here.
    """
    def fontSize(self):
        # Default font size is '0.8em'.
        return self._config_get('0.8em')
    def fontSize(self, num: float):
        # Setter: store the font size in the options config.
        self._config(num, js_type=False)
    def fontWeight(self):
        # Default font weight is 'bold'.
        return self._config_get('bold')
    def fontWeight(self, text: str):
        # Setter: store the font weight in the options config.
        self._config(text, js_type=False)
class OptionPlotoptionsArcdiagramOnpointConnectoroptions(Options):
    """Connector styling options for arc-diagram on-point markers.

    NOTE(review): each getter/setter pair below shares a name; upstream these
    are presumably @property / setter pairs — decorators are not visible here.
    """
    def dashstyle(self):
        # No default dash style.
        return self._config_get(None)
    def dashstyle(self, text: str):
        # Setter: store the connector dash style.
        self._config(text, js_type=False)
    def stroke(self):
        # No default stroke colour.
        return self._config_get(None)
    def stroke(self, text: str):
        # Setter: store the connector stroke colour.
        self._config(text, js_type=False)
    def width(self):
        # Default connector width is 1.
        return self._config_get(1)
    def width(self, num: float):
        # Setter: store the connector width.
        self._config(num, js_type=False)
class OptionSeriesLollipopSonificationDefaultinstrumentoptionsMappingHighpass(Options):
    """Highpass-filter mapping options for lollipop series sonification."""
    def frequency(self) -> 'OptionSeriesLollipopSonificationDefaultinstrumentoptionsMappingHighpassFrequency':
        """Sub-options controlling the highpass filter frequency mapping."""
        return self._config_sub_data('frequency', OptionSeriesLollipopSonificationDefaultinstrumentoptionsMappingHighpassFrequency)
    def resonance(self) -> 'OptionSeriesLollipopSonificationDefaultinstrumentoptionsMappingHighpassResonance':
        """Sub-options controlling the highpass filter resonance mapping."""
        return self._config_sub_data('resonance', OptionSeriesLollipopSonificationDefaultinstrumentoptionsMappingHighpassResonance)
def test_ref_plain_two_outputs():
    """A plain ReferenceEntity with two declared outputs yields two promises off one node."""
    r1 = ReferenceEntity(TaskReference('proj', 'domain', 'some.name', 'abc'), inputs=kwtypes(a=str, b=int), outputs=kwtypes(x=bool, y=int))
    ctx = context_manager.FlyteContext.current_context()
    with context_manager.FlyteContextManager.with_context(ctx.with_new_compilation_state()):
        # Under a compilation state, calling the reference produces promises, not values.
        (xx, yy) = r1(a='five', b=6)
        # Both output promises must hang off the same underlying node.
        assert (xx.ref.node is yy.ref.node)
        assert (xx.var == 'x')
        assert (yy.var == 'y')
        assert (xx.ref.node_id == 'n0')
        # One binding per declared input (a and b).
        assert (len(xx.ref.node.bindings) == 2)
    def t2(q: bool, r: int) -> str:
        return f'q: {q} r: {r}'
    def wf1(a: str, b: int) -> str:
        (x_out, y_out) = r1(a=a, b=b)
        return t2(q=x_out, r=y_out)
    # NOTE(review): the line below is a decorator fragment — presumably a
    # task-mock decorator stripped during extraction; it injects ``ref_mock``.
    (r1)
    def inner_test(ref_mock):
        ref_mock.return_value = (False, 30)
        x = wf1(a='hello', b=10)
        assert (x == 'q: False r: 30')
    inner_test()
def test_cli_literals_list():
    """List[Literal[...]] arguments accept repeated valid choices and reject invalid ones."""
    cli = Radicli()
    ran = False
    # NOTE(review): the line below is a decorator fragment — presumably
    # @cli.command(...) stripped during extraction.
    ('test', a=Arg('--a'))
    def test(a: List[Literal[('pizza', 'pasta', 'burger')]]):
        # The repeated --a values arrive as a list, in the order given.
        assert (a == ['pasta', 'pizza'])
        nonlocal ran
        ran = True
    cli.run(['', 'test', '--a', 'pasta', '--a', 'pizza'])
    assert ran
    with pytest.raises(CliParserError):
        # 'fries' is not one of the allowed literals, so parsing must fail.
        cli.run(['', 'test', '--a', 'burger', '--a', 'fries'])
class ColumnComponent(JsPackage):
    """JavaScript proxy for a Tabulator Column Component.

    Each method builds the JavaScript expression for the matching Tabulator
    column API call and wraps it in the appropriate JsObject type.
    """
    lib_alias = {'js': 'tabulator-tables', 'css': 'tabulator-tables'}
    lib_selector = 'column'

    def getElement(self):
        """Return the header DOM element of the column."""
        return JsNodeDom.JsDoms(('%s.getElement()' % self.toStr()), page=self.page, component=self.component)

    def getTable(self):
        """Return the table instance that owns this column."""
        return JsObjects.JsObject.JsObject(('%s.getTable()' % self.toStr()), page=self.page, component=self.component)

    def getDefinition(self):
        """Return the column definition object."""
        return JsObjects.JsObject.JsObject(('%s.getDefinition()' % self.toStr()), page=self.page, component=self.component)

    def getField(self):
        """Return the field name bound to the column."""
        return JsObjects.JsObject.JsObject(('%s.getField()' % self.toStr()), page=self.page, component=self.component)

    def getCells(self):
        """Return the cell components in this column."""
        return CellComponent(js_code=('%s.getCells()' % self.toStr()), page=self.page, component=self.component)

    def getNextColumn(self):
        """Return the next column in the table."""
        return ColumnComponent(js_code=('%s.getNextColumn()' % self.toStr()), page=self.page, component=self.component)

    def getPrevColumn(self):
        """Return the previous column in the table."""
        return ColumnComponent(js_code=('%s.getPrevColumn()' % self.toStr()), page=self.page, component=self.component)

    def getVisibility(self):
        """Return whether the column is currently visible."""
        return JsObjects.JsBoolean.JsBoolean(('%s.getVisibility()' % self.toStr()), page=self.page, component=self.component)

    def show(self):
        """Show the column."""
        return JsObjects.JsObject.JsObject(('%s.show()' % self.toStr()), page=self.page, component=self.component)

    def hide(self):
        """Hide the column."""
        return JsObjects.JsObject.JsObject(('%s.hide()' % self.toStr()), page=self.page, component=self.component)

    def toggle(self):
        """Toggle the visibility of the column.

        Fixed: this previously emitted ``hide()`` (copy-paste slip), making
        toggle() behave exactly like hide(); it now emits Tabulator's
        ``toggle()`` call.
        """
        return JsObjects.JsObject.JsObject(('%s.toggle()' % self.toStr()), page=self.page, component=self.component)

    def delete(self):
        """Delete the column (Tabulator returns a promise)."""
        return self.fnc_closure_in_promise('delete()')

    def scrollTo(self):
        """Scroll the table horizontally to this column."""
        return JsObjects.JsObject.JsObject(('%s.scrollTo()' % self.toStr()), page=self.page, component=self.component)

    def move(self, text, flag):
        """Move this column next to the column identified by ``text``; ``flag`` picks after/before."""
        text = JsUtils.jsConvertData(text, None)
        flag = JsUtils.jsConvertData(flag, None)
        return JsObjects.JsObject.JsObject(('%s.move(%s, %s)' % (self.toStr(), text, flag)), page=self.page, component=self.component)

    def getSubColumns(self):
        """Return the sub-columns when this is a grouped column."""
        return ColumnComponent(js_code=('%s.getSubColumns()' % self.toStr()), page=self.page, component=self.component)

    def getParentColumn(self):
        """Return the parent column when this column is part of a group."""
        return ColumnComponent(js_code=('%s.getParentColumn()' % self.toStr()), page=self.page, component=self.component)

    def headerFilterFocus(self):
        """Put keyboard focus on the column's header filter element."""
        return JsObjects.JsObject.JsObject(('%s.headerFilterFocus()' % self.toStr()), page=self.page, component=self.component)

    def setHeaderFilterValue(self, text):
        """Set the value of the column's header filter."""
        text = JsUtils.jsConvertData(text, None)
        return JsObjects.JsObject.JsObject(('%s.setHeaderFilterValue(%s)' % (self.toStr(), text)), page=self.page, component=self.component)

    def reloadHeaderFilter(self):
        """Rebuild the column's header filter element."""
        return JsObjects.JsObject.JsObject(('%s.reloadHeaderFilter()' % self.toStr()), page=self.page, component=self.component)
class RadialGradient(Html.Html):
    """Builder for an SVG <radialGradient> element: accumulate <stop> children, then render."""
    name = 'SVG RadialGradient'

    def __init__(self, page, html_code):
        super(RadialGradient, self).__init__(page, '', html_code=html_code)
        # Accumulated <stop .../> fragments, rendered in insertion order.
        self.items = []

    def url(self) -> str:
        """CSS url() reference to this gradient, usable in fill/stroke attributes."""
        return 'url(#%s)' % self.htmlCode

    def stop(self, offset, styles):
        """Append a <stop> at ``offset`` with the given style mapping; chainable."""
        style_text = ';'.join('%s:%s' % pair for pair in styles.items())
        self.items.append('<stop offset="%s" style="%s" />' % (offset, style_text))
        return self

    def __str__(self):
        attrs = self.get_attrs(css_class_names=self.style.get_classes())
        body = ''.join(self.items)
        return '<radialGradient %s>%s</radialGradient>' % (attrs, body)
class DiscreteActorCriticHead(nn.Module):
    """Two-headed MLP: a log-policy over discrete actions and a scalar state value."""

    def __init__(self, input_size: int, hidden_sizes: Sequence[int], num_actions: int) -> None:
        super().__init__()
        self._input_size = input_size
        self._hidden_sizes = hidden_sizes
        self._num_actions = num_actions
        # Policy head ends in num_actions logits; value head ends in a single scalar.
        policy_layer_sizes = list(hidden_sizes) + [num_actions]
        value_layer_sizes = list(hidden_sizes) + [1]
        self._mlp_p = MLP(input_size, policy_layer_sizes)
        self._mlp_v = MLP(input_size, value_layer_sizes)

    def forward(self, x: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Return (log-policy, value) for the input features."""
        logits = self._mlp_p(x)
        log_policy = F.log_softmax(logits, dim=-1)
        value = self._mlp_v(x)
        return (log_policy, value)
.parametrize('lst, n, expected', [([0], 2, [0]), ([0, 1], 2, [0, 1]), ([0, 1, 2], 2, [[0, 1], 2]), ([0, 1, 2], 3, [0, 1, 2]), ([0, 1, 2, 3], 2, [[0, 1], [2, 3]]), ([0, 1, 2, 3], 3, [[0, 1, 2], 3]), ([0, 1, 2, 3, 4], 3, [[0, 1, 2], 3, 4]), ([0, 1, 2, 3, 4, 5], 3, [[0, 1, 2], [3, 4, 5]]), (list(range(7)), 3, [[0, 1, 2], [3, 4, 5], 6]), (list(range(8)), 3, [[0, 1, 2], [3, 4, 5], [6, 7]]), (list(range(9)), 3, [[0, 1, 2], [3, 4, 5], [6, 7, 8]]), (list(range(10)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 9]), (list(range(11)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 9, 10]), (list(range(12)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [9, 10, 11]]), (list(range(13)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [9, 10, 11], 12]), (list(range(14)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], 12, 13]]), (list(range(15)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [9, 10, 11], [12, 13, 14]]), (list(range(16)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], 15]]), (list(range(23)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], [15, 16, 17]], [[18, 19, 20], 21, 22]]), (list(range(27)), 3, [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], [15, 16, 17]], [[18, 19, 20], [21, 22, 23], [24, 25, 26]]]), (list(range(28)), 3, [[[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], [15, 16, 17]], [[18, 19, 20], [21, 22, 23], [24, 25, 26]]], 27]), (list(range(257)), 256, [list(range(256)), 256]), (list(range(258)), 256, [list(range(256)), 256, 257]), (list(range(512)), 256, [list(range(256)), list(range(256, 512))]), (list(range((512 + 1))), 256, [list(range(256)), list(range(256, 512)), 512]), (list(range((256 ** 2))), 256, [list(range((k * 256), ((k * 256) + 256))) for k in range(256)])])
def test_build_n_ary_tree(lst, n, expected):
    """Parametrized: build_n_ary_tree must pack ``lst`` into nested groups of at most ``n``."""
    assert (build_n_ary_tree(lst, n) == expected)
class EqlAnalytic(EqlNode):
    """An EQL query bundled with a metadata dictionary (id, name, ...)."""
    __slots__ = ('query', 'metadata')

    def __init__(self, query, metadata=None):
        self.query = query
        # Normalize any falsy metadata (None, {}, ...) to an empty dict.
        self.metadata = metadata if metadata else {}

    def id(self):
        """Identifier stored in the metadata, if present."""
        return self.metadata.get('id')

    def name(self):
        """Human-readable name stored in the metadata, if present."""
        return self.metadata.get('name')

    def __unicode__(self):
        return self.query.__unicode__()

    def __str__(self):
        return self.query.__str__()

    def _render(self):
        """Render to a plain dict: the metadata plus the rendered query."""
        rendered = {'metadata': self.metadata, 'query': self.query.render()}
        return rendered
def parse_args(argv):
    """Parse command-line arguments for the ``ergo view`` sub-command.

    Fixed: 'struture' typo in the user-facing description ('structure').

    :param argv: list of argument strings (excluding the program name).
    :return: the parsed ``argparse.Namespace`` with ``path`` and ``img_only``.
    """
    parser = argparse.ArgumentParser(
        prog='ergo view',
        description='View the model structure and training statistics.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('path', help='Path of the project.')
    parser.add_argument(
        '--img-only', dest='img_only', default=False, action='store_true',
        help="Save plots as PNG files but don't show them in a UI.")
    args = parser.parse_args(argv)
    return args
class perm102_bmm_rrr(bmm):
    """Batched row-major matmul op with a permuted (perm102) operand layout.

    Shapes inferred below: A is [M, B, K], B is [B, K, N], output is [M, B, N].
    """
    def __init__(self):
        super().__init__()
        self._attrs['op'] = 'perm102_bmm_rrr'
        def cal_align_ab(m, n, k):
            # Alignment is driven by k and n and by operand A's dtype.
            return common.default_align_ab(k, n, self._attrs['inputs'][0].dtype())
        self._attrs['f_ab_alignment'] = cal_align_ab
    def _infer_shapes(self, a: Tensor, b: Tensor):
        """Infer the output shape [M, B, N]; batch dims must match or one must be 1."""
        a_shapes = a._attrs['shape']
        b_shapes = b._attrs['shape']
        # Batch dim sits at axis 1 of A (perm102 layout) and axis 0 of B.
        batch_size_a = a_shapes[1]
        batch_size_b = b_shapes[0]
        if ((batch_size_a != batch_size_b) and (batch_size_a != 1) and (batch_size_b != 1)):
            raise RuntimeError('bmm operand A and B should have same batch_size, or batch_size = 1! Current shape A: {} shape B: {} .'.format(a_shapes, b_shapes))
        # Broadcast: pick B's batch when A's batch is the static constant 1.
        batch_size = (batch_size_b if (batch_size_a == IntImm(1)) else batch_size_a)
        return [a_shapes[0], batch_size, b_shapes[2]]
    def _extract_dims(self, for_profiling=False):
        """Map each GEMM dimension (B, M, N, K) to its source tensor axes."""
        return {'B': [common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=1)], 'M': [common.DimInfo(common.Source.INPUT, tensor_idx=0, dim_idx=0), common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=0)], 'N': [common.DimInfo(common.Source.INPUT, tensor_idx=1, dim_idx=2), common.DimInfo(common.Source.OUTPUT, tensor_idx=0, dim_idx=2)], 'K': [common.DimInfo(common.Source.INPUT, tensor_idx=0, dim_idx=2), common.DimInfo(common.Source.INPUT, tensor_idx=1, dim_idx=1)]}
    def _invert_exec_key(self, key):
        # Recover the (B, M, N, K) tuple from a profiler exec key string.
        return common.gemm_inverse_key_func(key)
    def _gen_profile_cmd(self, profiler_prefix, cfg, exec_key):
        """Build the profiler command line: the B, M, N, K sizes in order."""
        def fbuild_cmd(exec_key):
            (B, M, N, K) = self._invert_exec_key(exec_key)
            cmd = []
            cmd.append(B)
            cmd.append(M)
            cmd.append(N)
            cmd.append(K)
            return cmd
        return super()._gen_profile_cmd(profiler_prefix, cfg, exec_key, fbuild_cmd)
(autouse=True)
def fail_from_errors_on_other_threads():
def pytest_excepthook(*args, **kwargs):
_errors.extend(args)
threading.excepthook = pytest_excepthook
(yield)
if _errors:
caught_errors_str = ', '.join([str(err) for err in _errors])
pytest.fail(f'''Caught exceptions from other threads:
{caught_errors_str}''') |
def density_minmax(density, bottom, top):
    """Numerically bracket the minimum and maximum of ``density`` on [bottom, top].

    Compares the interior optima found by bounded scalar minimization against
    the values at the two endpoints, so boundary extrema are never missed.
    """
    edge_min, edge_max = np.sort([density(bottom), density(top)])
    solver_opts = dict(bounds=[bottom, top], method='bounded')
    interior_min = minimize_scalar(density, **solver_opts).fun
    # Maximize by minimizing the negated density.
    interior_max = -minimize_scalar((lambda radius: -density(radius)), **solver_opts).fun
    minimum = np.min((interior_min, edge_min))
    maximum = np.max((interior_max, edge_max))
    return (minimum, maximum)
def _get_overlay_arp_table(node, overlay_rule_name, overlay_vnics):
    """Build the overlay ARP table for ``node``: one entry per remote vnic.

    Entries map each foreign vnic's MAC to its ARP/ip/port details for the
    given overlay rule; vnics hosted on ``node`` itself are excluded.
    """
    arp_table = {}
    for vnic in overlay_vnics:
        if vnic.node != node:
            # A vnic in another datacenter uses the remote overlay address.
            is_remote = (vnic.node.dc_name != node.dc_name)
            arp_table[vnic.mac] = {
                'arp': vnic.ip,
                'port': vnic.node.get_overlay_port(overlay_rule_name),
                'ip': vnic.node.get_overlay_ip(overlay_rule_name, remote=is_remote),
            }
    return arp_table
class MayaviUIPlugin(Plugin):
    """Envisage plugin contributing the Mayavi UI to the workbench:
    views, a perspective, preference pages, action sets and an IPython banner.
    """
    # Ids of the Envisage extension points this plugin contributes to.
    VIEWS = 'envisage.ui.workbench.views'
    PERSPECTIVES = 'envisage.ui.workbench.perspectives'
    PREFERENCES_PAGES = 'envisage.ui.workbench.preferences_pages'
    ACTION_SETS = 'envisage.ui.workbench.action_sets'
    BANNER = 'envisage.plugins.ipython_shell.banner'
    # Plugin display name and unique id.
    name = 'Mayavi UI plugin'
    id = 'mayavi_ui'
    # Contribution traits, populated lazily by the _*_default methods below.
    views = List(contributes_to=VIEWS)
    perspectives = List(contributes_to=PERSPECTIVES)
    preferences_pages = List(contributes_to=PREFERENCES_PAGES)
    action_sets = List(contributes_to=ACTION_SETS)
    banner = List(contributes_to=BANNER)
    def _views_default(self):
        """Trait initializer: view factories contributed to the workbench."""
        return [self._engine_view_factory, self._current_selection_view_factory]
    def _perspectives_default(self):
        """Trait initializer: contribute the default Mayavi perspective."""
        return [MayaviPerspective]
    def _preferences_pages_default(self):
        """Trait initializer: contribute the Mayavi preference pages."""
        from mayavi.preferences.mayavi_preferences_page import MayaviRootPreferencesPage, MayaviMlabPreferencesPage
        return [MayaviRootPreferencesPage, MayaviMlabPreferencesPage]
    def _action_sets_default(self):
        """Trait initializer: contribute the Mayavi menu/toolbar action set."""
        from mayavi.plugins.mayavi_ui_action_set import MayaviUIActionSet
        return [MayaviUIActionSet]
    def _banner_default(self):
        """Trait initializer: welcome text shown in the embedded IPython shell."""
        return ['Welcome to Mayavi, this is the interactive IPython shell.\n\nIf this is your first time using Mayavi, take a quick look at the tutorial examples section of the user guide, accessible via the help menu.\nTo use Mayavi, you need to load your data in "data sources" and apply "visualization modules" to it.\n']
    def _engine_view_factory(self, window, **traits):
        """Create the Mayavi engine (pipeline) view, docked on the left."""
        from pyface.workbench.traits_ui_view import TraitsUIView
        from mayavi.core.ui.engine_view import EngineView
        engine_view = EngineView(engine=self._get_engine(window))
        tui_engine_view = TraitsUIView(obj=engine_view, id=ENGINE_VIEW, name='Mayavi', window=window, position='left', **traits)
        return tui_engine_view
    def _current_selection_view_factory(self, window, **traits):
        """Create the object-editor view for the current selection, docked below the engine view."""
        from pyface.workbench.traits_ui_view import TraitsUIView
        engine = self._get_engine(window)
        tui_engine_view = TraitsUIView(obj=engine, view='current_selection_view', id=CURRENT_SELECTION_VIEW, name='Mayavi object editor', window=window, position='bottom', relative_to=ENGINE_VIEW, **traits)
        return tui_engine_view
    def _get_engine(self, window):
        """Return the Engine service registered on the workbench window."""
        from mayavi.core.engine import Engine
        return window.get_service(Engine)
    def _get_script(self, window):
        """Return the Mayavi Script (scripting) service for the window."""
        from mayavi.plugins.script import Script
        return window.get_service(Script)
    # NOTE(review): decorator fragment — presumably @on_trait_change, stripped
    # during extraction.
    _trait_change('application.gui:started')
    def _on_application_gui_started(self, obj, trait_name, old, new):
        """Once the GUI has started, bind 'mayavi'/'engine' into the Python shell view."""
        if ((trait_name != 'started') or (not new)):
            return
        app = self.application
        window = app.workbench.active_window
        script = self._get_script(window)
        id = SHELL_VIEW
        py = window.get_view_by_id(id)
        if (py is None):
            logger.warn(('*' * 80))
            logger.warn("Can't find the Python shell view to bind variables")
            return
        try:
            py.bind('mayavi', script)
            py.bind('engine', script.engine)
            # 'explore' is optional: apptools' naming UI may not be installed.
            try:
                from apptools.naming.ui.api import explore
                py.bind('explore', explore)
            except ImportError:
                pass
        except AttributeError as msg:
            logger.warn(msg)
            logger.warn("Can't find the Python shell to bind variables")
def get_default_flags(module: ModuleConf) -> List[Parameter]:
    """Collect the hydra default-flag parameters (_convert_/_recursive_) set on *module*."""
    flags: List[Parameter] = []
    convert = module.default_flags._convert_
    if convert is not None:
        # _convert_ is rendered as a quoted string (its enum member name).
        flags.append(Parameter(name='_convert_', type_str='str', default=f'"{convert.name}"'))
    recursive = module.default_flags._recursive_
    if recursive is not None:
        flags.append(Parameter(name='_recursive_', type_str='bool', default=str(recursive)))
    return flags
('cuda.bmm_rcr_permute.gen_function')
def gen_function(func_attrs, exec_cond_template, dim_info_dict):
    """Generate the CUDA source for the bmm_rcr_permute op (registry hook).

    Builds the default problem description (honoring an optional 'alpha'
    attribute), derives the strided argument/address calculators, and renders
    the final function through the shared bmm-permute codegen path.
    """
    default_mm_info = bmm_common.get_default_problem_info(PROBLEM_ARGS, alpha_value=func_attrs.get('alpha', 1))
    (problem_args, _, input_addr_calculator, output_addr_calculator) = bmm_common.make_function_strided_args(func_attrs, dim_info_dict, default_mm_info, is_permute=True)
    return bmm_permute_common.gen_function(func_attrs, exec_cond_template, problem_args, dim_info_dict, input_addr_calculator, output_addr_calculator, extra_code=common_permute.EXTRA_CODE.render())
def key_601_CosSin_2009():
dlf = DigitalFilter('Key 601 CosSin (2009)', 'key_601_CosSin_2009')
dlf.base = np.array([4.e-13, 4.e-13, 5.e-13, 5.e-13, 6.e-13, 6.e-13, 7.e-13, 8.e-13, 8.e-13, 9.e-13, 1.e-12, 1.e-12, 1.e-12, 1.e-12, 1.e-12, 1.e-12, 1.e-12, 2.e-12, 2.e-12, 2.e-12, 2.e-12, 3.e-12, 3.e-12, 3.e-12, 4.e-12, 4.e-12, 4.e-12, 5.e-12, 5.e-12, 6.e-12, 7.e-12, 7.e-12, 8.e-12, 9.e-12, 1.e-11, 1.e-11, 1.e-11, 1.e-11, 1.e-11, 1.e-11, 1.e-11, 2.e-11, 2.e-11, 2.e-11, 2.e-11, 3.e-11, 3.e-11, 3.e-11, 4.e-11, 4.e-11, 4.e-11, 5.e-11, 5.e-11, 6.e-11, 7.e-11, 7.e-11, 8.e-11, 9.e-11, 1.e-10, 1.e-10, 1.e-10, 1.e-10, 1.e-10, 1.e-10, 1.e-10, 2.e-10, 2.e-10, 2.e-10, 2.e-10, 2.e-10, 3.e-10, 3.e-10, 3.e-10, 4.e-10, 4.e-10, 5.e-10, 5.e-10, 6.e-10, 6.e-10, 7.e-10, 8.e-10, 9.e-10, 1.e-09, 1.e-09, 1.e-09, 1.e-09, 1.e-09, 1.e-09, 1.e-09, 1.e-09, 2.e-09, 2.e-09, 2.e-09, 2.e-09, 3.e-09, 3.e-09, 3.e-09, 4.e-09, 4.e-09, 5.e-09, 5.e-09, 6.e-09, 6.e-09, 7.e-09, 8.e-09, 9.e-09, 9.e-09, 1.e-08, 1.e-08, 1.e-08, 1.e-08, 1.e-08, 1.e-08, 1.e-08, 2.e-08, 2.e-08, 2.e-08, 2.e-08, 3.e-08, 3.e-08, 3.e-08, 4.e-08, 4.e-08, 4.e-08, 5.e-08, 6.e-08, 6.e-08, 7.e-08, 8.e-08, 8.e-08, 9.e-08, 1.e-07, 1.e-07, 1.e-07, 1.e-07, 1.e-07, 1.e-07, 1.e-07, 2.e-07, 2.e-07, 2.e-07, 2.e-07, 3.e-07, 3.e-07, 3.e-07, 4.e-07, 4.e-07, 4.e-07, 5.e-07, 5.e-07, 6.e-07, 7.e-07, 7.e-07, 8.e-07, 9.e-07, 1.e-06, 1.e-06, 1.e-06, 1.e-06, 1.e-06, 1.e-06, 1.e-06, 2.e-06, 2.e-06, 2.e-06, 2.e-06, 2.e-06, 3.e-06, 3.e-06, 3.e-06, 4.e-06, 4.e-06, 5.e-06, 5.e-06, 6.e-06, 6.e-06, 7.e-06, 8.e-06, 9.e-06, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 2.e-05, 2.e-05, 2.e-05, 2.e-05, 3.e-05, 3.e-05, 3.e-05, 4.e-05, 4.e-05, 5.e-05, 5.e-05, 6.e-05, 6.e-05, 7.e-05, 8.e-05, 9.e-05, 9.e-05, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.0, 1., 1., 1., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 4., 4., 5., 5., 6., 6., 7., 8., 8., 9., 10., 11., 13., 14., 15., 17., 19., 20., 22., 25., 27., 30., 33., 36., 40., 44., 49., 54., 59., 65., 71., 79., 86., 95., 105., 115., 127., 139., 153., 169., 185., 204., 224., 247., 271., 298., 328., 361., 397., 437., 480., 528., 581., 639., 702., 772., 849., 934., 1027., 1130., 1242., 1366., 1502., 1652., 1817., 1998., 2197., 2416., 2657., 2921., 3213., 3533., 3885., 4272., 4698., 5166., 5681., 6247., 6870., 7555., 8308., 9136., 10046., 11047., 12148., 13359., 14691., 16155., 17765., 19535., 21482., 23623., 25977., 28566., 31413., 34544., 37987., 41772., 45935., 50513., 55547., 61083., 67171., 73865., 81226., 89321., 98223., 108012., 118776., 130613., 143630., 157944., 173685., 190994., 210028., 230960., 253977., 279288., 307121., 337729., 371387., 408399., 449099., 493856., 543073., 597195., 656711., 722158., 794128., 873269., 960299., 1056001., 1161241., 1276969., 1404230., 1544174., 1698065., 1867292., 2053384., 2258022., 2483054., 2730512., 3002632., 3301871., 3630932., 3992786., 4390703., 4828275., 5309456., 5838590., 6420457., 7060313., 7763936., 8537681., 9388536., ., ., ., .4919435, ., ., .5667834, ., ., ., ., ., ., ., .8312318, ., ., ., ., ., ., .5053311, ., ., .5523093, .8124983, .8561492, .4680389, .0287357, .7775358, .9631873, .7374508, .6286749, .5125952, .1852297, .948736, .0572838, .4493049, .9291403, .8708979, .6174309, .0566983, .3973287, .9578503, .8549215, .8529267, .3508301, .5657258, .4631569, .922392, .054988, .566463, .617332, .860753, .274437, .136475, .096829, .855137, .566909, .868766, .454121, .571205, .794456, .091177, .737535, .216197, .05567, .874397, .916223, .378973, .144282, .441036, .877066, .55256, .05844, .54237, .22501, .34417, .13861, .84949, .59618, .21499, .66827, .46616, .81807, .18365, .89348, .05315, .67462, .7209, .48837, .67554, .02067, .17545, .42746, .21468, .60141, .8788, .4995, 
.3241, .7981, .886, .5673, .3316, .8982, .6628, .2321, .843, .5668, .0431, .379, .5266, .9733, .64, .0765, .7314, .7781, .7671, .6346, .6213, .1061, .975, .554, .415, .691, .318, .071, .996, .07, .971, .678])
dlf.factor = np.array([1.])
dlf.cos = np.array([9.e-09, (- 9.e-08), 4.e-07, (- 1.e-06), 4.e-06, (- 9.e-06), 1.e-05, (- 2.e-05), 3.e-05, (- 5.e-05), 6.e-05, (- 8.e-05), 9.e-05, (- 0.), 0., (- 0.), 0., (- 0.), 9.e-05, (- 8.e-05), 8.e-05, (- 7.e-05), 6.e-05, (- 6.e-05), 5.e-05, (- 5.e-05), 4.e-05, (- 4.e-05), 4.e-05, (- 3.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 9.e-06), 9.e-06, (- 9.e-06), 9.e-06, (- 8.e-06), 8.e-06, (- 8.e-06), 8.e-06, (- 7.e-06), 8.e-06, (- 7.e-06), 7.e-06, (- 6.e-06), 7.e-06, (- 6.e-06), 6.e-06, (- 5.e-06), 6.e-06, (- 5.e-06), 6.e-06, (- 4.e-06), 5.e-06, (- 4.e-06), 5.e-06, (- 3.e-06), 5.e-06, (- 3.e-06), 5.e-06, (- 2.e-06), 5.e-06, (- 2.e-06), 5.e-06, (- 1.e-06), 4.e-06, (- 7.e-07), 4.e-06, 9.e-08, 4.e-06, 9.e-07, 5.e-06, 1.e-06, 5.e-06, 2.e-06, 6.e-06, 4.e-06, 7.e-06, 5.e-06, 8.e-06, 6.e-06, 9.e-06, 
8.e-06, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 2.e-05, 2.e-05, 2.e-05, 2.e-05, 2.e-05, 3.e-05, 3.e-05, 3.e-05, 4.e-05, 4.e-05, 5.e-05, 5.e-05, 6.e-05, 6.e-05, 7.e-05, 8.e-05, 9.e-05, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), 0., 0., 0., 0., 0., (- 0.), (- 0.), (- 0.), (- 0.), 0., 1., (- 0.), (- 1.), 0., 1., (- 0.), (- 1.), 2., (- 2.), 1., (- 0.), 0., (- 0.), 0., (- 0.), 2.e-05, 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 9.e-05, (- 8.e-05), 8.e-05, (- 7.e-05), 6.e-05, (- 6.e-05), 5.e-05, (- 5.e-05), 4.e-05, (- 4.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 9.e-06), 8.e-06, (- 8.e-06), 8.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 7.e-06, (- 7.e-06), 6.e-06, (- 6.e-06), 6.e-06, (- 5.e-06), 4.e-06, (- 4.e-06), 3.e-06, (- 3.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 9.e-07, (- 7.e-07), 6.e-07, (- 5.e-07), 5.e-07, (- 4.e-07), 4.e-07, (- 5.e-07), 
5.e-07, (- 6.e-07), 7.e-07, (- 9.e-07), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 1.e-06, (- 1.e-06), 7.e-07, (- 2.e-07), (- 2.e-07), 6.e-07, (- 9.e-07), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 9.e-07, (- 7.e-07), 6.e-07, (- 4.e-07), 2.e-07, 1.e-08, (- 3.e-07), 6.e-07, (- 9.e-07), 1.e-06, (- 1.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 3.e-06, (- 3.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 1.e-06), 1.e-06, (- 9.e-07), 5.e-07, (- 2.e-07), 7.e-08, 7.e-08, (- 1.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 8.e-08), 4.e-08, (- 2.e-09), (- 1.e-08), 2.e-08, (- 2.e-08), 1.e-08, (- 4.e-09), 1.e-09, (- 1.e-10)])
dlf.sin = np.array([8.e-12, (- 9.e-11), 5.e-10, (- 1.e-09), 5.e-09, (- 1.e-08), 2.e-08, (- 3.e-08), 4.e-08, (- 4.e-08), 3.e-08, 1.e-08, (- 9.e-08), 2.e-07, (- 4.e-07), 6.e-07, (- 8.e-07), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 9.e-07, (- 8.e-07), 7.e-07, (- 7.e-07), 6.e-07, (- 5.e-07), 4.e-07, (- 4.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 2.e-07), 1.e-07, (- 6.e-08), (- 4.e-08), 1.e-07, (- 2.e-07), 3.e-07, (- 4.e-07), 5.e-07, (- 6.e-07), 6.e-07, (- 6.e-07), 6.e-07, (- 6.e-07), 6.e-07, (- 5.e-07), 5.e-07, (- 4.e-07), 4.e-07, (- 4.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 3.e-07), 4.e-07, (- 4.e-07), 4.e-07, (- 5.e-07), 5.e-07, (- 6.e-07), 6.e-07, (- 6.e-07), 6.e-07, (- 6.e-07), 6.e-07, (- 5.e-07), 5.e-07, (- 4.e-07), 3.e-07, (- 2.e-07), 1.e-07, (- 4.e-08), (- 3.e-08), 9.e-08, (- 1.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 7.e-08), 3.e-08, 8.e-09, (- 5.e-08), 9.e-08, (- 1.e-07), 1.e-07, (- 2.e-07), 2.e-07, (- 3.e-07), 3.e-07, (- 4.e-07), 4.e-07, (- 5.e-07), 5.e-07, (- 6.e-07), 7.e-07, (- 7.e-07), 8.e-07, (- 9.e-07), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 5.e-06), 5.e-06, (- 5.e-06), 6.e-06, (- 6.e-06), 7.e-06, (- 7.e-06), 8.e-06, (- 8.e-06), 9.e-06, (- 9.e-06), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 4.e-05), 4.e-05, (- 4.e-05), 4.e-05, (- 5.e-05), 5.e-05, (- 5.e-05), 6.e-05, (- 6.e-05), 7.e-05, (- 7.e-05), 8.e-05, (- 8.e-05), 9.e-05, (- 9.e-05), 
0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., (- 0.), 0., (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), (- 0.), 0., 0., 0., 0., (- 0.), (- 1.), (- 0.), 0., 1., (- 0.), (- 1.), (- 0.), 2., (- 2.), 1., 0., (- 0.), 1., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 9.e-05), 8.e-05, (- 8.e-05), 7.e-05, (- 7.e-05), 6.e-05, (- 6.e-05), 6.e-05, (- 5.e-05), 5.e-05, (- 5.e-05), 4.e-05, (- 4.e-05), 4.e-05, (- 4.e-05), 3.e-05, (- 3.e-05), 3.e-05, (- 3.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 2.e-05), 2.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 1.e-05), 1.e-05, (- 
1.e-05), 9.e-06, (- 9.e-06), 8.e-06, (- 8.e-06), 7.e-06, (- 7.e-06), 6.e-06, (- 6.e-06), 6.e-06, (- 5.e-06), 5.e-06, (- 5.e-06), 4.e-06, (- 4.e-06), 4.e-06, (- 4.e-06), 3.e-06, (- 3.e-06), 3.e-06, (- 3.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 2.e-06), 2.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 1.e-06, (- 1.e-06), 9.e-07, (- 9.e-07), 8.e-07, (- 8.e-07), 7.e-07, (- 7.e-07), 6.e-07, (- 6.e-07), 5.e-07, (- 5.e-07), 5.e-07, (- 4.e-07), 4.e-07, (- 4.e-07), 4.e-07, (- 3.e-07), 3.e-07, (- 3.e-07), 3.e-07, (- 2.e-07), 2.e-07, (- 2.e-07), 2.e-07, (- 2.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 1.e-07), 1.e-07, (- 1.e-07), 9.e-08, (- 8.e-08), 6.e-08, (- 6.e-08), 5.e-08, (- 4.e-08), 3.e-08, (- 2.e-08), 2.e-08, (- 1.e-08), 1.e-08, (- 7.e-09), 4.e-09, (- 2.e-09), 1.e-09, (- 5.e-10), 1.e-10, (- 5.e-11), 1.e-11, (- 1.e-12)])
return dlf |
def get_rates(temperature, reactant_thermos, ts_thermo, product_thermos=None):
    """Compute Eyring rates (plus tunneling-corrected variants) for a reaction.

    Builds forward rates from the reactant ensemble and, when product
    thermochemistry is supplied, backward rates as well.  Wigner and Bell
    corrections are always attempted; the Eckart correction additionally
    needs the backward barrier, so it is only computed with products.

    Returns a list of ReactionRates: [forward] or [forward, backward].
    """
    G_TS = ts_thermo.G
    imag_wavenumber = ts_thermo.org_wavenumbers[0]
    # A transition state must carry one imaginary mode (negative wavenumber).
    assert imag_wavenumber < 0.0
    # cm^-1 -> s^-1; assumes C is the speed of light in m/s (factor 100
    # converts cm^-1 to m^-1) — TODO confirm against the constant definition.
    imag_frequency = imag_wavenumber * C * 100

    kappa_wigner = wigner_corr(temperature, imag_frequency)
    kappa_bell = bell_corr(temperature, imag_frequency)

    G_reactant = sum(thermo.G for thermo in reactant_thermos)
    fw_barrier_height = G_TS - G_reactant
    fw_rate_eyring = eyring_rate(fw_barrier_height, temperature)

    if product_thermos:
        G_product = sum(thermo.G for thermo in product_thermos)
        bw_barrier_height = G_TS - G_product
        bw_rate_eyring = eyring_rate(bw_barrier_height, temperature)
        # Eckart needs both barrier heights, hence only with products.
        kappa_eckart = eckart_corr(
            fw_barrier_height, bw_barrier_height, temperature, imag_frequency
        )
    else:
        kappa_eckart = None

    def make_rx_rates(from_, barrier, rate_eyring, kappa_eyring=1.0):
        """Assemble one ReactionRates record, attaching only valid corrections."""
        extra = {}
        # Each correction is included only when it is non-zero and numeric;
        # otherwise the corresponding field is simply omitted.
        if kappa_wigner and not np.isnan(kappa_wigner):
            extra['kappa_wigner'] = kappa_wigner
            extra['rate_wigner'] = kappa_wigner * rate_eyring
        if kappa_bell and kappa_bell > 0.0:
            extra['kappa_bell'] = kappa_bell
            extra['rate_bell'] = kappa_bell * rate_eyring
        if kappa_eckart and not np.isnan(kappa_eckart):
            extra['kappa_eckart'] = kappa_eckart
            extra['rate_eckart'] = kappa_eckart * rate_eyring
        return ReactionRates(
            from_=from_,
            barrier=barrier,
            barrier_si=barrier * AU2KJPERMOL,
            temperature=temperature,
            imag_wavenumber=imag_wavenumber,
            imag_frequency=imag_frequency,
            rate_eyring=rate_eyring,
            kappa_eyring=kappa_eyring,
            **extra,
        )

    rx_rates = [make_rx_rates('Reactant(s)', fw_barrier_height, fw_rate_eyring)]
    if product_thermos:
        rx_rates.append(
            make_rx_rates('Product(s)', bw_barrier_height, bw_rate_eyring)
        )
    return rx_rates
@_validator
def validate_release(request, **kwargs):
    """Resolve the ``release`` querystring value to a Release row.

    Matches by exact name, upper-cased name, or version.  On success the
    string in ``request.validated['release']`` is replaced by the Release
    object; otherwise a querystring error is recorded.

    BUG FIX: ``_validator`` stood on its own line as a bare expression —
    a stripped decorator.  Restored as ``@_validator`` so the function is
    actually registered/wrapped.
    """
    releasename = request.validated.get('release')
    if releasename is None:
        # Nothing to validate; an upstream schema decides if it is required.
        return
    db = request.db
    release = db.query(Release).filter(
        or_(
            Release.name == releasename,
            Release.name == releasename.upper(),
            Release.version == releasename,
        )
    ).first()
    if release:
        request.validated['release'] = release
    else:
        request.errors.add(
            'querystring', 'release',
            'Invalid release specified: {}'.format(releasename))
class IOStrategy(_common.FlyteIdlEntity):
    """Strategy for moving data into and out of a raw container.

    Thin wrapper around the ``_core_task.IOStrategy`` protobuf message,
    pairing a download mode (how inputs are fetched) with an upload mode
    (when outputs are pushed).
    """

    # Convenience aliases for the protobuf enum values.
    DOWNLOAD_MODE_EAGER = _core_task.IOStrategy.DOWNLOAD_EAGER
    DOWNLOAD_MODE_STREAM = _core_task.IOStrategy.DOWNLOAD_STREAM
    DOWNLOAD_MODE_NO_DOWNLOAD = _core_task.IOStrategy.DO_NOT_DOWNLOAD
    UPLOAD_MODE_EAGER = _core_task.IOStrategy.UPLOAD_EAGER
    UPLOAD_MODE_ON_EXIT = _core_task.IOStrategy.UPLOAD_ON_EXIT
    UPLOAD_MODE_NO_UPLOAD = _core_task.IOStrategy.DO_NOT_UPLOAD

    def __init__(
        self,
        download_mode: _core_task.IOStrategy.DownloadMode = DOWNLOAD_MODE_EAGER,
        upload_mode: _core_task.IOStrategy.UploadMode = UPLOAD_MODE_ON_EXIT,
    ):
        self._download_mode = download_mode
        self._upload_mode = upload_mode

    def to_flyte_idl(self) -> _core_task.IOStrategy:
        """Serialize this object to its protobuf representation."""
        return _core_task.IOStrategy(
            download_mode=self._download_mode, upload_mode=self._upload_mode
        )

    @classmethod
    def from_flyte_idl(cls, pb2_object: _core_task.IOStrategy):
        """Alternate constructor from a protobuf message; None maps to None.

        BUG FIX: the method takes ``cls`` but was missing ``@classmethod``,
        so ``IOStrategy.from_flyte_idl(pb)`` would have bound ``pb`` as
        ``cls`` (or failed), and instance calls passed the instance as
        ``cls``.  Decorated so it behaves as the intended factory.
        """
        if pb2_object is None:
            return None
        return cls(
            download_mode=pb2_object.download_mode,
            upload_mode=pb2_object.upload_mode,
        )
class ScatterInspector(SelectTool):
    """Tool that tracks hover and selection of individual scatter points.

    Hits are recorded in the plot's ``index`` (and, when present, ``value``)
    metadata dicts under the configured metadata keys, and every state
    change is broadcast through the ``inspector_event`` trait.  The tool
    draws nothing itself.
    """

    # When False, hover metadata is removed as soon as the pointer leaves
    # the point; when True it is left in place.
    persistent_hover = Bool(False)

    # Metadata key under which the currently hovered index is stored.
    hover_metadata_name = Str('hover')

    # Metadata key under which the list of selected indices is stored.
    selection_metadata_name = Str('selections')

    # Fired with a ScatterInspectorEvent on hover/select/deselect changes.
    inspector_event = Event(ScatterInspectorEvent)

    # This tool has no visual representation of its own.
    visible = False
    draw_mode = 'none'

    def normal_mouse_move(self, event):
        """Update hover metadata for the point (if any) under the cursor."""
        plot = self.component
        index = plot.map_index((event.x, event.y), threshold=self.threshold)
        insp_event = ScatterInspectorEvent(event_type=HOVER_EVENT, event_index=index)
        if (index is not None):
            old = plot.index.metadata.get(self.hover_metadata_name, None)
            plot.index.metadata[self.hover_metadata_name] = [index]
            # Only fire when the hovered point actually changed.
            if (old != [index]):
                self.inspector_event = insp_event
            if hasattr(plot, 'value'):
                plot.value.metadata[self.hover_metadata_name] = [index]
        elif (not self.persistent_hover):
            # Pointer is not over a point: clear stale hover state and
            # report the change only if something was actually removed.
            old = plot.index.metadata.pop(self.hover_metadata_name, None)
            if old:
                self.inspector_event = insp_event
            if hasattr(plot, 'value'):
                plot.value.metadata.pop(self.hover_metadata_name, None)

    def _get_selection_state(self, event):
        """Return (already_selected, clicked_on_a_point) for the event position."""
        plot = self.component
        index = plot.map_index((event.x, event.y), threshold=self.threshold)
        already_selected = False
        # A point counts as selected if either the index or value metadata
        # lists it under the selection key.
        for name in ('index', 'value'):
            if (not hasattr(plot, name)):
                continue
            md = getattr(plot, name).metadata
            if ((md is None) or (self.selection_metadata_name not in md)):
                continue
            if (index in md[self.selection_metadata_name]):
                already_selected = True
                break
        return (already_selected, (index is not None))

    def _get_selection_token(self, event):
        """Return the data index under the event, used to identify the hit."""
        plot = self.component
        index = plot.map_index((event.x, event.y), threshold=self.threshold)
        return index

    def _deselect(self, index=None):
        """Remove *index* from the selection, or clear it entirely if None."""
        plot = self.component
        insp_event = ScatterInspectorEvent(event_type=DESELECT_EVENT, event_index=index)
        for name in ('index', 'value'):
            if (not hasattr(plot, name)):
                continue
            md = getattr(plot, name).metadata
            if (self.selection_metadata_name not in md):
                pass
            elif (index in md[self.selection_metadata_name]):
                # Replace rather than mutate the stored list so trait
                # listeners observing the metadata see the change.
                new_list = md[self.selection_metadata_name][:]
                new_list.remove(index)
                md[self.selection_metadata_name] = new_list
                # Fire the event once, keyed off the 'index' pass only.
                if (name == 'index'):
                    self.inspector_event = insp_event
            elif (index is None):
                # No specific index given: wipe the whole selection.
                md[self.selection_metadata_name] = []
                if (name == 'index'):
                    self.inspector_event = insp_event

    def _select(self, index, append=True):
        """Add *index* to the selection (or replace it when append=False)."""
        plot = self.component
        insp_event = ScatterInspectorEvent(event_type=SELECT_EVENT, event_index=index)
        for name in ('index', 'value'):
            if (not hasattr(plot, name)):
                continue
            md = getattr(plot, name).metadata
            selection = md.get(self.selection_metadata_name, None)
            if (selection is None):
                md[self.selection_metadata_name] = [index]
            elif append:
                if (index not in md[self.selection_metadata_name]):
                    new_list = (md[self.selection_metadata_name] + [index])
                    md[self.selection_metadata_name] = new_list
                    # Explicitly flag metadata as changed for listeners.
                    getattr(plot, name).metadata_changed = True
            else:
                md[self.selection_metadata_name] = [index]
            # Fire the event once per call, on the 'index' pass.
            if (name == 'index'):
                self.inspector_event = insp_event
class ModelTest(BasePyTestCase):
    """Generic round-trip tests for a Bodhi model class.

    Subclasses set ``klass`` to the model under test and ``attrs`` to the
    keyword arguments used to construct it; ``do_get_dependencies`` may
    supply additional related objects.  The base class itself builds no
    object and its parameterized tests are effectively no-ops.
    """

    # Model class under test; set by subclasses.
    klass = None
    # Constructor keyword arguments for the model instance.
    attrs = {}
    _populate_db = False

    def setup_method(self):
        """Build and flush one ``klass`` instance for subclass tests."""
        super(ModelTest, self).setup_method(self)
        buildsys.setup_buildsystem({'buildsystem': 'dev'})
        if type(self) is ModelTest:
            # The abstract base has no model to instantiate.
            return
        try:
            merged_attrs = dict(self.attrs)
            merged_attrs.update(self.do_get_dependencies())
            # Patch out the notifications module so creating/flushing the
            # model does not attempt to publish messages.
            with mock.patch('bodhi.server.models.notifications'):
                self.obj = self.klass(**merged_attrs)
                self.db.add(self.obj)
                self.db.flush()
                return self.obj
        except Exception:
            # Leave the session usable for subsequent tests.
            self.db.rollback()
            raise

    def do_get_dependencies(self):
        """Hook for subclasses to provide related model instances."""
        return {}

    def test_create_obj(self):
        """Creation itself happens in setup_method; nothing more to assert."""
        pass

    def test_query_obj(self):
        """Every constructor attribute must be readable back unchanged."""
        for key, value in self.attrs.items():
            assert getattr(self.obj, key) == value

    def test_json(self):
        """The model's __json__ payload must be JSON-serializable."""
        if type(self) is not ModelTest:
            assert isinstance(json.dumps(self.obj.__json__()), str)

    def test_get(self):
        """Each column in __get_by__ must retrieve the object via klass.get()."""
        if type(self) is not ModelTest:
            for col in self.obj.__get_by__:
                assert self.klass.get(getattr(self.obj, col)) == self.obj
class TestActions(unittest.TestCase):
    """Equality semantics of OpenFlow action objects."""

    def test_output_equality(self):
        """Two output actions are equal iff port and max_len both match.

        BUG FIX: replaced the deprecated ``assertEquals``/``assertNotEquals``
        aliases (removed in Python 3.12) with ``assertEqual``/``assertNotEqual``.
        """
        action = ofp.action.output(port=1, max_len=4660)
        action2 = ofp.action.output(port=1, max_len=4660)
        self.assertEqual(action, action2)
        # Differing port breaks equality.
        action2.port = 2
        self.assertNotEqual(action, action2)
        # Restore port; differing max_len also breaks equality.
        action2.port = 1
        action2.max_len = 65535
        self.assertNotEqual(action, action2)
        action2.max_len = 4660
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.