code
stringlengths
281
23.7M
def update_hint_locations(game: RandovaniaGame, hint_tree_widget: QtWidgets.QTreeWidget):
    """Rebuild `hint_tree_widget` with every hint location of `game`.

    Column 0 holds region/area names; each hint kind gets its own column
    (1-based, ordered by the kind's long_name). Cell text is the node's
    'translator' extra when present, otherwise empty.
    """
    game_description = default_database.game_description_for(game)
    used_hint_kind = set()
    # Reset the widget; sorting is disabled while repopulating and re-enabled at the end.
    hint_tree_widget.clear()
    hint_tree_widget.setSortingEnabled(False)
    # region display-name -> area name -> {hint kind -> cell text}
    hint_type_tree = collections.defaultdict(dict)
    for region in game_description.region_list.regions:
        for area in region.areas:
            hint_types = {}
            for node in area.nodes:
                if isinstance(node, HintNode):
                    used_hint_kind.add(node.kind)
                    if ('translator' in node.extra):
                        hint_types[node.kind] = node.extra['translator']
                    else:
                        hint_types[node.kind] = ''
            if hint_types:
                hint_type_tree[region.correct_name(area.in_dark_aether)][area.name] = hint_types
    # Assign one column per hint kind, deterministically ordered by long_name (column 0 is the name column).
    number_for_hint_type = {hint_type: (i + 1) for (i, hint_type) in enumerate(sorted(used_hint_kind, key=(lambda it: it.long_name)))}
    for (region_name, area_hints) in hint_type_tree.items():
        region_item = QtWidgets.QTreeWidgetItem(hint_tree_widget)
        region_item.setText(0, region_name)
        region_item.setExpanded(True)
        for (area_name, hint_types) in area_hints.items():
            area_item = QtWidgets.QTreeWidgetItem(region_item)
            area_item.setText(0, area_name)
            for (hint_type, text) in hint_types.items():
                area_item.setText(number_for_hint_type[hint_type], text)
                # Re-adding is a no-op: every kind here was already collected above.
                used_hint_kind.add(hint_type)
    hint_tree_widget.resizeColumnToContents(0)
    hint_tree_widget.setSortingEnabled(True)
    hint_tree_widget.sortByColumn(0, QtCore.Qt.AscendingOrder)
    # Label the per-kind columns in the header.
    for hint_kind in used_hint_kind:
        hint_tree_widget.headerItem().setText(number_for_hint_type[hint_kind], hint_kind.long_name)
def preprocess_input(tokenizer, sentences):
    """Tokenize `sentences` into fixed-length model inputs.

    Each sentence is encoded (with special tokens), right-padded with 0 or
    truncated to MAX_LEN, and paired with an attention mask (1 for real
    tokens, 0 for padding — assumes the tokenizer's pad id is 0).

    Args:
        tokenizer: object exposing `encode(sentence, add_special_tokens=True)`.
        sentences: iterable of strings.

    Returns:
        List of dicts with 'input_ids' and 'attention_mask', each a tensor of
        shape (1, MAX_LEN).
    """
    inputs = []
    MAX_LEN = 64
    for sentence in sentences:
        encoded_sent = tokenizer.encode(sentence, add_special_tokens=True)
        if (len(encoded_sent) < MAX_LEN):
            # Right-pad with 0, the padding token id.
            encoded_sent.extend(([0] * (MAX_LEN - len(encoded_sent))))
        if (len(encoded_sent) > MAX_LEN):
            # Fixed message: was '{}exceedeed' (missing space + typo).
            print('WARNING: During preprocessing, number of tokens for the sentence {} exceeded MAX LENGTH {}. This might impact accuracy of the results'.format(sentence, MAX_LEN))
            encoded_sent = encoded_sent[:MAX_LEN]
        # Mask is computed after padding/truncation so it always has MAX_LEN entries.
        att_mask = [int((token_id > 0)) for token_id in encoded_sent]
        inputs.append({'input_ids': torch.unsqueeze(torch.tensor(encoded_sent), 0), 'attention_mask': torch.unsqueeze(torch.tensor(att_mask), 0)})
    return inputs
def timeout(seconds=10):
    """Decorator factory: abort the wrapped call after `seconds` wall-clock seconds.

    Uses SIGALRM, so it is Unix-only and must run in the main thread.
    Raises TimeoutError (a subclass of OSError/Exception, so existing
    `except Exception` callers still catch it) when the deadline passes.
    The alarm is always cancelled, even if the call raises.
    """
    from functools import wraps

    def decorator(func):
        # The original had a bare `(func)` here — a garbled `@wraps(func)`; restored.
        @wraps(func)
        def wrapper(*args, **kwargs):
            def handle_timeout(signum, frame):
                raise TimeoutError('call to {} timed out after {} seconds'.format(func.__name__, seconds))
            signal(SIGALRM, handle_timeout)
            alarm(seconds)
            result = None
            try:
                result = func(*args, **kwargs)
            finally:
                # Cancel the pending alarm whether the call succeeded or raised.
                alarm(0)
            return result
        return wrapper
    return decorator
def start_global_server(port: int | None = None, urls: list[str] = ['.'], server: type[ServerType] = BottleServer, **server_args: Unpack[ServerArgs]) -> tuple[str, str | None, BottleServer]:
    """Start (or replace) the module-level server instance.

    Fixes a garbled signature: the first parameter had lost its name
    (`(int | None)=None`), which is a syntax error in Python 3; restored as
    `port` by convention.

    NOTE(review): `port` is accepted but not forwarded to start_server() in the
    visible code — confirm against the original whether it should be passed
    through (e.g. inside server_args).

    Returns:
        (address, common_path, server); the server is also stored in the
        module-level `global_server`.
    """
    global global_server
    (address, common_path, global_server) = start_server(urls=urls, server=server, **server_args)
    return (address, common_path, global_server)
.mosaiqdb  # NOTE(review): truncated decorator — presumably `@pytest.mark.mosaiqdb`; restore upstream
def test_get_patient_fields(connection: pymedphys.mosaiq.Connection):
    """End-to-end check of helpers.get_patient_fields against mocked Mosaiq tables.

    Seeds mock patients/sites/fields, then verifies field count and naming,
    total MU, and monotonically non-decreasing control-point indices for
    patient 'MR8002'.
    """
    mock_patient_ident_df = mocks.create_mock_patients()
    mock_site_df = mocks.create_mock_treatment_sites(mock_patient_ident_df)
    mocks.create_mock_treatment_fields(mock_site_df)
    fields_for_moe_df = helpers.get_patient_fields(connection, 'MR8002')
    print(fields_for_moe_df)
    field_count = 3
    assert (len(fields_for_moe_df) == field_count)
    for (_, txfield) in fields_for_moe_df.iterrows():
        field_id = txfield['field_id']
        # Mock fields are named 'Field' + label.
        assert (f"Field{txfield['field_label']}" == txfield['field_name'])
        (total_mu, point_results) = delivery.delivery_data_sql(connection, field_id)
        assert (total_mu == 100)
        print(point_results)
        current_index = 0.0
        for (_, tx_point) in point_results.iterrows():
            # Control-point index (first column) must never decrease within a field.
            assert (tx_point[0] >= current_index)
            current_index = tx_point[0]
def resolve_remote(uri, handlers):
    """Resolve a remote reference `uri` to a parsed document.

    A scheme-specific callable from `handlers` wins; otherwise the URI is
    fetched over urllib and decoded as JSON (charset from the response, UTF-8
    by default). Raises JsonSchemaDefinitionException on undecodable payloads.
    """
    scheme = urlparse.urlsplit(uri).scheme
    if scheme not in handlers:
        from urllib.request import urlopen

        response = urlopen(uri)
        charset = response.info().get_content_charset() or 'utf-8'
        try:
            return json.loads(response.read().decode(charset))
        except ValueError as exc:
            raise JsonSchemaDefinitionException('{} failed to decode: {}'.format(uri, exc))
    return handlers[scheme](uri)
_families  # NOTE(review): truncated decorator — likely `@parametrize_families`; restore upstream
def test_logging_passing_tests_disabled_does_not_log_test_output(pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str) -> None:
    """With junit_log_passing_tests=False, a passing test must produce no
    system-out / system-err nodes even though junit_logging=system-out.

    NOTE(review): the indentation inside the two literals below appears
    collapsed by extraction (makepyfile bodies normally carry indentation) —
    verify against the original before relying on them.
    """
    pytester.makeini('\n [pytest]\n junit_log_passing_tests=False\n junit_logging=system-out\n junit_family={family}\n '.format(family=xunit_family))
    pytester.makepyfile("\n import pytest\n import logging\n import sys\n\n def test_func():\n sys.stdout.write('This is stdout')\n sys.stderr.write('This is stderr')\n logging.warning('hello')\n ")
    (result, dom) = run_and_parse(family=xunit_family)
    assert (result.ret == 0)
    node = dom.find_first_by_tag('testcase')
    # Neither stream may be captured into the JUnit XML for a passing test.
    assert (len(node.find_by_tag('system-err')) == 0)
    assert (len(node.find_by_tag('system-out')) == 0)
class TransactWrite(Transaction):
    """Builder for a DynamoDB TransactWriteItems call.

    Operations are queued by condition_check/delete/save/update and sent
    atomically by _commit(). Models written via save()/update() get their
    local version attribute bumped after a successful commit.
    """

    def __init__(self, client_request_token: Optional[str] = None, return_item_collection_metrics: Optional[str] = None, **kwargs: Any) -> None:
        super(TransactWrite, self).__init__(**kwargs)
        # Idempotency token forwarded to TransactWriteItems.
        self._client_request_token: Optional[str] = client_request_token
        self._return_item_collection_metrics = return_item_collection_metrics
        # One request list per operation type, filled by the methods below.
        self._condition_check_items: List[Dict] = []
        self._delete_items: List[Dict] = []
        self._put_items: List[Dict] = []
        self._update_items: List[Dict] = []
        # Models whose local version attribute is bumped after a successful commit.
        self._models_for_version_attribute_update: List[Any] = []

    def condition_check(self, model_cls: Type[_M], hash_key: _KeyType, range_key: Optional[_KeyType] = None, condition: Optional[Condition] = None):
        """Queue a ConditionCheck on the item identified by the keys; `condition` is mandatory."""
        if (condition is None):
            raise TypeError('`condition` cannot be None')
        operation_kwargs = model_cls.get_operation_kwargs_from_class(hash_key, range_key=range_key, condition=condition)
        self._condition_check_items.append(operation_kwargs)

    def delete(self, model: _M, condition: Optional[Condition] = None, *, add_version_condition: bool = True) -> None:
        """Queue a Delete of `model`, optionally guarded by `condition` and the version attribute."""
        operation_kwargs = model.get_delete_kwargs_from_instance(condition=condition, add_version_condition=add_version_condition)
        self._delete_items.append(operation_kwargs)

    def save(self, model: _M, condition: Optional[Condition] = None, return_values: Optional[str] = None) -> None:
        """Queue a Put of `model`; the model's version attribute is updated locally after commit."""
        operation_kwargs = model.get_save_kwargs_from_instance(condition=condition, return_values_on_condition_failure=return_values)
        self._put_items.append(operation_kwargs)
        self._models_for_version_attribute_update.append(model)

    def update(self, model: _M, actions: List[Action], condition: Optional[Condition] = None, return_values: Optional[str] = None, *, add_version_condition: bool = True) -> None:
        """Queue an Update of `model` applying `actions`; version handled like save()."""
        operation_kwargs = model.get_update_kwargs_from_instance(actions=actions, condition=condition, return_values_on_condition_failure=return_values, add_version_condition=add_version_condition)
        self._update_items.append(operation_kwargs)
        self._models_for_version_attribute_update.append(model)

    def _commit(self) -> Any:
        """Send all queued operations in a single transact_write_items call.

        On success, bump the local version attribute of every tracked model,
        then return the raw service response.
        """
        response = self._connection.transact_write_items(condition_check_items=self._condition_check_items, delete_items=self._delete_items, put_items=self._put_items, update_items=self._update_items, client_request_token=self._client_request_token, return_consumed_capacity=self._return_consumed_capacity, return_item_collection_metrics=self._return_item_collection_metrics)
        for model in self._models_for_version_attribute_update:
            model.update_local_version_attribute()
        return response
class ConflictCause(IncompatibilityCause):
    """Cause for an incompatibility derived from two other, conflicting incompatibilities."""

    def __init__(self, conflict: Incompatibility, other: Incompatibility) -> None:
        self._conflict = conflict
        self._other = other

    def conflict(self) -> Incompatibility:
        """The primary incompatibility of the conflict."""
        return self._conflict

    def other(self) -> Incompatibility:
        """The secondary incompatibility involved in the conflict."""
        return self._other

    def __str__(self) -> str:
        # Displayed as the primary conflict.
        return str(self._conflict)
_criterion('winogrande')  # NOTE(review): truncated decorator — likely `@register_criterion('winogrande')`; restore upstream
class WinograndeCriterion(WSCCriterion):
    """Ranking criterion for Winogrande: score the query span against the
    single candidate span and predict whichever has the higher log-probability."""

    def forward(self, model, sample, reduce=True):
        query_lprobs = self.get_lprobs(model, sample['query_tokens'], sample['query_masks'])
        cand_lprobs = self.get_lprobs(model, sample['candidate_tokens'], sample['candidate_masks'])
        # Ties go to the query option.
        pred = (query_lprobs >= cand_lprobs)
        loss = self.get_loss(query_lprobs, cand_lprobs)
        sample_size = sample['query_tokens'].size(0)
        # NOTE(review): counts predictions equal to True; assumes the query is
        # always the correct option in this dataset layout — confirm upstream.
        ncorrect = pred.sum().item()
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size, 'ncorrect': ncorrect, 'nqueries': sample_size}
        return (loss, sample_size, logging_output)
class ModelFormTagFieldRequiredTest(TagTestManager, TestCase):
    """Model forms: a required tag field must be supplied and is normalised on save."""

    manage_models = [test_models.TagFieldRequiredModel]

    def setUpExtra(self):
        self.form = test_forms.TagFieldRequiredModelForm
        self.model = test_models.TagFieldRequiredModel
        self.tag_model = self.model.tag.tag_model

    def test_required_tagfield_set(self):
        """A valid tag string saves, and tags come back sorted alphabetically."""
        bound_form = self.form(data={'name': 'Test 1', 'tag': 'red, blue'})
        self.assertTrue(bound_form.is_valid())
        saved = bound_form.save()
        self.assertEqual(saved.name, 'Test 1')
        self.assertEqual(saved.tag, 'blue, red')
        self.assertEqual(str(saved.tag), 'blue, red')
        self.assertIsInstance(saved.tag, tag_models.TagRelatedManagerMixin)
        self.assertEqual(saved.tag.count(), 2)
        self.assertEqual(saved.tag.all()[0], 'blue')
        self.assertEqual(saved.tag.all()[1], 'red')
        self.assertTagModel(self.tag_model, {'red': 1, 'blue': 1})

    def test_required_tagfield_empty(self):
        """Omitting the tag makes the form invalid with the standard required-field error."""
        bound_form = self.form(data={'name': 'Test 1'})
        self.assertFalse(bound_form.is_valid())
        self.assertEqual(len(bound_form.errors.keys()), 1)
        self.assertEqual(list(bound_form.errors.keys())[0], 'tag')
        self.assertEqual(len(bound_form.errors['tag']), 1)
        self.assertEqual(bound_form.errors['tag'][0], 'This field is required.')
def test_get_conference_roles_for_user(conference_factory, requests_mock):
    """A user holding only an admission ticket is reported as conference ATTENDEE."""
    conference = conference_factory()
    event_api = f'{settings.PRETIX_API}organizers/base-pretix-organizer-id/events/base-pretix-event-id'
    # No vouchers for this event.
    requests_mock.get(f'{event_api}/vouchers', status_code=200, json={'next': None, 'results': []})
    # One admission ticket matching the (empty) user email.
    requests_mock.get(
        f'{event_api}/tickets/attendee-tickets',
        status_code=200,
        json=[{'id': 1, 'voucher': None, 'attendee_email': '', 'item': {'admission': True}}],
    )
    user = UserFactory(email='')
    roles = get_conference_roles_for_user(conference=conference, user_id=user.id, user_email='')
    assert roles == [Role.ATTENDEE]
def download_huggingface_tokenizers():
    """Download BERT and DistilBERT tokenizers into ./data/huggingface, skipping if cached."""
    huggingface_dir = './data/huggingface'
    if os.path.isdir(huggingface_dir):
        # Cache directory already present: nothing to do.
        logger.info('Huggingface data exists. No need to download')
        return
    os.makedirs(huggingface_dir, exist_ok=True)
    for name, tokenizer_cls in (('bert-base-uncased', BertTokenizer), ('distilbert-base-uncased', DistilBertTokenizer)):
        tokenizer = tokenizer_cls.from_pretrained(name)
        tokenizer.save_pretrained('{}/{}'.format(huggingface_dir, name))
def get_video_dataset_dicts(dataset_names, gen_inst_id=False):
    """Load several video datasets and group their images by video.

    Images without a 'video_id' each become their own single-image video.
    When `gen_inst_id` is set, annotations lacking a positive 'instance_id'
    receive a fresh globally unique id (ids mutate the catalog dicts in place).

    Removed from the original: a duplicated emptiness-assert pre-loop and a
    dead `if 0:` instance-id remapping block (plus its unused counters).

    Args:
        dataset_names: non-empty list of names registered in DatasetCatalog.
        gen_inst_id: assign instance ids to unlabeled annotations.

    Returns:
        Flat list of video dicts: {'video_id', 'images', 'dataset_source'}.
    """
    assert len(dataset_names)
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
    inst_count = 1000000  # generated instance ids start above any annotated id
    video_datasets = []
    for (source_id, (dataset_name, dicts)) in enumerate(zip(dataset_names, dataset_dicts)):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
        videos = {}
        single_video_id = 1000000  # synthetic ids for images with no video
        for image in dicts:
            video_id = image.get('video_id', -1)
            if video_id == -1:
                single_video_id += 1
                video_id = single_video_id
            if video_id not in videos:
                videos[video_id] = {'video_id': video_id, 'images': [], 'dataset_source': source_id}
            if gen_inst_id:
                for x in image['annotations']:
                    if ('instance_id' not in x) or (x['instance_id'] <= 0):
                        inst_count += 1
                        x['instance_id'] = inst_count
            videos[video_id]['images'].append(image)
        video_datasets.append(list(videos.values()))
    return list(itertools.chain.from_iterable(video_datasets))
class TestTimePoolHead(nn.Module):
    """Test-time pooling head wrapped around a classification model.

    Replaces the base model's classifier with an equivalent 1x1 convolution so
    the network runs fully convolutionally on larger inputs, then averages the
    per-position logits (avg+max blend) down to a single prediction.
    """

    def __init__(self, base, original_pool=7):
        super(TestTimePoolHead, self).__init__()
        self.base = base
        # Spatial size the base classifier was trained on (pooling kernel below).
        self.original_pool = original_pool
        base_fc = self.base.get_classifier()
        if isinstance(base_fc, nn.Conv2d):
            self.fc = base_fc
        else:
            # Convert the Linear classifier into an equivalent 1x1 conv by
            # copying (reshaped) weights and bias.
            self.fc = nn.Conv2d(self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)
            self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))
            self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))
        # Strip the base's own classifier; features come from forward_features().
        self.base.reset_classifier(0)

    def forward(self, x):
        x = self.base.forward_features(x)
        # Pool with the training-time receptive field, stride 1, then classify
        # every remaining spatial position.
        x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)
        x = self.fc(x)
        # Blend adaptive average and max pooling to 1x1, then flatten to (N, C).
        x = adaptive_avgmax_pool2d(x, 1)
        return x.view(x.size(0), (- 1))
class WarmupConstantSchedule(LambdaLR):
    """LR schedule: linear warmup from 0 over `warmup_steps`, then constant.

    The multiplier grows as step / warmup_steps during warmup and stays at 1.0
    afterwards.
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Past warmup: keep the base learning rate.
        if step >= self.warmup_steps:
            return 1.0
        # Linear ramp; max(1.0, ...) guards against warmup_steps == 0.
        return float(step) / float(max(1.0, self.warmup_steps))
class ConferenceModeratorAdmin(AuditAdmin):
    """Admin for conference moderators; non-superusers only see rows for
    conferences they themselves moderate."""

    list_display = ('conference', 'moderator', 'active') + AuditAdmin.list_display
    list_filter = ('conference',)

    def get_queryset(self, request):
        qs = super(ConferenceModeratorAdmin, self).get_queryset(request)
        # Superusers see everything.
        if request.user.is_superuser:
            return qs
        moderated_conferences = [m.conference for m in service.list_conference_moderator(user=request.user)]
        return qs.filter(conference__in=moderated_conferences)
class PlanSelector(discord.ui.Select):
    """Dropdown of premium plans; the chosen plan id is stored on the parent view."""

    def __init__(self, plans: List[PremiumPlan]):
        super().__init__(placeholder='Select a Quotient Premium Plan... ')
        for plan in plans:
            self.add_option(label=f'{plan.name} - {plan.price}', description=plan.description, value=plan.id)

    async def callback(self, interaction: discord.Interaction):
        # Acknowledge the interaction, record the selection, and close the view.
        await interaction.response.defer()
        self.view.plan = self.values[0]
        self.view.stop()
def run_params(args):
    """Map (args.mode, args.method) onto concrete run parameters.

    Starts from a deep copy of vars(args) and fills in 'model', 'optimizer',
    'loss' and 'dataset'. The loss/dataset/model strings are load-bearing
    identifiers looked up elsewhere (including spellings such as
    'baseline_prtrain' / 'dfm_prtrain') — do not "fix" them without updating
    the data pipeline.
    """
    params = deepcopy(vars(args))
    # Defaults; some branches override 'model'.
    params['model'] = 'MLP_SIG'
    params['optimizer'] = 'Adam'
    # 'None' is the sentinel string for "no cache directory".
    if (args.data_cache_path != 'None'):
        pathlib.Path(args.data_cache_path).mkdir(parents=True, exist_ok=True)
    if (args.mode == 'pretrain'):
        # ---- pretraining datasets/losses ----
        if (args.method == 'Pretrain'):
            params['loss'] = 'cross_entropy_loss'
            params['dataset'] = 'baseline_prtrain'
        elif (args.method == 'Pretrain_1d'):
            params['loss'] = 'cross_entropy_loss'
            params['dataset'] = ('baseline_pretrain_1d_cut_hour_' + str(args.C))
        elif (args.method == 'DFM'):
            params['loss'] = 'delayed_feedback_loss'
            params['dataset'] = 'dfm_prtrain'
            params['model'] = 'MLP_EXP_DELAY'
        elif (args.method == 'ES-DFM'):
            params['loss'] = 'tn_dp_pretraining_loss'
            params['dataset'] = ('tn_dp_mask30d_pretrain_cut_hour_' + str(args.C))
            params['model'] = 'MLP_tn_dp'
        elif (args.method == 'ES-DFM_1d'):
            params['loss'] = 'tn_dp_pretraining_loss'
            params['dataset'] = ('tn_dp_mask30d_pretrain_1d_cut_hour_' + str(args.C))
            params['model'] = 'MLP_tn_dp'
        elif (args.method == 'Bi-DEFUSE_MLP'):
            params['loss'] = 'inw_outw_cross_entropy_loss'
            params['dataset'] = ('bidefuse_pretrain_cut_hour_' + str(args.C))
            params['model'] = 'Bi-DEFUSE_MLP'
        elif (args.method == 'Bi-DEFUSE_MLP_1d'):
            params['loss'] = 'inw_outw_cross_entropy_loss'
            params['dataset'] = ('bidefuse_pretrain_1d_cut_hour_' + str(args.C))
            params['model'] = 'Bi-DEFUSE_MLP'
        elif (args.method == 'Bi-DEFUSE'):
            params['loss'] = 'inw_outw_cross_entropy_loss'
            params['dataset'] = ('bidefuse_pretrain_cut_hour_' + str(args.C))
            params['model'] = 'Bi-DEFUSE_inoutw'
        elif (args.method == 'Bi-DEFUSE_1d'):
            params['loss'] = 'inw_outw_cross_entropy_loss'
            params['dataset'] = ('bidefuse_pretrain_1d_cut_hour_' + str(args.C))
            params['model'] = 'Bi-DEFUSE_inoutw'
        elif (args.method == 'dp_1d'):
            params['loss'] = 'dp_loss'
            params['dataset'] = ('dp_v2_1d_pretrain_cut_hour_' + str(args.C))
            params['model'] = 'MLP_dp'
        elif (args.method == 'DEFER'):
            params['loss'] = 'dp_loss'
            params['dataset'] = ('dp_pretrain_cut_hour_' + str(args.C))
            params['model'] = 'MLP_dp'
        elif (args.method == 'DEFER_1d'):
            params['loss'] = 'dp_loss'
            params['dataset'] = ('dp_pretrain_1d_cut_hour_' + str(args.C))
            params['model'] = 'MLP_dp'
        else:
            raise ValueError('{} method do not need pretraining other than Pretrain'.format(args.method))
    # ---- fine-tuning / streaming-training datasets and losses ----
    elif (args.method == 'Pretrain'):
        params['loss'] = 'none_loss'
        params['dataset'] = 'last_30_train_test_oracle'
    elif (args.method == 'Pretrain_1d'):
        params['loss'] = 'none_loss'
        params['dataset'] = 'last_30_1d_train_test_oracle'
    elif (args.method == 'Oracle'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = 'last_30_train_test_oracle'
    elif (args.method == 'Oracle_1d'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = 'last_30_1d_train_test_oracle'
    elif (args.method == 'DFM'):
        params['loss'] = 'delayed_feedback_loss'
        params['dataset'] = 'last_30_train_test_dfm'
    elif (args.method == 'ES-DFM'):
        params['loss'] = 'esdfm_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_cut_hour_' + str(args.C))
    elif (args.method == 'ES-DFM_1d'):
        params['loss'] = 'esdfm_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_1d_cut_hour_' + str(args.C))
    elif (args.method == 'DEFUSE'):
        params['loss'] = 'defuse_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_cut_hour_' + str(args.C))
    elif (args.method == 'DEFUSE_1d'):
        params['loss'] = 'defuse_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_1d_cut_hour_' + str(args.C))
    elif (args.method == 'DEFUSE_3d'):
        params['loss'] = 'defuse_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_3d_cut_hour_' + str(args.C))
    elif (args.method == 'DEFUSE_7d'):
        params['loss'] = 'defuse_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_7d_cut_hour_' + str(args.C))
    elif (args.method == 'DEFUSE_14d'):
        params['loss'] = 'defuse_loss'
        params['dataset'] = ('last_30_train_test_esdfm_oracle_v2_14d_cut_hour_' + str(args.C))
    elif (args.method == 'Bi-DEFUSE_MLP'):
        params['loss'] = 'bidefuse_loss'
        params['dataset'] = ('last_30_train_test_bidefuse_cut_hour_' + str(args.C))
    elif (args.method == 'Bi-DEFUSE'):
        params['loss'] = 'bidefuse_loss'
        params['dataset'] = ('last_30_train_test_bidefuse_cut_hour_' + str(args.C))
    elif (args.method == 'Bi-DEFUSE_1d'):
        params['loss'] = 'bidefuse_loss'
        params['dataset'] = ('last_30_train_test_bidefuse_1d_cut_hour_' + str(args.C))
    elif (args.method == 'DEFER'):
        params['loss'] = 'defer_loss'
        params['dataset'] = 'last_30_train_test_defer_cut_hour_{}_attr_day_{}'.format(args.C, args.W)
    elif (args.method == 'DEFER_unbiased'):
        params['loss'] = 'unbiased_defer_loss'
        params['dataset'] = 'last_30_train_test_defer_cut_hour_{}_attr_day_{}'.format(args.C, args.W)
    elif (args.method == 'Vanilla'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = ('last_30_train_test_vanilla_cut_hour_' + str(args.C))
    elif (args.method == 'Vanilla_1d'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = ('last_30_train_test_vanilla_1d_cut_hour_' + str(args.C))
    elif (args.method == 'Vanilla-win'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = ('last_30_train_test_esdfm_cut_hour_' + str(args.C))
    elif (args.method == 'Vanilla-win_1d'):
        # NOTE(review): uses the vanilla_1d dataset while 'Vanilla-win' uses the
        # esdfm dataset — possibly intentional, but looks inconsistent; confirm.
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = ('last_30_train_test_vanilla_1d_cut_hour_' + str(args.C))
    elif (args.method == 'FNW'):
        params['loss'] = 'fake_negative_weighted_loss'
        params['dataset'] = 'last_30_train_test_fnw'
    elif (args.method == 'FNW_1d'):
        params['loss'] = 'fake_negative_weighted_loss'
        params['dataset'] = 'last_30_train_test_fnw_1d'
    elif (args.method == 'FNW_unbiased'):
        params['loss'] = 'unbiased_fake_negative_weighted_loss'
        params['dataset'] = 'last_30_train_test_fnw'
    elif (args.method == 'FNW_unbiased_1d'):
        params['loss'] = 'unbiased_fake_negative_weighted_loss'
        params['dataset'] = 'last_30_train_test_fnw_1d'
    elif (args.method == 'FNC'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = 'last_30_train_test_fnw'
    elif (args.method == 'FNC_1d'):
        params['loss'] = 'cross_entropy_loss'
        params['dataset'] = 'last_30_train_test_fnw_1d'
    return params
def test_registry():
    """Registry behaves like a mapping: decorator registration, __contains__,
    __len__, __getitem__, and KeyError for unknown keys."""
    registry = Registry()
    assert ('DIFF' not in registry)
    assert (len(registry) == 0)
    # NOTE(review): decorator truncated by extraction — presumably
    # `@registry('DIFF')` (or similar) registering `difference`; confirm upstream.
    ('DIFF')
    def difference(a, b):
        """Return a minus b (sample registered operator)."""
        return (a - b)
    assert ('DIFF' in registry)
    assert (len(registry) == 1)
    assert (registry['DIFF'] == difference)
    # Unregistered keys must raise KeyError.
    with pytest.raises(KeyError):
        registry['HEIGHT']
def test_field_renaming(converter: Converter):
    """`override(rename=...)` structures a union member from a renamed key,
    which also disambiguates the otherwise-identical classes A and B."""
    # NOTE(review): A and B presumably carry an attrs `@define` (or dataclass)
    # decorator lost in extraction — `A(1)` below needs a generated __init__.
    class A():
        a: int

    class B():
        a: int
    converter.register_structure_hook(B, make_dict_structure_fn(B, converter, a=override(rename='b')))
    # Key 'a' selects A; renamed key 'b' selects B.
    assert (converter.structure({'a': 1}, Union[(A, B)]) == A(1))
    assert (converter.structure({'b': 1}, Union[(A, B)]) == B(1))
class TestTradingControls(zf.WithMakeAlgo, zf.ZiplineTestCase):
    """Integration tests for zipline's trading-control APIs (max position size,
    asset restrictions, order size/count limits, long-only, post-init guard).

    Each test builds an algorithm via make_algo and asserts how many orders
    were placed before a TradingControlViolation (if any) stopped it.
    """

    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    sid = 133
    sids = ASSET_FINDER_EQUITY_SIDS = (133, 134)
    SIM_PARAMS_DATA_FREQUENCY = 'daily'
    DATA_PORTAL_USE_MINUTE_DATA = True

    # NOTE(review): likely missing a @classmethod decorator (lost in
    # extraction) — init_class_fixtures is conventionally a classmethod.
    def init_class_fixtures(cls):
        super(TestTradingControls, cls).init_class_fixtures()
        cls.asset = cls.asset_finder.retrieve_asset(cls.sid)
        cls.another_asset = cls.asset_finder.retrieve_asset(134)

    def _check_algo(self, algo, expected_order_count, expected_exc):
        # Run the algo expecting `expected_exc` (success when None), then check
        # how many orders were placed before the control tripped.
        with (self.assertRaises(expected_exc) if expected_exc else nop_context):
            algo.run()
        self.assertEqual(algo.order_count, expected_order_count)

    def check_algo_succeeds(self, algo, order_count=4):
        # Default of 4 orders: one per trading day in the simulation window.
        self._check_algo(algo, order_count, None)

    def check_algo_fails(self, algo, order_count):
        self._check_algo(algo, order_count, TradingControlViolation)

    def test_set_max_position_size(self):
        def initialize(self, asset, max_shares, max_notional):
            self.set_slippage(FixedSlippage())
            self.order_count = 0
            self.set_max_position_size(asset=asset, max_shares=max_shares, max_notional=max_notional)
        # Buy 1 share/day: stays under both limits.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 1)
            algo.order_count += 1
        algo = self.make_algo(asset=self.asset, max_shares=10, max_notional=500.0, initialize=initialize, handle_data=handle_data)
        self.check_algo_succeeds(algo)
        # 3 shares/day: share limit trips on day 4.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 3)
            algo.order_count += 1
        algo = self.make_algo(asset=self.asset, max_shares=10, max_notional=500.0, initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 3)
        # Same orders, tighter notional limit: trips a day earlier.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 3)
            algo.order_count += 1
        algo = self.make_algo(asset=self.asset, max_shares=10, max_notional=67.0, initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 2)
        # Limit is on a different asset: huge orders on self.sid are unaffected.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = self.make_algo(asset=self.another_asset, max_shares=10, max_notional=67.0, initialize=initialize, handle_data=handle_data)
        self.check_algo_succeeds(algo)
        # asset=None applies the control to every asset: first order trips it.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = self.make_algo(max_shares=10, max_notional=61.0, asset=None, initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 0)

    def test_set_asset_restrictions(self):
        def initialize(algo, sid, restrictions, on_error):
            algo.order_count = 0
            algo.set_asset_restrictions(restrictions, on_error)
        def handle_data(algo, data):
            algo.could_trade = data.can_trade(algo.sid(self.sid))
            algo.order(algo.sid(self.sid), 100)
            algo.order_count += 1
        # Historical FROZEN restriction on the traded sid: first order fails.
        rlm = HistoricalRestrictions([Restriction(self.sid, self.sim_params.start_session, RESTRICTION_STATES.FROZEN)])
        algo = self.make_algo(sid=self.sid, restrictions=rlm, on_error='fail', initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 0)
        self.assertFalse(algo.could_trade)
        # Static restriction behaves the same.
        rlm = StaticRestrictions([self.sid])
        algo = self.make_algo(sid=self.sid, restrictions=rlm, on_error='fail', initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 0)
        self.assertFalse(algo.could_trade)
        # on_error='log' only logs the violation; the algo completes.
        algo = self.make_algo(sid=self.sid, restrictions=rlm, on_error='log', initialize=initialize, handle_data=handle_data)
        with make_test_handler(self) as log_catcher:
            self.check_algo_succeeds(algo)
        logs = [r.message for r in log_catcher.records]
        self.assertIn('Order for 100 shares of Equity(133 [A]) at 2006-01-03 21:00:00+00:00 violates trading constraint RestrictedListOrder({})', logs)
        self.assertFalse(algo.could_trade)
        # Restrictions on other sids do not affect trading self.sid.
        rlm = HistoricalRestrictions([Restriction(sid, self.sim_params.start_session, RESTRICTION_STATES.FROZEN) for sid in [134, 135, 136]])
        algo = self.make_algo(sid=self.sid, restrictions=rlm, on_error='fail', initialize=initialize, handle_data=handle_data)
        self.check_algo_succeeds(algo)
        self.assertTrue(algo.could_trade)

    # NOTE(review): parametrization decorator truncated by extraction — the
    # bare list below was presumably the argument of something like
    # @parameterized.expand(...); restore upstream.
    ([('order_first_restricted_sid', 0), ('order_second_restricted_sid', 1)])
    def test_set_multiple_asset_restrictions(self, name, to_order_idx):
        def initialize(algo, restrictions1, restrictions2, on_error):
            algo.order_count = 0
            algo.set_asset_restrictions(restrictions1, on_error)
            algo.set_asset_restrictions(restrictions2, on_error)
        def handle_data(algo, data):
            algo.could_trade1 = data.can_trade(algo.sid(self.sids[0]))
            algo.could_trade2 = data.can_trade(algo.sid(self.sids[1]))
            algo.order(algo.sid(self.sids[to_order_idx]), 100)
            algo.order_count += 1
        # Two stacked restriction sets: ordering either restricted sid fails.
        rl1 = StaticRestrictions([self.sids[0]])
        rl2 = StaticRestrictions([self.sids[1]])
        algo = self.make_algo(restrictions1=rl1, restrictions2=rl2, initialize=initialize, handle_data=handle_data, on_error='fail')
        self.check_algo_fails(algo, 0)
        self.assertFalse(algo.could_trade1)
        self.assertFalse(algo.could_trade2)

    def test_set_do_not_order_list(self):
        def initialize(self, restricted_list):
            self.order_count = 0
            self.set_do_not_order_list(restricted_list, on_error='fail')
        def handle_data(algo, data):
            algo.could_trade = data.can_trade(algo.sid(self.sid))
            algo.order(algo.sid(self.sid), 100)
            algo.order_count += 1
        # Legacy plain-list API: equivalent to a static restriction.
        rlm = [self.sid]
        algo = self.make_algo(restricted_list=rlm, initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 0)
        self.assertFalse(algo.could_trade)

    def test_set_max_order_size(self):
        def initialize(algo, asset, max_shares, max_notional):
            algo.order_count = 0
            algo.set_max_order_size(asset=asset, max_shares=max_shares, max_notional=max_notional)
        # 1 share per order: within limits.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 1)
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, asset=self.asset, max_shares=10, max_notional=500.0)
        self.check_algo_succeeds(algo)
        # Growing order size: 4th order (4 shares) exceeds max_shares=3.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), (algo.order_count + 1))
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, asset=self.asset, max_shares=3, max_notional=500.0)
        self.check_algo_fails(algo, 3)
        # Growing order size against a notional cap.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), (algo.order_count + 1))
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, asset=self.asset, max_shares=10, max_notional=40.0)
        self.check_algo_fails(algo, 3)
        # Limit on a different asset: unrestricted for self.sid.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, asset=self.another_asset, max_shares=1, max_notional=1.0)
        self.check_algo_succeeds(algo)
        # asset=None restricts every asset: first order trips it.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), 10000)
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, asset=None, max_shares=1, max_notional=1.0)
        self.check_algo_fails(algo, 0)

    def test_set_max_order_count(self):
        def initialize(algo, count):
            algo.order_count = 0
            algo.set_max_order_count(count)
        def handle_data(algo, data):
            for i in range(5):
                algo.order(self.asset, 1)
                algo.order_count += 1
        # 5 orders attempted on day one against a cap of 3.
        algo = self.make_algo(count=3, initialize=initialize, handle_data=handle_data)
        with self.assertRaises(TradingControlViolation):
            algo.run()
        self.assertEqual(algo.order_count, 3)

    def test_set_max_order_count_minutely(self):
        sim_params = self.make_simparams(data_frequency='minute')
        def initialize(algo, max_orders_per_day):
            algo.minute_count = 0
            algo.order_count = 0
            algo.set_max_order_count(max_orders_per_day)
        # 5 orders at minute 0 and minute 100: 10 attempts vs a cap of 9.
        def handle_data(algo, data):
            if ((algo.minute_count == 0) or (algo.minute_count == 100)):
                for i in range(5):
                    algo.order(self.asset, 1)
                    algo.order_count += 1
            algo.minute_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, max_orders_per_day=9, sim_params=sim_params)
        with self.assertRaises(TradingControlViolation):
            algo.run()
        self.assertEqual(algo.order_count, 9)
        # The count resets daily: 5 orders at each day's open (minute % 390 == 0)
        # over 4 days stays within the per-day cap of 5.
        def handle_data(algo, data):
            if ((algo.minute_count % 390) == 0):
                for i in range(5):
                    algo.order(self.asset, 1)
                    algo.order_count += 1
            algo.minute_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data, max_orders_per_day=5, sim_params=sim_params)
        algo.run()
        self.assertEqual(algo.order_count, 20)

    def test_long_only(self):
        def initialize(algo):
            algo.order_count = 0
            algo.set_long_only()
        # Immediate short: first order violates.
        def handle_data(algo, data):
            algo.order(algo.sid(self.sid), (- 1))
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 0)
        # Alternating buy/sell never goes net short.
        def handle_data(algo, data):
            if ((algo.order_count % 2) == 0):
                algo.order(algo.sid(self.sid), 1)
            else:
                algo.order(algo.sid(self.sid), (- 1))
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data)
        self.check_algo_succeeds(algo)
        # Selling exactly down to zero is allowed.
        def handle_data(algo, data):
            amounts = [1, 1, 1, (- 3)]
            algo.order(algo.sid(self.sid), amounts[algo.order_count])
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data)
        self.check_algo_succeeds(algo)
        # Selling one share past zero violates on the 4th order.
        def handle_data(algo, data):
            amounts = [1, 1, 1, (- 4)]
            algo.order(algo.sid(self.sid), amounts[algo.order_count])
            algo.order_count += 1
        algo = self.make_algo(initialize=initialize, handle_data=handle_data)
        self.check_algo_fails(algo, 3)

    def test_register_post_init(self):
        # Registering any trading control after initialize() must raise.
        def initialize(algo):
            algo.initialized = True
        def handle_data(algo, data):
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_position_size(self.sid, 1, 1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_order_size(self.sid, 1, 1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_max_order_count(1)
            with self.assertRaises(RegisterTradingControlPostInit):
                algo.set_long_only()
        self.run_algorithm(initialize=initialize, handle_data=handle_data)
def is2_use(use):
    """Usage example: import an IntSequence2 artifact from an IntSequenceFormatV2 file."""
    def factory():
        # Build and validate a small on-disk format instance to import from.
        from qiime2.core.testing.format import IntSequenceFormatV2
        from qiime2.plugin.util import transform

        fmt = transform([1, 2, 3], to_type=IntSequenceFormatV2)
        fmt.validate()
        return fmt

    to_import = use.init_format('to_import', factory, ext='.hello')
    use.import_from_format('ints', semantic_type='IntSequence2', variable=to_import, view_type='IntSequenceFormatV2')
def process_dis_batch_recur(input_filename_list, dim_input, num, reshape_with_one=False):
    """Load, normalise and stack `num` images listed in `input_filename_list`.

    Paths are resolved against FLAGS.data_path; pixel values are scaled to
    [0, 1]. Returns shape (1, num, dim_input) when reshape_with_one else
    (num, dim_input). The input list is shuffled in place with a fixed seed,
    so the order is deterministic across calls.
    """
    random.seed(6)  # fixed seed: reproducible shuffle
    random.shuffle(input_filename_list)
    images = []
    for rel_path in input_filename_list:
        img = scm.imread(FLAGS.data_path + rel_path)
        img = np.reshape(img, [-1, dim_input]) / 255.0
        images.append(img)
    target_shape = [1, num, dim_input] if reshape_with_one else [num, dim_input]
    return np.array(images).reshape(target_shape)
def test_scene_to_pixmap_exporter_export_with_worker(view, tmpdir):
    """Export reports progress on the worker and writes a valid PNG file."""
    filename = os.path.join(tmpdir, 'foo.png')
    source_image = QtGui.QImage(1000, 1200, QtGui.QImage.Format.Format_RGB32)
    view.scene.addItem(BeePixmapItem(source_image))
    exporter = SceneToPixmapExporter(view.scene)
    exporter.size = QtCore.QSize(100, 120)
    worker = MagicMock(canceled=False)
    exporter.export(filename, worker)
    # Exactly one unit of work is announced, progressed and finished.
    worker.begin_processing.emit.assert_called_once_with(1)
    worker.progress.emit.assert_called_once_with(1)
    worker.finished.emit.assert_called_once_with(filename, [])
    # File starts with the PNG magic bytes.
    with open(filename, 'rb') as f:
        assert f.read().startswith(b'\x89PNG')
def assert_reversible_orbit(inst, iterations):
    """Step `iterations` orbits forward then back; data must round-trip.

    For short runs (< 30 iterations) the visited start times must also match
    in reverse order.
    """
    forward_times = []
    backward_times = []
    control = inst.copy()
    for _ in range(iterations):
        forward_times.append(inst.index[0])
        inst.orbits.next()
    for _ in range(iterations):
        inst.orbits.prev()
        backward_times.append(inst.index[0])
    # Data after the round trip must equal the untouched copy.
    assert all((control.data == inst.data))
    if iterations < 30:
        assert np.all((backward_times == forward_times[::-1]))
class MapsGrid():
    """A grid of EOmaps `Maps` objects sharing one figure / GridSpec.

    Either a regular r x c grid is created (default init), or explicit
    `m_inits` / `ax_inits` mappings of name -> gridspec-slice are used
    (custom init). Each created map is exposed as `self.m_<name>` and
    plain axes as `self.ax_<name>`.

    NOTE(review): this copy appears to have lost its decorators during
    extraction — the bare `(Maps.xyz)` lines below look like stripped
    `@wraps(Maps.xyz)` (and `_cache()` / property decorators); confirm
    against the upstream EOmaps source.
    """

    def __init__(self, r=2, c=2, crs=None, m_inits=None, ax_inits=None, figsize=None, layer='base', f=None, **kwargs):
        self._Maps = []
        self._names = dict()
        if (WebMapContainer is not None):
            self._wms_container = WebMapContainer(self)
        # Default to near-edge-to-edge subplot margins; caller kwargs win.
        gskwargs = dict(bottom=0.01, top=0.99, left=0.01, right=0.99)
        gskwargs.update(kwargs)
        self.gridspec = GridSpec(nrows=r, ncols=c, **gskwargs)
        if ((m_inits is None) and (ax_inits is None)):
            # Regular grid: broadcast a single crs (or reshape a list) to (r, c).
            if isinstance(crs, list):
                crs = np.array(crs).reshape((r, c))
            else:
                crs = np.broadcast_to(crs, (r, c))
            self._custom_init = False
            for i in range(r):
                for j in range(c):
                    crsij = crs[(i, j)]
                    if isinstance(crsij, np.generic):
                        # unwrap numpy scalars to plain python values
                        crsij = crsij.item()
                    if ((i == 0) and (j == 0)):
                        # First map owns the figure and becomes the parent.
                        mij = Maps(crs=crsij, ax=self.gridspec[(0, 0)], figsize=figsize, layer=layer, f=f)
                        mij.ax.set_label('mg_map_0_0')
                        self.parent = mij
                    else:
                        mij = Maps(crs=crsij, f=self.parent.f, ax=self.gridspec[(i, j)], layer=layer)
                        mij.ax.set_label(f'mg_map_{i}_{j}')
                    self._Maps.append(mij)
                    name = f'{i}_{j}'
                    self._names.setdefault('Maps', []).append(name)
                    setattr(self, ('m_' + name), mij)
        else:
            # Custom init via explicit name -> gridspec-slice mappings.
            self._custom_init = True
            if (m_inits is not None):
                if (not isinstance(crs, dict)):
                    if isinstance(crs, np.generic):
                        crs = crs.item()
                    # same crs for every named map
                    crs = {key: crs for key in m_inits}
                assert self._test_unique_str_keys(m_inits), 'EOmaps: there are duplicated keys in m_inits!'
                for (i, [key, val]) in enumerate(m_inits.items()):
                    if (ax_inits is not None):
                        # map-names and axes-names must not collide
                        q = set(m_inits).intersection(set(ax_inits))
                        assert (len(q) == 0), f'You cannot provide duplicate keys! \nCheck: {q}'
                    if (i == 0):
                        mi = Maps(crs=crs[key], ax=self.gridspec[val], figsize=figsize, layer=layer, f=f)
                        mi.ax.set_label(f'mg_map_{key}')
                        self.parent = mi
                    else:
                        mi = Maps(crs=crs[key], ax=self.gridspec[val], layer=layer, f=self.parent.f)
                        mi.ax.set_label(f'mg_map_{key}')
                    name = str(key)
                    self._names.setdefault('Maps', []).append(name)
                    self._Maps.append(mi)
                    setattr(self, f'm_{name}', mi)
            if (ax_inits is not None):
                assert self._test_unique_str_keys(ax_inits), 'EOmaps: there are duplicated keys in ax_inits!'
                for (key, val) in ax_inits.items():
                    self.create_axes(val, name=key)

    def new_layer(self, layer=None):
        """Return a new MapsGrid whose maps are layer-copies of this grid's maps."""
        if (layer is None):
            layer = self.parent.layer
        mg = MapsGrid(m_inits=dict())
        mg.gridspec = self.gridspec
        for (name, m) in zip(self._names.get('Maps', []), self._Maps):
            newm = m.new_layer(layer)
            mg._Maps.append(newm)
            # NOTE(review): mg._names is a plain dict() here, so 'Maps' may be
            # missing and this would raise KeyError — upstream likely uses a
            # defaultdict(list); verify.
            mg._names['Maps'].append(name)
            setattr(mg, ('m_' + name), newm)
            if (m is self.parent):
                mg.parent = newm
        for name in self._names.get('Axes', []):
            # plain axes are shared (not copied) with the new grid
            ax = getattr(self, f'ax_{name}')
            mg._names['Axes'].append(name)
            setattr(mg, f'ax_{name}', ax)
        return mg

    def cleanup(self):
        """Run cleanup on every contained Maps object."""
        for m in self:
            m.cleanup()

    # NOTE(review): no `self`/`cls` — probably a stripped @staticmethod.
    def _test_unique_str_keys(x):
        # True iff str() of all keys are pairwise distinct (set.add returns None).
        seen = set()
        return (not any((((str(i) in seen) or seen.add(str(i))) for i in x)))

    def __iter__(self):
        return iter(self._Maps)

    def __getitem__(self, key):
        """Look up a map by 'r_c' string / (r, c) tuple (grid init) or by name (custom init)."""
        try:
            if (self._custom_init is False):
                if isinstance(key, str):
                    (r, c) = map(int, key.split('_'))
                elif isinstance(key, (list, tuple)):
                    (r, c) = key
                else:
                    raise IndexError(f'{key} is not a valid indexer for MapsGrid')
                return getattr(self, f'm_{r}_{c}')
            elif (str(key) in self._names.get('Maps', [])):
                return getattr(self, ('m_' + str(key)))
            elif (str(key) in self._names.get('Axes', [])):
                return getattr(self, ('ax_' + str(key)))
            else:
                raise IndexError(f'{key} is not a valid indexer for MapsGrid')
        except:
            # normalize any lookup failure into an IndexError
            raise IndexError(f'{key} is not a valid indexer for MapsGrid')

    # NOTE(review): likely a stripped @property.
    def _preferred_wms_service(self):
        return self.parent._preferred_wms_service

    def create_axes(self, ax_init, name=None):
        """Add a plain (non-map) matplotlib axes at gridspec slice `ax_init`."""
        if (name is None):
            # auto-name by the number of existing ax_* attributes
            axes = [key for key in self.__dict__ if key.startswith('ax_')]
            name = str(len(axes))
        else:
            assert name.isidentifier(), f'the provided name {name} is not a valid identifier'
        ax = self.f.add_subplot(self.gridspec[ax_init], label=f'mg_ax_{name}')
        self._names.setdefault('Axes', []).append(name)
        setattr(self, f'ax_{name}', ax)
        return ax

    _doc_prefix = ('This will execute the corresponding action on ALL Maps ' + 'objects of the MapsGrid!\n')

    # NOTE(review): likely a stripped @property (used unpacked as *self.children below).
    def children(self):
        return [i for i in self if (i is not self.parent)]

    # NOTE(review): likely a stripped @property.
    def f(self):
        return self.parent.f

    (Maps.plot_map)
    def plot_map(self, **kwargs):
        for m in self:
            m.plot_map(**kwargs)
    plot_map.__doc__ = (_doc_prefix + plot_map.__doc__)

    _cache()
    (Shapes)
    def set_shape(self):
        s = Shapes(self)
        s.__doc__ = (self._doc_prefix + s.__doc__)
        return s

    (Maps.set_data)
    def set_data(self, *args, **kwargs):
        for m in self:
            m.set_data(*args, **kwargs)
    set_data.__doc__ = (_doc_prefix + set_data.__doc__)

    (Maps.set_classify_specs)
    def set_classify_specs(self, scheme=None, **kwargs):
        for m in self:
            m.set_classify_specs(scheme=scheme, **kwargs)
    set_classify_specs.__doc__ = (_doc_prefix + set_classify_specs.__doc__)

    (Maps.add_annotation)
    def add_annotation(self, *args, **kwargs):
        for m in self:
            m.add_annotation(*args, **kwargs)
    add_annotation.__doc__ = (_doc_prefix + add_annotation.__doc__)

    (Maps.add_marker)
    def add_marker(self, *args, **kwargs):
        for m in self:
            m.add_marker(*args, **kwargs)
    add_marker.__doc__ = (_doc_prefix + add_marker.__doc__)

    # add_wms is only defined when the Maps class provides it (optional dependency).
    if hasattr(Maps, 'add_wms'):
        (Maps.add_wms)
        def add_wms(self):
            return self._wms_container

    (Maps.add_feature)
    def add_feature(self):
        x = NaturalEarth_features(self)
        return x

    (Maps.add_gdf)
    def add_gdf(self, *args, **kwargs):
        for m in self:
            m.add_gdf(*args, **kwargs)
    add_gdf.__doc__ = (_doc_prefix + add_gdf.__doc__)

    (Maps.add_line)
    def add_line(self, *args, **kwargs):
        for m in self:
            m.add_line(*args, **kwargs)
    add_line.__doc__ = (_doc_prefix + add_line.__doc__)

    (Maps.add_scalebar)
    def add_scalebar(self, *args, **kwargs):
        for m in self:
            m.add_scalebar(*args, **kwargs)
    add_scalebar.__doc__ = (_doc_prefix + add_scalebar.__doc__)

    (Maps.add_compass)
    def add_compass(self, *args, **kwargs):
        for m in self:
            m.add_compass(*args, **kwargs)
    add_compass.__doc__ = (_doc_prefix + add_compass.__doc__)

    (Maps.add_colorbar)
    def add_colorbar(self, *args, **kwargs):
        for m in self:
            m.add_colorbar(*args, **kwargs)
    add_colorbar.__doc__ = (_doc_prefix + add_colorbar.__doc__)

    (Maps.add_logo)
    def add_logo(self, *args, **kwargs):
        for m in self:
            m.add_logo(*args, **kwargs)
    # NOTE(review): assigns add_colorbar.__doc__, not add_logo.__doc__ —
    # looks like a copy/paste typo; verify upstream.
    add_colorbar.__doc__ = (_doc_prefix + add_logo.__doc__)

    def share_click_events(self):
        """Share click callbacks between the parent and all child maps."""
        self.parent.cb.click.share_events(*self.children)

    def share_move_events(self):
        """Share move callbacks between the parent and all child maps."""
        self.parent.cb.move.share_events(*self.children)

    def share_pick_events(self, name='default'):
        """Share pick callbacks (optionally of a named picker) across the grid."""
        if (name == 'default'):
            self.parent.cb.pick.share_events(*self.children)
        else:
            self.parent.cb.pick[name].share_events(*self.children)

    def join_limits(self):
        """Link the axis limits of all maps in the grid."""
        self.parent.join_limits(*self.children)

    (Maps.redraw)
    def redraw(self, *args):
        self.parent.redraw(*args)

    (plt.savefig)
    def savefig(self, *args, **kwargs):
        # force a background re-fetch before saving the shared figure
        self.parent.BM._refetch_bg = True
        self.parent.savefig(*args, **kwargs)

    (Maps.util)
    def util(self):
        return self.parent.util

    (Maps.subplots_adjust)
    def subplots_adjust(self, **kwargs):
        return self.parent.subplots_adjust(**kwargs)

    (Maps.get_layout)
    def get_layout(self, *args, **kwargs):
        return self.parent.get_layout(*args, **kwargs)

    (Maps.apply_layout)
    def apply_layout(self, *args, **kwargs):
        return self.parent.apply_layout(*args, **kwargs)

    (Maps.edit_layout)
    def edit_layout(self, *args, **kwargs):
        return self.parent.edit_layout(*args, **kwargs)

    (Maps.show)
    def show(self, *args, **kwargs):
        return self.parent.show(*args, **kwargs)

    (Maps.snapshot)
    def snapshot(self, *args, **kwargs):
        return self.parent.snapshot(*args, **kwargs)
class _ADTSStream(object):
    """Incremental parser for an ADTS (AAC) frame stream read through a BitReader.

    NOTE(review): decorators appear stripped in this copy — `find_stream`
    reads like a @classmethod and bitrate/samples/size/channels/frequency
    like @property accessors (bitrate uses `self.frequency` as a value);
    confirm against the upstream (mutagen-style) source.
    """

    # class-level defaults; overwritten per instance in __init__
    parsed_frames = 0
    offset = 0

    def find_stream(cls, fileobj, max_bytes):
        """Scan up to `max_bytes` for an ADTS sync word; return a positioned stream or None."""
        r = BitReader(fileobj)
        stream = cls(r)
        if stream.sync(max_bytes):
            # byte offset of the frame start (12 sync bits already consumed)
            stream.offset = ((r.get_position() - 12) // 8)
            return stream

    def sync(self, max_bytes):
        """Advance the reader to the next 0xFFF sync word; True on success."""
        max_bytes = max(max_bytes, 2)
        r = self._r
        r.align()
        while (max_bytes > 0):
            try:
                b = r.bytes(1)
                if (b == b'\xff'):
                    # 0xFF seen; the next 4 bits must also be set (0xFFF syncword)
                    if (r.bits(4) == 15):
                        return True
                    r.align()
                    max_bytes -= 2
                else:
                    max_bytes -= 1
            except BitReaderError:
                return False
        return False

    def __init__(self, r):
        self._fixed_header_key = None  # fixed-header fields of the first frame
        self._r = r
        self.offset = (- 1)
        self.parsed_frames = 0
        self._samples = 0   # total decoded samples seen so far
        self._payload = 0   # total payload bytes (excluding CRC overhead)
        self._start = (r.get_position() / 8)
        self._last = self._start

    def bitrate(self):
        """Average bitrate in bits/s over the parsed frames."""
        assert self.parsed_frames, 'no frame parsed yet'
        if (self._samples == 0):
            return 0
        return (((8 * self._payload) * self.frequency) // self._samples)

    def samples(self):
        assert self.parsed_frames, 'no frame parsed yet'
        return self._samples

    def size(self):
        """Bytes consumed between the first and last parsed frame."""
        assert self.parsed_frames, 'no frame parsed yet'
        return (self._last - self._start)

    def channels(self):
        """Channel count from the channel_configuration field (7 -> 8ch, >7 invalid)."""
        assert self.parsed_frames, 'no frame parsed yet'
        b_index = self._fixed_header_key[6]
        if (b_index == 7):
            return 8
        elif (b_index > 7):
            return 0
        else:
            return b_index

    def frequency(self):
        """Sample rate from the sampling_frequency_index field (0 if out of table)."""
        assert self.parsed_frames, 'no frame parsed yet'
        f_index = self._fixed_header_key[4]
        try:
            return _FREQS[f_index]
        except IndexError:
            return 0

    def parse_frame(self):
        """Parse one frame; False on bitstream error."""
        try:
            return self._parse_frame()
        except BitReaderError:
            return False

    def _parse_frame(self):
        r = self._r
        # position of frame start: the 12 syncword bits were already read
        start = (r.get_position() - 12)
        # --- fixed header ---
        id_ = r.bits(1)
        layer = r.bits(2)
        protection_absent = r.bits(1)
        profile = r.bits(2)
        sampling_frequency_index = r.bits(4)
        private_bit = r.bits(1)
        channel_configuration = r.bits(3)
        original_copy = r.bits(1)
        home = r.bits(1)
        fixed_header_key = (id_, layer, protection_absent, profile, sampling_frequency_index, private_bit, channel_configuration, original_copy, home)
        if (self._fixed_header_key is None):
            self._fixed_header_key = fixed_header_key
        elif (self._fixed_header_key != fixed_header_key):
            # fixed header must stay constant across frames of one stream
            return False
        # --- variable header ---
        r.skip(2)
        frame_length = r.bits(13)
        r.skip(11)
        nordbif = r.bits(2)  # number_of_raw_data_blocks_in_frame
        # account for CRC words when protection is present
        crc_overhead = 0
        if (not protection_absent):
            crc_overhead += ((nordbif + 1) * 16)
            if (nordbif != 0):
                crc_overhead *= 2
        left = ((frame_length * 8) - (r.get_position() - start))
        if (left < 0):
            return False
        r.skip(left)
        assert r.is_aligned()
        self._payload += ((left - crc_overhead) / 8)
        self._samples += ((nordbif + 1) * 1024)  # 1024 samples per raw data block
        self._last = (r.get_position() / 8)
        self.parsed_frames += 1
        return True
class RegisterFileRst(Component):
    """PyMTL register file with synchronous reset to `reset_value`.

    NOTE(review): update-block decorators appear stripped in this copy —
    `up_rf_read` reads like an @update block and the bare `_ff` lines like
    stripped @update_ff decorators; confirm against the upstream source.
    """

    def construct(s, Type, nregs=32, rd_ports=1, wr_ports=1, const_zero=False, reset_value=0):
        # Address width covers nregs entries (at least 1 bit).
        addr_type = mk_bits(max(1, clog2(nregs)))
        s.raddr = [InPort(addr_type) for i in range(rd_ports)]   # read addresses
        s.rdata = [OutPort(Type) for i in range(rd_ports)]       # combinational read data
        s.waddr = [InPort(addr_type) for i in range(wr_ports)]   # write addresses
        s.wdata = [InPort(Type) for i in range(wr_ports)]        # write data
        s.wen = [InPort(Bits1) for i in range(wr_ports)]         # write enables
        s.regs = [Wire(Type) for i in range(nregs)]              # register storage

        def up_rf_read():
            # Combinational read of every read port.
            for i in range(rd_ports):
                s.rdata[i] = s.regs[s.raddr[i]]

        if const_zero:
            # Variant where register 0 is hard-wired to stay constant (writes to 0 ignored).
            _ff
            def up_rf_write_constzero():
                if s.reset:
                    for i in range(nregs):
                        s.regs[i] <<= reset_value
                else:
                    for i in range(wr_ports):
                        if (s.wen[i] & (s.waddr[i] != 0)):
                            s.regs[s.waddr[i]] <<= s.wdata[i]
        else:
            _ff
            def up_rf_write():
                if s.reset:
                    for i in range(nregs):
                        s.regs[i] <<= reset_value
                else:
                    for i in range(wr_ports):
                        if s.wen[i]:
                            s.regs[s.waddr[i]] <<= s.wdata[i]
def get_type_desktop():
    """Classify the systemd default target.

    Returns: 0 unknown/error, 1 multi-user, 2 multi-user with autologin
    drop-in, 3 graphical, 4 graphical with a GDM config line matching
    GUI_GRAPHIC_RE.
    """
    probe = Command(shlex.split('systemctl get-default'))
    result = 0
    try:
        target = probe()[0]
        if target == 'multi-user.target':
            result = 1
            if os.path.isfile('/etc/systemd/system/.service.d/autologin.conf'):
                result = 2
        elif target == 'graphical.target':
            result = 3
            if os.path.isfile('/etc/gdm3/custom.conf'):
                with open('/etc/gdm3/custom.conf', 'r') as fh:
                    if any(GUI_GRAPHIC_RE.match(line) for line in fh.readlines()):
                        result = 4
    except (OSError, Command.CommandException):
        # Best effort: treat command failure as "unknown".
        pass
    return result
class ManiSkill2Dataset(Dataset):
    """In-memory (observation, action) dataset built from a ManiSkill2 trajectory .h5 file."""

    def __init__(self, dataset_file: str, load_count=-1) -> None:
        self.dataset_file = dataset_file
        # Local imports keep h5py / mani_skill2 out of module import time.
        import h5py
        from mani_skill2.utils.io_utils import load_json

        self.data = h5py.File(dataset_file, 'r')
        json_path = dataset_file.replace('.h5', '.json')
        self.json_data = load_json(json_path)
        self.episodes = self.json_data['episodes']
        self.env_info = self.json_data['env_info']
        self.env_id = self.env_info['env_id']
        self.env_kwargs = self.env_info['env_kwargs']

        self.observations = []
        self.actions = []
        self.total_frames = 0
        n_episodes = len(self.episodes) if load_count == -1 else load_count
        for eps_id in tqdm(range(n_episodes)):
            episode = self.episodes[eps_id]
            traj = load_h5_data(self.data[f"traj_{episode['episode_id']}"])
            # Drop the terminal observation so obs/action lengths line up.
            self.observations.append(traj['obs'][:-1])
            self.actions.append(traj['actions'])
        self.observations = np.vstack(self.observations)
        self.actions = np.vstack(self.actions)

    def __len__(self):
        return len(self.observations)

    def __getitem__(self, idx):
        return (
            th.from_numpy(self.observations[idx]).float(),
            th.from_numpy(self.actions[idx]).float(),
        )
class Encoder(Ranker):
    """Ranker that orders documents by encoder-embedding similarity to the query."""

    def __init__(self, on: typing.Union[(str, typing.List[str])], key: str, encoder, normalize: bool=True, k: typing.Optional[int]=None, batch_size: int=64) -> None:
        super().__init__(
            key=key,
            on=on,
            encoder=encoder,
            normalize=normalize,
            k=k,
            batch_size=batch_size,
        )

    def __call__(self, q: typing.Union[(typing.List[str], str)], documents: typing.Union[(typing.List[typing.List[typing.Dict[(str, str)]]], typing.List[typing.Dict[(str, str)]])], k: typing.Optional[int]=None, batch_size: typing.Optional[int]=None, **kwargs) -> typing.Union[(typing.List[typing.List[typing.Dict[(str, str)]]], typing.List[typing.Dict[(str, str)]])]:
        """Rank `documents` for a single query (str) or a batch of queries (list)."""
        # Resolve top-k: call argument > instance default > index size.
        if k is None:
            k = self.k
        if k is None:
            k = len(self)

        is_single = isinstance(q, str)
        # Empty document sets short-circuit with an empty result of matching shape.
        if not documents:
            if is_single:
                return []
            if isinstance(q, list):
                return [[]]

        queries = [q] if is_single else q
        docs = [documents] if is_single else documents
        effective_batch = self.batch_size if batch_size is None else batch_size
        ranked = self.encode_rank(
            embeddings_queries=self.encoder(queries),
            documents=docs,
            k=k,
            batch_size=effective_batch,
        )
        # Unwrap the batch dimension for a single-query call.
        return ranked[0] if is_single else ranked
def open_circuit_potential(c_surf):
    """Open-circuit potential as a function of surface concentration.

    Empirical tanh-sum fit; stoichiometry is a stretched ratio of the
    surface concentration to the module-level `c_max`.
    """
    stretch = 1.062
    sto = stretch * c_surf / c_max
    u_eq = (
        2.16216
        + 0.07645 * tanh(30.834 - 54.4806 * sto)
        + 2.1581 * tanh(52.294 - 50.294 * sto)
        - 0.14169 * tanh(11.0923 - 19.8543 * sto)
        + 0.2051 * tanh(1.4684 - 5.4888 * sto)
        + 0.2531 * tanh((-sto + 0.56478) / 0.1316)
        - 0.02167 * tanh((sto - 0.525) / 0.006)
    )
    return u_eq
def encode_report(rpt, rpt_path):
    """Serialize a report's spatial layers and summary into a JSON file at `rpt_path`."""
    # Parcel geometries keyed by PARCELID, used for both flooding layers.
    parcels = spatial.read_shapefile(sg.config.parcels_shapefile)
    parcels = parcels[['PARCELID', 'coords']]

    flooded = pd.merge(
        rpt.alt_report.parcel_flooding,
        parcels,
        right_on='PARCELID',
        left_index=True,
    )
    changed = rpt.flood_comparison.loc[pd.notnull(rpt.flood_comparison.Category)]
    changed = pd.merge(changed, parcels, right_on='PARCELID', left_index=True)

    payload = {
        'parcels': spatial.write_geojson(flooded, geomtype='polygon'),
        'delta_parcels': spatial.write_geojson(changed, geomtype='polygon'),
        'new_conduits': spatial.write_geojson(rpt.newconduits),
    }
    payload.update(rpt.summary_dict)

    with open(rpt_path, 'w') as f:
        f.write(json.dumps(payload))
class Infraction(ModelReprMixin, models.Model):
    """A moderation infraction applied to a user by an actor.

    At most one *active* infraction of each type may exist per user
    (enforced by the UniqueConstraint in Meta).
    """

    # (db value, human-readable label) pairs for the `type` field.
    TYPE_CHOICES = (('note', 'Note'), ('warning', 'Warning'), ('watch', 'Watch'), ('timeout', 'Timeout'), ('kick', 'Kick'), ('ban', 'Ban'), ('superstar', 'Superstar'), ('voice_ban', 'Voice Ban'), ('voice_mute', 'Voice Mute'))
    inserted_at = models.DateTimeField(default=timezone.now, help_text='The date and time of the creation of this infraction.')
    last_applied = models.DateTimeField(default=timezone.now, help_text='The date and time of when this infraction was last applied.')
    expires_at = models.DateTimeField(null=True, help_text="The date and time of the expiration of this infraction. Null if the infraction is permanent or it can't expire.")
    active = models.BooleanField(help_text='Whether the infraction is still active.')
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='infractions_received', help_text='The user to which the infraction was applied.')
    actor = models.ForeignKey(User, on_delete=models.CASCADE, related_name='infractions_given', help_text='The user which applied the infraction.')
    type = models.CharField(max_length=10, choices=TYPE_CHOICES, help_text='The type of the infraction.')
    reason = models.TextField(null=True, help_text='The reason for the infraction.')
    hidden = models.BooleanField(default=False, help_text='Whether the infraction is a shadow infraction.')
    dm_sent = models.BooleanField(null=True, help_text='Whether a DM was sent to the user when infraction was applied.')
    jump_url = models.URLField(default=None, null=True, max_length=88, help_text='The jump url to message invoking the infraction.')

    class Meta():
        # Newest infractions first by default.
        ordering = ['-inserted_at']
        # Only one active infraction of a given type per user.
        constraints = (models.UniqueConstraint(fields=['user', 'type'], condition=models.Q(active=True), name='unique_active_infraction_per_type_per_user'),)

    def __str__(self):
        # e.g. "#3: ban on 1234 until 2020-01-01 (hidden)"
        s = f'#{self.id}: {self.type} on {self.user_id}'
        if self.expires_at:
            s += f' until {self.expires_at}'
        if self.hidden:
            s += ' (hidden)'
        return s
class SubscriptionHandler(object):
    """Manage account/object subscriptions stored on `obj`.

    `obj` is expected to expose `db_account_subscriptions` and
    `db_object_subscriptions` many-to-many fields; subscribers are cached
    locally as a {subscriber: True} mapping and rebuilt on every change.
    """

    def __init__(self, obj):
        self.obj = obj
        self._cache = None  # lazily built {subscriber: True} mapping

    def _recache(self):
        """Rebuild the subscriber cache from the database, skipping unsaved rows."""
        self._cache = {account: True for account in self.obj.db_account_subscriptions.all() if (hasattr(account, 'pk') and account.pk)}
        self._cache.update({obj: True for obj in self.obj.db_object_subscriptions.all() if (hasattr(obj, 'pk') and obj.pk)})

    def has(self, entity):
        """Return True if `entity` is currently subscribed."""
        if self._cache is None:
            self._recache()
        return entity in self._cache

    def add(self, entity):
        """Subscribe one entity or an iterable of entities."""
        global _CHANNELHANDLER
        if not _CHANNELHANDLER:
            from evennia.comms.channelhandler import CHANNEL_HANDLER as _CHANNELHANDLER
        for subscriber in make_iter(entity):
            if subscriber:
                # Route to the right m2m field by the subscriber's db class.
                clsname = subscriber.__dbclass__.__name__
                if clsname == 'ObjectDB':
                    self.obj.db_object_subscriptions.add(subscriber)
                elif clsname == 'AccountDB':
                    self.obj.db_account_subscriptions.add(subscriber)
                # Invalidate any cached cmdset for the subscriber.
                _CHANNELHANDLER._cached_cmdsets.pop(subscriber, None)
        self._recache()

    def remove(self, entity):
        """Unsubscribe one entity or an iterable of entities."""
        global _CHANNELHANDLER
        if not _CHANNELHANDLER:
            from evennia.comms.channelhandler import CHANNEL_HANDLER as _CHANNELHANDLER
        for subscriber in make_iter(entity):
            if subscriber:
                clsname = subscriber.__dbclass__.__name__
                # FIX: previously called .remove(entity), which passed the
                # whole iterable when `entity` was a list; remove the current
                # `subscriber` instead, mirroring add().
                if clsname == 'AccountDB':
                    self.obj.db_account_subscriptions.remove(subscriber)
                elif clsname == 'ObjectDB':
                    self.obj.db_object_subscriptions.remove(subscriber)
                _CHANNELHANDLER._cached_cmdsets.pop(subscriber, None)
        self._recache()

    def all(self):
        """Return the {subscriber: True} mapping of all subscribers."""
        if self._cache is None:
            self._recache()
        return self._cache
    get = all  # legacy alias

    def online(self):
        """Return the subset of subscribers that are currently connected."""
        # Hoisted out of the loop: one import instead of one per subscriber.
        from django.core.exceptions import ObjectDoesNotExist
        subs = []
        recache_needed = False
        for obj in self.all():
            try:
                # Puppeted objects count via their controlling account.
                if hasattr(obj, 'account') and obj.account:
                    obj = obj.account
                if not obj.is_connected:
                    continue
            except ObjectDoesNotExist:
                # Stale/deleted row - skip it and rebuild the cache afterwards.
                recache_needed = True
                continue
            subs.append(obj)
        if recache_needed:
            self._recache()
        return subs

    def clear(self):
        """Remove all subscriptions and invalidate the cache."""
        self.obj.db_account_subscriptions.clear()
        self.obj.db_object_subscriptions.clear()
        self._cache = None
(scope='session', autouse=True) def symbols_by_file() -> Dict[(str, Set[str])]: sys.stdout = StringIO() lint.Run(['--reports=n', '--rcfile=python_ta/config/.pylintrc', '--output-format=json', *get_file_paths()], exit=False) jsons_output = sys.stdout.getvalue() sys.stdout = sys.__stdout__ pylint_list_output = json.loads(jsons_output) file_to_symbol = {} for (path, group) in itertools.groupby(pylint_list_output, key=(lambda d: d['path'])): symbols = {message['symbol'] for message in group} file = os.path.basename(path) file_to_symbol[file] = symbols return file_to_symbol
class Attention(nn.Module):
    """Multi-head self-attention with spatial reduction of keys/values and a
    PixelShuffle up-path added back to the output (ResTv2-style).

    FIX: the matrix-multiplication operators had been lost in this copy
    ("q k.transpose(...)" and "attn v" were syntax errors); restored as
    `q @ k.transpose(-2, -1)` and `attn @ v` per standard scaled
    dot-product attention. The up-path modules are created unconditionally
    (PixelShuffle(1) is the identity) because `forward` uses them
    unconditionally.
    """

    def __init__(self, dim, num_heads=8, sr_ratio=1):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5  # 1/sqrt(d_k)
        self.dim = dim
        self.q = nn.Linear(dim, dim, bias=True)
        self.kv = nn.Linear(dim, dim * 2, bias=True)
        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # Depthwise conv shrinks the key/value spatial map by sr_ratio.
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio + 1, stride=sr_ratio,
                                padding=sr_ratio // 2, groups=dim)
            self.sr_norm = nn.LayerNorm(dim, eps=1e-06)
        # Up-path: depthwise conv + PixelShuffle restores full resolution from
        # the (possibly reduced) value map. Identity when sr_ratio == 1.
        self.up = nn.Sequential(
            nn.Conv2d(dim, (sr_ratio * sr_ratio) * dim, kernel_size=3, stride=1,
                      padding=1, groups=dim),
            nn.PixelShuffle(upscale_factor=sr_ratio),
        )
        self.up_norm = nn.LayerNorm(dim, eps=1e-06)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x, H, W):
        """x: (B, N, C) token sequence with N == H * W."""
        (B, N, C) = x.shape
        # (B, heads, N, C/heads)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        if self.sr_ratio > 1:
            # Reduce the spatial resolution of the k/v source tokens.
            x = x.permute(0, 2, 1).reshape(B, C, H, W)
            x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
            x = self.sr_norm(x)
        kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        (k, v) = (kv[0], kv[1])
        # Scaled dot-product attention.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        # Up-sample the value map back to (B, N, C) and add it as a residual.
        identity = v.transpose(-1, -2).reshape(B, C, H // self.sr_ratio, W // self.sr_ratio)
        identity = self.up(identity).flatten(2).transpose(1, 2)
        x = self.proj(x + self.up_norm(identity))
        return x
def _test_false_cyclic_dependency():
    """Build a PyMTL component whose update blocks form a (false) cyclic
    dependency and assert that scheduling raises UpblkCyclicError.

    NOTE(review): the @update decorators on up1..up7 appear stripped in this
    copy; confirm against the upstream PyMTL test.
    """
    class Top(Component):
        def construct(s):
            s.a = Wire(int)
            s.b = Wire(int)
            s.c = Wire(int)
            s.d = Wire(int)
            s.e = Wire(int)
            s.f = Wire(int)
            s.g = Wire(int)
            s.h = Wire(int)
            s.i = Wire(int)
            s.j = Wire(int)

            def up1():
                s.a = (10 + s.i)
                s.b = (s.d + 1)

            def up2():
                s.c = (s.a + 1)
                s.e = (s.d + 1)

            def up3():
                s.d = (s.c + 1)
                print('up3 prints out d =', s.d)

            def up4():
                s.f = (s.d + 1)

            def up5():
                s.g = (s.c + 1)
                s.h = (s.j + 1)
                print('up5 prints out h =', s.h)

            def up6():
                # reads and writes s.i -> self-cycle
                s.i = (s.i + 1)

            def up7():
                s.j = (s.g + 1)

        def done(s):
            return True

        def line_trace(s):
            return 'a {} | b {} | c {} | d {} | e {} | f {} | g {} | h {} | i {} | j {}'.format(s.a, s.b, s.c, s.d, s.e, s.f, s.g, s.h, s.i, s.j)

    # The scheduler must reject the cyclic block graph.
    try:
        _test_model(Top)
    except UpblkCyclicError as e:
        print('{} is thrown\n{}'.format(e.__class__.__name__, e))
        return
    raise Exception("Should've thrown UpblkCyclicError.")
def flatten_settings(json: dict) -> dict:
    """Hoist the 'settings' sub-dict into the top level of `json`.

    Entries of `settings` that are themselves dicts (groups) contribute
    their members directly; scalar entries are copied as-is. The input
    mapping is mutated and returned.
    """
    nested = json.pop('settings', {})
    for entry, value in nested.items():
        if isinstance(value, dict):
            # A group: promote its members to the top level.
            json.update(value)
        else:
            json[entry] = value
    return json
def eval_auto_attack(model, device, cfgs, logger, test_loader, individual=False, print_freq=20, mode='test', train_val=False):
    """Evaluate `model` under AutoAttack and log/persist per-attack metrics.

    Runs the standard APGD attacks over `test_loader`, collects model logits
    on the adversarial examples, logs accuracy/precision/recall/F-score per
    attack key, and (unless `train_val`) appends results to a CSV and dumps
    everything to a pickle next to cfgs.model_path.
    """
    logger.info('Evaluating Auto Attack!')
    model.eval()
    attacks_to_run = ['apgd-ce', 'apgd-t']
    adversary = AutoAttack(model, norm='Linf', eps=cfgs.test_epsilon, version='standard', verbose=False)
    adversary.attacks_to_run = attacks_to_run
    total_batches = len(test_loader)
    all_labels = []
    all_outputs = dict()  # attack key -> list of per-batch logit arrays
    for (i, (data, target)) in enumerate(test_loader):
        (data, target) = (data.to(device), target.to(device))
        outputs = dict()
        if individual:
            # Run each attack separately and keep per-attack outputs.
            adversary.attacks_to_run = attacks_to_run
            x_adv_dict = adversary.run_standard_evaluation_individual(data, target, bs=len(data))
            # restore the full standard attack list afterwards
            adversary.attacks_to_run = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
            for (attack, x_adv) in x_adv_dict.items():
                outputs.update({attack: model(x_adv).detach().cpu().numpy()})
        else:
            x_adv = adversary.run_standard_evaluation(data, target, bs=len(data))
            outputs.update({'standard': model(x_adv).detach().cpu().numpy()})
        all_labels.extend(target.cpu().detach().numpy().tolist())
        # Accumulate batch outputs per attack key (creating the list on first sight).
        for key in outputs.keys():
            if (key not in all_outputs.keys()):
                all_outputs.update({key: []})
                all_outputs[key].append(outputs[key])
            else:
                all_outputs[key].append(outputs[key])
        if ((i == 5) or (((i + 1) % print_freq) == 0)):
            logger.info('[{}/{}] batches finished'.format((i + 1), total_batches))
    # First pass: log metrics only (skip feature/success buffers).
    for (key, data) in all_outputs.items():
        if (('features' in key) or ('success' in key)):
            continue
        data = np.vstack(data)
        (accuracy, precision, recall, f_score, true_sum) = eval_metrics(data, all_labels)
        logger.info('metric: {} | accuracy: {:.4f} | precision: {:.4f} | recall: {:.4f} | f_score: {:.4f} | '.format(key, accuracy, np.mean(precision), np.mean(recall), np.mean(f_score)))
    if train_val:
        # Validation-during-training mode: no CSV / pickle persistence.
        return
    csv_name = '{}_all_results.csv'.format(cfgs.dataset)
    save_csv(csv_name, [[cfgs.model_path], [cfgs.remark]], devide=False)
    # Second pass: stack buffers in place and persist per-key metrics to CSV.
    for (key, data) in all_outputs.items():
        save_data = [[' '], [key]]
        if ('success' in key):
            data = np.hstack(data)
        else:
            data = np.vstack(data)
        all_outputs[key] = data
        if (('features' in key) or ('success' in key)):
            continue
        (accuracy, precision, recall, f_score, true_sum) = eval_metrics(data, all_labels)
        if (recall.shape[0] >= 100):
            # With >=100 classes, also save metrics grouped into 10 buckets.
            g_recall = np.reshape(recall, (10, (- 1))).sum((- 1))
            g_precision = np.reshape(precision, (10, (- 1))).sum((- 1))
            g_f_score = np.reshape(f_score, (10, (- 1))).sum((- 1))
            save_data.extend([['accuracy', accuracy], ['g_recall'], g_recall.tolist(), ['g_precision'], g_precision.tolist(), ['g_f_score'], g_f_score.tolist()])
        save_data.extend([['accuracy', accuracy], ['recall'], recall.tolist(), ['precision'], precision.tolist(), ['f_score'], f_score.tolist()])
        save_csv(csv_name, save_data, devide=False)
        logger.info('metric: {} | accuracy: {:.4f} | precision: {:.4f} | recall: {:.4f} | f_score: {:.4f} | '.format(key, accuracy, np.mean(precision), np.mean(recall), np.mean(f_score)))
        for (name, value) in zip(['precision', 'recall', 'f_score'], [precision, recall, f_score]):
            print(name, end=' | ')
            for v in value:
                print('{:.2f}'.format((v * 100)), end=' ')
            print()
    logger.info('[Remarks] {} | End of evaluation,model path {}'.format(cfgs.remark, cfgs.model_path))
    # Also persist the classifier weight matrix for later analysis.
    for (name, param) in model.classifier.state_dict().items():
        if ('weight' in name):
            print('save classifier weight from module <{}> !'.format(name))
            all_outputs.update(CLASSIFIER_weight=model.classifier.state_dict()[name].clone().detach().cpu().numpy())
    all_outputs.update(LABLES=all_labels)
    mmcv.dump(all_outputs, (cfgs.model_path + '.AA.{}.pkl'.format(mode)))
    # NOTE(review): the logged path below omits the leading '.' used in the
    # actual dump filename above — presumably a message typo; verify.
    print('Data saved at {}'.format((cfgs.model_path + 'AA.{}.pkl'.format(mode))))
    return
def get_predictions(example, features, all_results, n_best_size, max_answer_length, do_lower_case, version_2_with_negative, null_score_diff_threshold):
    """Build the n-best answer predictions for one SQuAD-style example.

    Collects candidate (start, end) spans from per-feature logits, filters
    invalid spans, de-tokenizes the surviving candidates, and softmaxes
    their scores. In `version_2_with_negative` mode, returns
    (nbest, probs) or (None, None) when the null answer wins.

    NOTE(review): the non-v2 branch writes into `all_predictions`, which is
    not defined in this function (would raise NameError) and returns None —
    it likely relies on an outer/global mapping in the original file; verify.
    """
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
    prelim_predictions = []
    # Best (lowest) null score across features, for v2 "no answer" handling.
    score_null = 1000000
    min_null_feature_index = 0
    null_start_logit = 0
    null_end_logit = 0
    for (feature_index, feature) in enumerate(features):
        result = unique_id_to_result[feature.unique_id]
        start_indexes = _get_best_indexes(result.start_logits, n_best_size)
        end_indexes = _get_best_indexes(result.end_logits, n_best_size)
        if version_2_with_negative:
            # Null answer score = logits at position 0 ([CLS]).
            feature_null_score = (result.start_logits[0] + result.end_logits[0])
            if (feature_null_score < score_null):
                score_null = feature_null_score
                min_null_feature_index = feature_index
                null_start_logit = result.start_logits[0]
                null_end_logit = result.end_logits[0]
        for start_index in start_indexes:
            for end_index in end_indexes:
                # Discard spans that fall outside the tokens, map to no
                # original token, span a non-max-context start, are inverted,
                # or exceed the maximum answer length.
                if (start_index >= len(feature.tokens)):
                    continue
                if (end_index >= len(feature.tokens)):
                    continue
                if (start_index not in feature.token_to_orig_map):
                    continue
                if (end_index not in feature.token_to_orig_map):
                    continue
                if (not feature.token_is_max_context.get(start_index, False)):
                    continue
                if (end_index < start_index):
                    continue
                length = ((end_index - start_index) + 1)
                if (length > max_answer_length):
                    continue
                prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
    # Highest combined logit first.
    prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
    _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
    seen_predictions = {}
    nbest = []
    for pred in prelim_predictions:
        if (len(nbest) >= n_best_size):
            break
        feature = features[pred.feature_index]
        if (pred.start_index > 0):
            # De-tokenize: strip wordpiece markers, then align with the
            # original document text.
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = ' '.join(tok_tokens)
            tok_text = tok_text.replace(' ##', '')
            tok_text = tok_text.replace('##', '')
            tok_text = tok_text.strip()
            tok_text = ' '.join(tok_text.split())
            orig_text = ' '.join(orig_tokens)
            final_text = get_final_text(tok_text, orig_text, do_lower_case, False)
            # Filter duplicates, special-token artifacts, very short / very
            # long answers and bare articles.
            # NOTE(review): "(len(final_text) < 2)" appears twice — redundant.
            if ((final_text in seen_predictions) or ('MASK' in final_text) or ('[' in final_text) or (']' in final_text) or (len(final_text) < 2) or (len(final_text) < 2) or (len(final_text.split()) > 6) or (final_text.lower() in articles)):
                continue
            seen_predictions[final_text] = True
        else:
            # start_index 0 encodes the null ("no answer") prediction.
            final_text = ''
            seen_predictions[final_text] = True
        nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
    if (not nbest):
        # Guarantee at least one (empty) entry.
        nbest.append(_NbestPrediction(text='', start_logit=0.0, end_logit=0.0))
    assert (len(nbest) >= 1)
    total_scores = []
    best_non_null_entry = None
    for entry in nbest:
        total_scores.append((entry.start_logit + entry.end_logit))
        if (not best_non_null_entry):
            if entry.text:
                best_non_null_entry = entry
    probs = _compute_softmax(total_scores)
    nbest_json = []
    for (i, entry) in enumerate(nbest):
        output = collections.OrderedDict()
        output['text'] = entry.text
        output['probability'] = probs[i]
        output['start_logit'] = entry.start_logit
        output['end_logit'] = entry.end_logit
        nbest_json.append(output)
    assert (len(nbest_json) >= 1)
    if (not version_2_with_negative):
        all_predictions[example.qas_id] = nbest_json[0]['text']
    else:
        # Null answer wins when its score beats the best non-null by more
        # than the configured threshold.
        score_diff = ((score_null - best_non_null_entry.start_logit) - best_non_null_entry.end_logit)
        if (score_diff > null_score_diff_threshold):
            return (None, None)
        else:
            return (nbest, probs)
.parametrize('name', 'test tests whatever .dotdir'.split()) def test_setinitial_conftest_subdirs(pytester: Pytester, name: str) -> None: sub = pytester.mkdir(name) subconftest = sub.joinpath('conftest.py') subconftest.touch() pm = PytestPluginManager() conftest_setinitial(pm, [sub.parent], confcutdir=pytester.path) key = subconftest.resolve() if (name not in ('whatever', '.dotdir')): assert pm.has_plugin(str(key)) assert (len((set(pm.get_plugins()) - {pm})) == 1) else: assert (not pm.has_plugin(str(key))) assert (len((set(pm.get_plugins()) - {pm})) == 0)
(cc=STDCALL, params={'pIdentifierAuthority': PSID_IDENTIFIER_AUTHORITY, 'nSubAuthorityCount': BYTE, 'nSubAuthority0': DWORD, 'nSubAuthority1': DWORD, 'nSubAuthority2': DWORD, 'nSubAuthority3': DWORD, 'nSubAuthority4': DWORD, 'nSubAuthority5': DWORD, 'nSubAuthority6': DWORD, 'nSubAuthority7': DWORD, 'pSid': POINTER})
def hook_AllocateAndInitializeSid(ql: Qiling, address: int, params):
    """Emulated advapi32!AllocateAndInitializeSid: build a SID on the emulated
    heap, register it as a handle, and write its address through pSid.

    Returns 1 (TRUE / success).
    """
    # Collect the first nSubAuthorityCount sub-authority values.
    count = params['nSubAuthorityCount']
    subauths = tuple((params[f'nSubAuthority{i}'] for i in range(count)))
    sid_struct = make_sid(auth_count=len(subauths))
    # Allocate backing storage for the SID in emulated memory.
    sid_addr = ql.os.heap.alloc(sid_struct.sizeof())
    # NOTE(review): the identifier authority is hardcoded to (5,)
    # (NT authority) and the pIdentifierAuthority argument is ignored —
    # confirm this is intentional.
    sid_obj = sid_struct(Revision=1, SubAuthorityCount=len(subauths), IdentifierAuthority=(5,), SubAuthority=subauths)
    sid_obj.save_to(ql.mem, sid_addr)
    # Track the SID as a handle keyed by its emulated address.
    handle = Handle(obj=sid_obj, id=sid_addr)
    ql.os.handle_manager.append(handle)
    # Return the SID pointer through the out-parameter.
    dest = params['pSid']
    ql.mem.write_ptr(dest, sid_addr)
    return 1
class KNearestPMedian(PMedian):
    """p-median variant restricted to each client's k nearest facilities.

    Builds a sparse cost matrix over only the k nearest facilities per
    client; a binary "placeholder" variable per client absorbs clients whose
    k candidates are all unusable.  solve() grows k for those clients and
    re-solves until no placeholder is active.
    """

    def __init__(self, name: str, ai_sum: (int | float), clients: np.array, facilities: np.array, weights: np.array, k_array: np.array, p_facilities: int, capacities: np.array=None, distance_metric: str='euclidean'):
        # ai_sum: total client demand; used for the capacity feasibility check.
        self.ai_sum = ai_sum
        self.clients = clients
        self.facilities = facilities
        self.weights = weights
        # k_array[i]: number of nearest facilities considered for client i.
        self.k_array = k_array
        self.p_facilities = p_facilities
        self.capacities = capacities
        self.distance_metric = distance_metric
        self.name = name

    def __add_obj(self, max_distance: np.array, range_clients: range, range_facility: range) -> None:
        """Add the weighted assignment objective, penalizing placeholders.

        Each placeholder costs max row distance + 1 so the solver prefers any
        real assignment over leaving a client unassigned.
        """
        cli_assgn_vars = getattr(self, 'cli_assgn_vars')
        placeholder_vars = getattr(self, 'placeholder_vars')
        self.problem += (pulp.lpSum(((pulp.lpSum(((self.aij[(i, j)] * cli_assgn_vars.get((i, j), 0)) for j in range_facility)) + (placeholder_vars[i] * (max_distance[i] + 1))) for i in range_clients)), 'objective function')

    # NOTE(review): appears intended as a @classmethod (first arg `cls`);
    # the decorator is not present in this copy — verify against upstream.
    def from_cost_matrix(cls, *args, **kwargs):
        raise NotImplementedError('The `from_cost_matrix()` method is not supported in `KNearestPMedian` class.')

    def _create_sparse_matrix(self) -> None:
        """Build the clients x facilities sparse distance matrix.

        Only each client's k_array[i] nearest facilities (via a spatial tree)
        get a stored entry; everything else is implicitly zero/absent.
        """
        row_shape = len(self.clients)
        column_shape = len(self.facilities)
        if (not (self.k_array <= column_shape).all()):
            raise ValueError(f'The value of `k` should be no more than the number of total facilities: ({column_shape}).')
        data = []
        row_index = []
        col_index = []
        tree = build_best_tree(self.facilities, self.distance_metric)
        for (i, k) in enumerate(self.k_array):
            (distances, k_nearest_facilities_indices) = tree.query([self.clients[i]], k=k)
            distances = distances[0].tolist()
            k_nearest_facilities_indices = k_nearest_facilities_indices[0].tolist()
            data.extend(distances)
            row_index.extend(([i] * k))
            col_index.extend(k_nearest_facilities_indices)
        self.sparse_matrix = csr_matrix((data, (row_index, col_index)), shape=(row_shape, column_shape))

    def _update_k_array(self) -> None:
        """Grow k by one for every client whose placeholder was used."""
        new_k_array = self.k_array.copy()
        placeholder_vars = getattr(self, 'placeholder_vars')
        for i in range(len(placeholder_vars)):
            if (placeholder_vars[i].value() > 0):
                new_k_array[i] = (new_k_array[i] + 1)
        self.k_array = new_k_array

    def _from_sparse_matrix(self) -> None:
        """Construct the PuLP problem from the current sparse matrix."""
        n_cli = self.sparse_matrix.shape[0]
        r_cli = range(n_cli)
        r_fac = range(self.sparse_matrix.shape[1])
        self.weights = np.reshape(self.weights, (n_cli, 1))
        # aij: demand-weighted distances, only where a (i, j) candidate exists.
        self.aij = self.sparse_matrix.multiply(self.weights).tocsr()
        self.problem = pulp.LpProblem(self.name, pulp.LpMinimize)
        FacilityModelBuilder.add_facility_integer_variable(self, r_fac, 'y[{i}]')
        fac_vars = getattr(self, 'fac_vars')
        # g[i]: 1 when client i could not be assigned to any candidate.
        placeholder_vars = pulp.LpVariable.dicts('g', (i for i in r_cli), 0, 1, pulp.LpBinary)
        setattr(self, 'placeholder_vars', placeholder_vars)
        (row_indices, col_indices, values) = find(self.aij)
        # z[(i, j)]: assignment variables only for stored candidate pairs.
        cli_assgn_vars = pulp.LpVariable.dicts('z', list(zip(row_indices, col_indices, strict=True)), 0, 1, pulp.LpBinary)
        setattr(self, 'cli_assgn_vars', cli_assgn_vars)
        max_distance = self.aij.max(axis=1).toarray().flatten()
        self.__add_obj(max_distance, r_cli, r_fac)
        if (self.capacities is not None):
            sorted_capacities = np.sort(self.capacities)
            # Best case: open the p sites with the largest capacities.
            highest_possible_capacity = sorted_capacities[(- self.p_facilities):].sum()
            if (highest_possible_capacity < self.ai_sum):
                raise SpecificationError(f'Problem is infeasible. The highest possible capacity ({highest_possible_capacity}), coming from the {self.p_facilities} sites with the highest capacity, is smaller than the total demand ({self.ai_sum}).')
            for j in col_indices:
                self.problem += (pulp.lpSum(((self.weights[i] * cli_assgn_vars.get((i, j), 0)) for i in r_cli)) <= (fac_vars[j] * self.capacities[j]))
        # Each client is either assigned to exactly one candidate facility or
        # takes its placeholder.
        for i in r_cli:
            self.problem += ((pulp.lpSum((cli_assgn_vars.get((i, j), 0) for j in set(col_indices))) + placeholder_vars[i]) == 1)
        FacilityModelBuilder.add_facility_constraint(self, self.p_facilities)
        FacilityModelBuilder.add_opening_constraint(self, r_fac, r_cli)

    # NOTE(review): appears intended as a @classmethod (first arg `cls`);
    # the decorator is not present in this copy — verify against upstream.
    def from_geodataframe(cls, gdf_demand: GeoDataFrame, gdf_fac: GeoDataFrame, demand_col: str, facility_col: str, weights_cols: str, p_facilities: int, facility_capacity_col: str=None, k_array: np.array=None, distance_metric: str='euclidean', name: str='k-nearest-p-median'):
        """Build a KNearestPMedian from demand/facility GeoDataFrames.

        Both frames must share a valid CRS.  When k_array is omitted it
        defaults to min(number of facilities, 5) for every client.
        """
        if (gdf_demand.crs is None):
            raise ValueError('GeoDataFrame ``gdf_demand`` does not have a valid CRS.')
        if (gdf_fac.crs is None):
            raise ValueError('GeoDataFrame ``gdf_fac`` does not have a valid CRS.')
        if (gdf_demand.crs != gdf_fac.crs):
            raise ValueError(f'Geodataframes crs are different: gdf_demand-{gdf_demand.crs}, gdf_fac-{gdf_fac.crs}')
        dem = gdf_demand[demand_col]
        fac = gdf_fac[facility_col]
        # Point geometries -> (n, 2) coordinate arrays.
        dem_data = np.array([dem.x.to_numpy(), dem.y.to_numpy()]).T
        fac_data = np.array([fac.x.to_numpy(), fac.y.to_numpy()]).T
        if (k_array is None):
            k_array = np.full(len(dem_data), np.minimum(len(fac_data), 5))
        elif (not isinstance(k_array, np.ndarray)):
            raise TypeError('`k_array` should be a numpy array.')
        elif (not (k_array <= len(fac_data)).all()):
            raise ValueError(f'The value of `k` should be no more than the number of total facilities, which is {len(fac_data)}.')
        service_load = gdf_demand[weights_cols].to_numpy()
        weights_sum = service_load.sum()
        facility_capacities = None
        if (facility_capacity_col is not None):
            facility_capacities = gdf_fac[facility_capacity_col].to_numpy()
        return KNearestPMedian((('capacitated-' + name) if (facility_capacities is not None) else name), weights_sum, dem_data, fac_data, service_load, k_array, p_facilities, facility_capacities, distance_metric)

    def facility_client_array(self) -> None:
        """Populate self.fac2cli: clients served by each opened facility."""
        fac_vars = getattr(self, 'fac_vars')
        cli_vars = getattr(self, 'cli_assgn_vars')
        len_fac_vars = len(fac_vars)
        self.fac2cli = []
        for j in range(len_fac_vars):
            array_cli = []
            if (fac_vars[j].value() > 0):
                # cli_vars is keyed by (i, j); probe only existing pairs.
                for i in range(len(cli_vars)):
                    if (((i, j) in cli_vars) and (cli_vars[(i, j)].value() > 0)):
                        array_cli.append(i)
            self.fac2cli.append(array_cli)

    def solve(self, solver: pulp.LpSolver, results: bool=True):
        """Iteratively solve, expanding k until no placeholder is active.

        Returns self; when `results` is True also computes the
        facility/client mappings and mean distance.
        """
        sum_gi = 1
        while (sum_gi > 0):
            self._create_sparse_matrix()
            self._from_sparse_matrix()
            self.problem.solve(solver)
            self.check_status()
            placeholder_vars = getattr(self, 'placeholder_vars')
            sum_gi = sum((placeholder_vars[i].value() for i in range(len(placeholder_vars)) if (placeholder_vars[i].value() > 0)))
            if (sum_gi > 0):
                self._update_k_array()
        if results:
            self.facility_client_array()
            self.client_facility_array()
            self.get_mean_distance()
        return self
class HDDFeatureExtractor(nn.Module):
    """Fuse camera CNN features and raw sensor readings into one vector.

    ``args.inputs`` selects the active branches: 'camera', 'sensor' or
    'multimodal' (both).  ``fusion_size`` reports the width of the fused
    feature produced by forward().
    """

    def __init__(self, args):
        super(HDDFeatureExtractor, self).__init__()
        if args.inputs not in ('camera', 'sensor', 'multimodal'):
            raise RuntimeError('Unknown inputs of {}'.format(args.inputs))
        # 'multimodal' enables both branches; otherwise only the named one.
        self.with_camera = 'sensor' not in args.inputs
        self.with_sensor = 'camera' not in args.inputs
        if self.with_camera and self.with_sensor:
            self.fusion_size = 1280 + 20
        elif self.with_camera:
            self.fusion_size = 1280
        elif self.with_sensor:
            self.fusion_size = 20
        # Both projection heads are always constructed, even when one branch
        # is disabled (matches original behavior and parameter layout).
        self.camera_linear = nn.Sequential(nn.Conv2d(1536, 20, kernel_size=1), nn.ReLU(inplace=True), Flatten())
        self.sensor_linear = nn.Sequential(nn.Linear(8, 20), nn.ReLU(inplace=True))

    def forward(self, camera_input, sensor_input):
        """Project each enabled modality; concatenate when both are active."""
        cam = self.camera_linear(camera_input) if self.with_camera else camera_input
        sen = self.sensor_linear(sensor_input) if self.with_sensor else sensor_input
        if self.with_camera and self.with_sensor:
            return torch.cat((cam, sen), 1)
        if self.with_camera:
            return cam
        return sen
def relayfee(network: 'Network'=None) -> int:
    """Return the relay fee rate, clamped to sane bounds.

    Uses the fee advertised by *network* when available, otherwise the
    default; the result is clamped into
    [FEERATE_DEFAULT_RELAY, FEERATE_MAX_RELAY].
    """
    from .simple_config import FEERATE_DEFAULT_RELAY, FEERATE_MAX_RELAY
    advertised = network.relay_fee if (network and network.relay_fee is not None) else None
    fee = FEERATE_DEFAULT_RELAY if advertised is None else advertised
    # Clamp: never above the hard cap, never below the default floor.
    return max(min(fee, FEERATE_MAX_RELAY), FEERATE_DEFAULT_RELAY)
class TestMulticlassPrecisionRecallCurve(unittest.TestCase):
    """Unit tests for the functional multiclass_precision_recall_curve.

    Expected results are tuples of (per-class precision list, per-class
    recall list, per-class threshold list), hand-checked for small inputs.
    """

    def test_multiclass_precision_recall_curve_base(self) -> None:
        """Hand-computed curves for 3-class and 4-class score matrices."""
        input = torch.tensor([[0.1, 0.2, 0.1], [0.4, 0.2, 0.1], [0.6, 0.1, 0.2], [0.4, 0.2, 0.3], [0.6, 0.2, 0.4]])
        target = torch.tensor([0, 1, 2, 1, 0])
        my_compute_result = multiclass_precision_recall_curve(input, target, num_classes=3)
        expected_result = ([torch.tensor([0.4, 0.25, 0.5, 1.0]), torch.tensor([0.4, 0.5, 1.0]), torch.tensor([0.2, 0., 0.0, 0.0, 1.0])], [torch.tensor([1.0, 0.5, 0.5, 0.0]), torch.tensor([1.0, 1.0, 0.0]), torch.tensor([1.0, 1.0, 0.0, 0.0, 0.0])], [torch.tensor([0.1, 0.4, 0.6]), torch.tensor([0.1, 0.2]), torch.tensor([0.1, 0.2, 0.3, 0.4])])
        torch.testing.assert_close(my_compute_result, expected_result, equal_nan=True, atol=1e-08, rtol=1e-05)
        # Second case: 4 classes, all labels present.
        input = torch.tensor([[0.1, 0.2, 0.1, 0.5], [0.4, 0.3, 0.1, 0.9], [0.7, 0.1, 0.2, 0.1], [0.4, 0.2, 0.9, 0.2], [0.6, 0.8, 0.4, 0.6]])
        target = torch.tensor([3, 1, 2, 1, 0])
        my_compute_result = multiclass_precision_recall_curve(input, target, num_classes=4)
        expected_result = ([torch.tensor([0.2, 0.25, 0.5, 0.0, 1.0]), torch.tensor([0.4, 0.5, 0.5, 0.0, 1.0]), torch.tensor([0.2, 0., 0.0, 0.0, 1.0]), torch.tensor([0.2, 0.25, 0., 0.0, 0.0, 1.0])], [torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0]), torch.tensor([1.0, 1.0, 0.5, 0.0, 0.0]), torch.tensor([1.0, 1.0, 0.0, 0.0, 0.0]), torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])], [torch.tensor([0.1, 0.4, 0.6, 0.7]), torch.tensor([0.1, 0.2, 0.3, 0.8]), torch.tensor([0.1, 0.2, 0.4, 0.9]), torch.tensor([0.1, 0.2, 0.5, 0.6, 0.9])])
        torch.testing.assert_close(my_compute_result, expected_result, equal_nan=True, atol=1e-08, rtol=1e-05)

    def test_multiclass_precision_recall_curve_label_not_exist(self) -> None:
        """Class 3 never appears in target; its curve must still be produced."""
        input = torch.tensor([[0.1, 0.2, 0.1, 0.5], [0.4, 0.3, 0.1, 0.9], [0.7, 0.1, 0.2, 0.1], [0.4, 0.2, 0.9, 0.2], [0.6, 0.8, 0.4, 0.6]])
        target = torch.tensor([2, 1, 2, 1, 0])
        my_compute_result = multiclass_precision_recall_curve(input, target, num_classes=4)
        expected_result = ([torch.tensor([0.2, 0.25, 0.5, 0.0, 1.0]), torch.tensor([0.4, 0.5, 0.5, 0.0, 1.0]), torch.tensor([0.4, 0., 0.0, 0.0, 1.0]), torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 1.0])], [torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0]), torch.tensor([1.0, 1.0, 0.5, 0.0, 0.0]), torch.tensor([1.0, 0.5, 0.0, 0.0, 0.0]), torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0, 0.0])], [torch.tensor([0.1, 0.4, 0.6, 0.7]), torch.tensor([0.1, 0.2, 0.3, 0.8]), torch.tensor([0.1, 0.2, 0.4, 0.9]), torch.tensor([0.1, 0.2, 0.5, 0.6, 0.9])])
        torch.testing.assert_close(my_compute_result, expected_result, equal_nan=True, atol=1e-08, rtol=1e-05)

    def test_multiclass_precision_recall_curve_invalid_input(self) -> None:
        """Shape mismatches must raise with the exact documented messages."""
        with self.assertRaisesRegex(ValueError, 'The `input` and `target` should have the same first dimension, got shapes torch.Size\\(\\[4, 2\\]\\) and torch.Size\\(\\[3\\]\\).'):
            multiclass_precision_recall_curve(torch.rand(4, 2), torch.rand(3), num_classes=2)
        with self.assertRaisesRegex(ValueError, 'target should be a one-dimensional tensor, got shape torch.Size\\(\\[3, 2\\]\\).'):
            multiclass_precision_recall_curve(torch.rand(3, 2), torch.rand(3, 2), num_classes=2)
        with self.assertRaisesRegex(ValueError, 'input should have shape of \\(num_sample, num_classes\\), got torch.Size\\(\\[3, 4\\]\\) and num_classes=2.'):
            multiclass_precision_recall_curve(torch.rand(3, 4), torch.rand(3), num_classes=2)
def find_dataset_using_name(dataset_name):
    """Import ``data/<dataset_name>_dataset.py`` and return its dataset class.

    The module must define an attribute whose lowercased name equals
    "<dataset_name>dataset" (underscores in *dataset_name* are ignored).

    Raises:
        ValueError: if no matching attribute is found in the module.
    """
    dataset_filename = (('data.' + dataset_name) + '_dataset')
    datasetlib = importlib.import_module(dataset_filename)
    target_dataset_name = (dataset_name.replace('_', '') + 'dataset')
    wanted = target_dataset_name.lower()
    # Last matching attribute wins, mirroring plain dict iteration order.
    matches = [obj for attr_name, obj in vars(datasetlib).items() if attr_name.lower() == wanted]
    if not matches:
        raise ValueError(('In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.' % (dataset_filename, target_dataset_name)))
    return matches[-1]
class W_ThreadCell(W_Object):
    """Racket thread-cell object (RPython).

    A thread cell stores a per-thread value; ``preserved`` controls whether
    the value is carried over into newly spawned threads.  Every cell
    registers itself in the class-level table on construction.
    """
    errorname = 'thread-cell'
    # RPython hints: these fields never change after construction.
    _immutable_fields_ = ['initial', 'preserved']
    _attrs_ = ['initial', 'preserved', 'value']
    # Shared registry of all live thread cells.
    _table = ThreadCellTable()

    def __init__(self, val, preserved):
        self.value = val
        # Remember the construction-time value separately from the current one.
        self.initial = val
        self.preserved = preserved
        W_ThreadCell._table.add_handle(self)

    def set(self, val):
        self.value = val

    def get(self):
        return self.value
_model_architecture('char_source_transformer', 'char_source_transformer')
def base_architecture(args):
    """Fill in default hyperparameters for the char-source transformer.

    Delegates to the base transformer defaults, then sets character-CNN
    specific options when they are absent from *args*.
    """
    transformer.base_architecture(args)
    args.char_cnn_params = getattr(args, 'char_cnn_params', '[(50, 1), (100,2)]')
    # Bug fix: the lookup key was misspelled 'chr_cnn_nonlinear_fn', so a
    # user-supplied char_cnn_nonlinear_fn was silently replaced by 'relu'.
    args.char_cnn_nonlinear_fn = getattr(args, 'char_cnn_nonlinear_fn', 'relu')
    args.char_cnn_num_highway_layers = getattr(args, 'char_cnn_num_highway_layers', '2')
def test_sample_partially_observed():
    """Sampling a partially observed RV through the numpyro backend.

    A NaN entry marks a missing observation; PyMC should warn about
    imputation and split the variable into observed/unobserved parts with
    the expected shapes in the returned InferenceData.
    """
    with pm.Model() as m:
        with pytest.warns(ImputationWarning):
            # The NaN element is treated as missing and will be imputed.
            x = pm.Normal('x', observed=np.array([0, 1, np.nan]))
        idata = pm.sample(nuts_sampler='numpyro', chains=1, draws=10, tune=10)
        assert (idata.observed_data['x_observed'].shape == (2,))
        assert (idata.posterior['x_unobserved'].shape == (1, 10, 1))
        # The full variable recombines observed and imputed entries.
        assert (idata.posterior['x'].shape == (1, 10, 3))
def split_documents(documents: dict) -> dict:
    """Explode each (title, text) pair into one row per passage.

    Rows whose text is None are dropped; a None title becomes ''.  Returns a
    dict of parallel 'title' and 'text' lists.
    """
    out_titles = []
    out_texts = []
    for title, text in zip(documents['title'], documents['text']):
        if text is None:
            continue
        safe_title = title if title is not None else ''
        for passage in split_text(text):
            out_titles.append(safe_title)
            out_texts.append(passage)
    return {'title': out_titles, 'text': out_texts}
.parametrize('add_version_condition', [True, False])
def test_model_version_attribute_save(add_version_condition: bool) -> None:
    """Saving a versioned model bumps `version` and, when requested, guards
    the write with an optimistic-locking condition expression."""
    item = VersionedModel('test_user_name', email='test_')
    with patch(PATCH_METHOD) as req:
        req.return_value = {}
        # First save: version starts at 1; the condition requires the
        # attribute to not exist yet.
        item.save(add_version_condition=add_version_condition)
        args = req.call_args[0][1]
        params = {'Item': {'name': {'S': 'test_user_name'}, 'email': {'S': 'test_'}, 'version': {'N': '1'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'VersionedModel'}
        if add_version_condition:
            params.update({'ConditionExpression': 'attribute_not_exists (#0)', 'ExpressionAttributeNames': {'#0': 'version'}})
        assert (args == params)
        deep_eq(args, params, _assert=True)
        # Second save: version 1 -> 2; the condition now checks the stored value.
        item.version = 1
        item.name = 'test_new_username'
        item.save(add_version_condition=add_version_condition)
        args = req.call_args[0][1]
        params = {'Item': {'name': {'S': 'test_new_username'}, 'email': {'S': 'test_'}, 'version': {'N': '2'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'VersionedModel'}
        if add_version_condition:
            params.update({'ConditionExpression': '#0 = :0', 'ExpressionAttributeNames': {'#0': 'version'}, 'ExpressionAttributeValues': {':0': {'N': '1'}}})
        assert (args == params)
_pypy
def test_class_method_with_metaclass_spy(mocker: MockerFixture) -> None:
    """Spying on a method of a class using a metaclass must record calls and
    the return value both on the class attribute and on the returned spy."""

    class MetaFoo(type):
        pass

    class Foo():
        __metaclass__ = MetaFoo

        def bar(cls, arg):
            return (arg * 2)

    spy = mocker.spy(Foo, 'bar')
    assert (Foo.bar(arg=10) == 20)
    # The patched attribute itself behaves like the mock...
    Foo.bar.assert_called_once_with(arg=10)
    assert (Foo.bar.spy_return == 20)
    # ...and the spy object returned by mocker.spy records the same call.
    spy.assert_called_once_with(arg=10)
    assert (spy.spy_return == 20)
class GreedyBipartiteMatcherTest(tf.test.TestCase):
    """GreedyBipartiteMatcher tests over one fixed 2x3 similarity matrix."""

    def _run_match(self, num_valid_rows):
        """Run the matcher and return the raw match-results vector."""
        similarity_matrix = tf.constant([[0.5, 0.1, 0.8], [0.15, 0.2, 0.3]])
        matcher = bipartite_matcher.GreedyBipartiteMatcher()
        match = matcher.match(similarity_matrix, num_valid_rows=num_valid_rows)
        with self.test_session() as sess:
            return sess.run(match._match_results)

    def test_get_expected_matches_when_all_rows_are_valid(self):
        self.assertAllEqual(self._run_match(2), [(- 1), 1, 0])

    def test_get_expected_matches_with_valid_rows_set_to_minus_one(self):
        # -1 means "all rows valid", same result as passing 2.
        self.assertAllEqual(self._run_match((- 1)), [(- 1), 1, 0])

    def test_get_no_matches_with_zero_valid_rows(self):
        self.assertAllEqual(self._run_match(0), [(- 1), (- 1), (- 1)])

    def test_get_expected_matches_with_only_one_valid_row(self):
        self.assertAllEqual(self._run_match(1), [(- 1), (- 1), 0])
def read_yaml_file(filename):
    """Load a YAML file and return the parsed data.

    On an I/O error the problem is reported (via the module LOGGER when
    configured, otherwise stdout) and None is returned implicitly.
    """
    try:
        with open(filename) as yaml_file:
            return yaml.safe_load(yaml_file)
    except IOError as error:
        message = f'File error: {str(error)}'
        if LOGGER is None:
            print(message)
        else:
            LOGGER.error(message)
def pretty_print_results(args, address_to_index, p2p_bw, results):
    """Print a human-readable summary of the shuffle benchmark run.

    Emits configuration key/value lines (wrapped in a code fence when
    --markdown was requested), then throughput/bandwidth statistics.
    """
    if args.markdown:
        print('```')
    print('Shuffle benchmark')
    print_separator(separator='-')
    print_key_value(key='Backend', value=f'{args.backend}')
    print_key_value(key='Partition size', value=f'{format_bytes(args.partition_size)}')
    print_key_value(key='Input partitions', value=f'{args.in_parts}')
    print_key_value(key='Protocol', value=f'{args.protocol}')
    print_key_value(key='Device(s)', value=f'{args.devs}')
    if args.device_memory_limit:
        print_key_value(key='Device memory limit', value=f'{format_bytes(args.device_memory_limit)}')
    print_key_value(key='RMM Pool', value=f'{(not args.disable_rmm_pool)}')
    # UCX transport flags only make sense for the ucx/ucxx protocols.
    if (args.protocol in ['ucx', 'ucxx']):
        print_key_value(key='TCP', value=f'{args.enable_tcp_over_ucx}')
        print_key_value(key='InfiniBand', value=f'{args.enable_infiniband}')
        print_key_value(key='NVLink', value=f'{args.enable_nvlink}')
    print_key_value(key='Worker thread(s)', value=f'{args.threads_per_worker}')
    # results rows are (data_processed, duration) pairs.
    print_key_value(key='Data processed', value=f'{format_bytes(results[0][0])}')
    if args.markdown:
        print('\n```')
    (data_processed, durations) = zip(*results)
    print_throughput_bandwidth(args, durations, data_processed, p2p_bw, address_to_index)
def rename_state_to_visibility(migrator: playhouse.migrate.SqliteMigrator):
    """Migrate multiplayer_session.state -> multiplayer_session.visibility.

    Renames the column, then collapses the old state values:
    'setup'/'in-progress' become 'visible' and 'finished' becomes 'hidden'.
    Runs inside a single transaction.
    """
    with database.db.atomic():
        # Build "ALTER TABLE multiplayer_session RENAME COLUMN state TO
        # visibility" through the migrator so identifiers are quoted properly.
        database.db.execute(migrator._alter_table(migrator.make_context(), 'multiplayer_session').literal(' RENAME COLUMN ').sql(peewee.Entity('state')).literal(' TO ').sql(peewee.Entity('visibility')))
        database.db.execute_sql("UPDATE multiplayer_session SET visibility='visible' WHERE visibility='setup' OR visibility='in-progress'")
        database.db.execute_sql("UPDATE multiplayer_session SET visibility='hidden' WHERE visibility='finished'")
def all_scores(mols, data, norm=False, reconstruction=False):
    """Compute per-molecule and aggregate metrics for generated molecules.

    Returns (m0, m1): m0 maps metric name -> per-molecule scores with None
    entries removed; m1 maps metric name -> aggregate score scaled to a
    percentage.  `reconstruction` is currently unused (kept for interface
    compatibility).
    """
    per_molecule = {
        'NP score': MolecularMetrics.natural_product_scores(mols, norm=norm),
        'QED score': MolecularMetrics.quantitative_estimation_druglikeness_scores(mols),
        'logP score': MolecularMetrics.water_octanol_partition_coefficient_scores(mols, norm=norm),
        'SA score': MolecularMetrics.synthetic_accessibility_score_scores(mols, norm=norm),
        'diversity score': MolecularMetrics.diversity_scores(mols, data),
        'drugcandidate score': MolecularMetrics.drugcandidate_scores(mols, data),
    }
    # Drop molecules for which a metric could not be computed.
    m0 = {name: [score for score in scores if score is not None] for name, scores in per_molecule.items()}
    m1 = {
        'valid score': (MolecularMetrics.valid_total_score(mols) * 100),
        'unique score': (MolecularMetrics.unique_total_score(mols) * 100),
        'novel score': (MolecularMetrics.novel_total_score(mols, data) * 100),
    }
    return (m0, m1)
def _fix_indentation(content: str) -> str:
    """Dedent *content* so its first line starts at column zero.

    Continuation lines are dedented by the larger of the first line's indent
    and the second line's indent (reduced by 4 when the first line opens a
    block with ':'), preserving relative indentation below that level.
    """
    lines = content.splitlines(keepends=True)
    head_indent = _get_leading_spaces(content)
    head = lines[0][head_indent:]
    if len(lines) == 1:
        return head
    body_indent = _get_leading_spaces(lines[1])
    if head.rstrip().endswith(':'):
        # Second line sits one block deeper; dedent relative to its parent.
        body_indent -= 4
    strip = max(head_indent, body_indent)
    return head + ''.join(line[strip:] for line in lines[1:])
class unit_gtcn_689(nn.Module):
    """Adaptive graph convolution unit with temporal attention.

    Combines a fixed polynomial of the adjacency matrix (4*A^2 - A - 2*I)
    with a learned offset (PA) and a data-dependent attention term built
    from 9x1 temporal convolutions, then applies per-subset 1x1 convolutions
    with a residual connection.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gtcn_689, self).__init__()
        inter_channels = (out_channels // coff_embedding)
        self.inter_c = inter_channels
        # Learned additive adjacency offset, initialized near zero.
        # NOTE: nn.init.constant is deprecated in newer torch (constant_).
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant(self.PA, 1e-06)
        # Fixed (non-trainable) adjacency.  Variable is legacy torch API.
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset
        self.conv_d = nn.ModuleList()
        self.conv_T1 = nn.ModuleList()
        self.conv_T2 = nn.ModuleList()
        for i in range(self.num_subset):
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
            # 9x1 temporal convs producing the attention embeddings.
            self.conv_T1.append(nn.Conv2d(in_channels, inter_channels, (9, 1), padding=(4, 0)))
            self.conv_T2.append(nn.Conv2d(in_channels, inter_channels, (9, 1), padding=(4, 0)))
        if (in_channels != out_channels):
            # 1x1 projection so the residual connection matches channels.
            self.down = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1), nn.BatchNorm2d(out_channels))
        else:
            self.down = (lambda x: x)
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax((- 2))
        self.relu = nn.ReLU()
        # Fixed polynomial of A: 4*A^2 - A - 2*I.
        self.A_ch3 = (((4 * torch.pow(self.A, 2)) - self.A) - (2 * torch.eye(self.A.size((- 1)))))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Final BN starts near zero so the residual path dominates initially.
        bn_init(self.bn, 1e-06)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        # x: (N, C, T, V) — batch, channels, time, vertices.
        (N, C, T, V) = x.size()
        A_ch3 = self.A_ch3.cuda(x.get_device())
        A = (A_ch3 + self.PA)
        y = None
        for i in range(self.num_subset):
            # Data-dependent vertex-vertex attention, softmax-normalized.
            A_T1 = self.conv_T1[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, (self.inter_c * T))
            A_T2 = self.conv_T2[i](x).view(N, (self.inter_c * T), V)
            A_T1 = self.soft((torch.matmul(A_T1, A_T2) / A_T1.size((- 1))))
            A1 = (A[i] + A_T1)
            A2 = x.view(N, (C * T), V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            # Accumulate subset outputs.
            y = ((z + y) if (y is not None) else z)
        y = self.bn(y)
        y += self.down(x)
        return self.relu(y)
class MatchingForTraining(torch.nn.Module):
    """SuperPoint feature extraction + SuperGlue matching (training variant)."""

    def __init__(self, config={}):
        # NOTE(review): mutable default argument; harmless here since config
        # is only read, but config=None with a fallback would be safer.
        super().__init__()
        self.superpoint = SuperPoint(config.get('superpoint', {}))
        self.superglue = SuperGlue(config.get('superglue', {}))

    def forward(self, data):
        """Run SuperPoint on images lacking precomputed keypoints, then match.

        Detection outputs for image0/image1 get '0'/'1' key suffixes; the
        merged dict is forwarded to SuperGlue and returned together with its
        outputs.
        """
        pred = {}
        if ('keypoints0' not in data):
            pred0 = self.superpoint({'image': data['image0']})
            pred = {**pred, **{(k + '0'): v for (k, v) in pred0.items()}}
        if ('keypoints1' not in data):
            pred1 = self.superpoint({'image': data['image1']})
            pred = {**pred, **{(k + '1'): v for (k, v) in pred1.items()}}
        data = {**data, **pred}
        for k in data:
            if (k == 'file_name'):
                continue
            # Batch lists/tuples of per-image tensors into one tensor.
            if isinstance(data[k], (list, tuple)):
                data[k] = torch.stack(data[k])
            # NOTE(review): 'requres_grad' is a typo for 'requires_grad'; as
            # written it only attaches an unused attribute and has no effect.
            data[k].requres_grad = True
        pred = {**pred, **self.superglue(data)}
        pred = {**pred, **data}
        for k in pred:
            if ((k == 'file_name') or (k == 'skip_train')):
                continue
            # NOTE(review): same 'requres_grad' typo as above.
            pred[k].requres_grad = True
        return pred
(repr=False, frozen=True, slots=True)
class _MaxLengthValidator():
    """attrs validator rejecting values longer than max_length.

    Follows the attrs validator protocol: called with (instance, attribute,
    value); raises ValueError when len(value) exceeds the limit.
    """
    max_length = attrib()

    def __call__(self, inst, attr, value):
        if (len(value) > self.max_length):
            msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}"
            raise ValueError(msg)

    def __repr__(self):
        return f'<max_len validator for {self.max_length}>'
def _throughput_compute(num_processed: int, elapsed_time_sec: float) -> torch.Tensor:
    """Return throughput (items per second) as a scalar tensor.

    Raises:
        ValueError: when num_processed is negative or elapsed_time_sec is
            not strictly positive.
    """
    if num_processed < 0:
        raise ValueError(f'Expected num_processed to be a non-negative number, but received {num_processed}.')
    if elapsed_time_sec <= 0:
        raise ValueError(f'Expected elapsed_time_sec to be a positive number, but received {elapsed_time_sec}.')
    rate = num_processed / elapsed_time_sec
    return torch.tensor(rate)
class GamePresetDescriber():
    """Produces human-readable summaries of a game preset's configuration."""

    def _calculate_pickup_pool(self, configuration: BaseConfiguration) -> list[str]:
        """Describe how the pickup pool deviates from the game defaults.

        Returns short strings like "Starts with X", "Excludes Y",
        "Shuffles 2x Z"; an empty deviation yields the vanilla notice.
        """
        expected_starting_count = self.expected_starting_item_count(configuration)
        expected_shuffled_count = self.expected_shuffled_pickup_count(configuration)
        shuffled_list = []
        starting_list = []
        is_vanilla_starting = True
        excluded_list = []
        for (standard_pickup, pickup_state) in configuration.standard_pickup_configuration.pickups_state.items():
            if standard_pickup.hide_from_gui:
                continue
            starting_count = pickup_state.num_included_in_starting_pickups
            # A copy kept in its original location counts as shuffled.
            shuffled_count = (pickup_state.num_shuffled_pickups + int(pickup_state.include_copy_in_original_location))
            if (starting_count != expected_starting_count[standard_pickup]):
                if (starting_count > 1):
                    starting_list.append(f'{starting_count}x {standard_pickup.name}')
                elif (starting_count == 1):
                    starting_list.append(standard_pickup.name)
                else:
                    is_vanilla_starting = False
                    if (shuffled_count == 0):
                        excluded_list.append(standard_pickup.name)
            if (shuffled_count != expected_shuffled_count[standard_pickup]):
                if (shuffled_count > 1):
                    shuffled_list.append(f'{shuffled_count}x {standard_pickup.name}')
                elif (shuffled_count == 1):
                    shuffled_list.append(standard_pickup.name)
                elif (starting_count == 0):
                    excluded_list.append(standard_pickup.name)
        result = []
        if starting_list:
            result.append(('Starts with ' + ', '.join(starting_list)))
        elif is_vanilla_starting:
            result.append('Vanilla starting items')
        if excluded_list:
            result.append(('Excludes ' + ', '.join(excluded_list)))
        if shuffled_list:
            result.append(('Shuffles ' + ', '.join(shuffled_list)))
        return result

    def format_params(self, configuration: BaseConfiguration) -> dict[(str, list[str])]:
        """Build the category -> list-of-description-lines mapping for the UI."""
        game_description = default_database.game_description_for(configuration.game)
        standard_pickups = configuration.standard_pickup_configuration
        template_strings = collections.defaultdict(list)
        unsupported = configuration.unsupported_features()
        if unsupported:
            template_strings['WARNING!'] = ['This preset uses the following unsupported features:', ', '.join(unsupported)]
        randomization_mode = configuration.available_locations.randomization_mode
        # Show a single number when min == max, otherwise a range.
        if (standard_pickups.minimum_random_starting_pickups == standard_pickups.maximum_random_starting_pickups):
            random_starting_pickups = f'{standard_pickups.minimum_random_starting_pickups}'
        else:
            random_starting_pickups = '{} to {}'.format(standard_pickups.minimum_random_starting_pickups, standard_pickups.maximum_random_starting_pickups)
        template_strings['Logic Settings'].append(configuration.trick_level.pretty_description(game_description))
        if (not configuration.logical_resource_action.is_default()):
            template_strings['Logic Settings'].append(f'{configuration.logical_resource_action.long_name} dangerous actions')
        if (randomization_mode != RandomizationMode.default()):
            template_strings['Item Pool'].append(randomization_mode.description)
        per_category_pool = pool_creator.calculate_pool_pickup_count(configuration)
        if (configuration.available_locations.randomization_mode is RandomizationMode.FULL):
            (pool_items, maximum_size) = pool_creator.get_total_pickup_count(per_category_pool)
            template_strings['Item Pool'].append(f'Size: {pool_items} of {maximum_size}')
        else:
            # Per-category counts when locations are restricted by category.
            for (category, (count, num_nodes)) in per_category_pool.items():
                if isinstance(category, LocationCategory):
                    template_strings['Item Pool'].append(f'{category.long_name}: {count}/{num_nodes}')
        if (random_starting_pickups != '0'):
            template_strings['Item Pool'].append(f'{random_starting_pickups} random starting items')
        template_strings['Item Pool'].extend(self._calculate_pickup_pool(configuration))
        if (configuration.damage_strictness != LayoutDamageStrictness.MEDIUM):
            template_strings['Difficulty'].append(f'{configuration.damage_strictness.long_name} damage strictness')
        if (configuration.pickup_model_style != PickupModelStyle.ALL_VISIBLE):
            template_strings['Difficulty'].append(f'Pickup: {configuration.pickup_model_style.long_name} ({configuration.pickup_model_data_source.long_name})')
        starting_locations = configuration.starting_location.locations
        if (len(starting_locations) == 1):
            area = game_description.region_list.area_by_area_location(starting_locations[0])
            starting_location = f'Starts at {game_description.region_list.area_name(area)}'
        else:
            starting_location = f'{len(starting_locations)} starting locations'
        template_strings['Gameplay'].append(starting_location)
        dock_rando = configuration.dock_rando
        if dock_rando.is_enabled():
            template_strings['Gameplay'].append(dock_rando.mode.description)
        return template_strings

    def expected_starting_item_count(self, configuration: BaseConfiguration) -> dict[(StandardPickupDefinition, int)]:
        """Baseline starting count per pickup used when diffing the pool."""
        return {major: major.default_starting_count for major in configuration.standard_pickup_configuration.pickups_state.keys()}

    def expected_shuffled_pickup_count(self, configuration: BaseConfiguration) -> dict[(StandardPickupDefinition, int)]:
        """Baseline shuffled count per pickup used when diffing the pool."""
        return {major: major.default_shuffled_count for major in configuration.standard_pickup_configuration.pickups_state.keys()}
def adam(func, x, n_iter, learning_rate=0.001, beta1=0.9, beta2=0.999, eps=1e-08):
    """Minimize via the Adam update rule with bias-corrected moments.

    *func* must return a (value, gradient) pair; only the gradient is used.
    Note: the loop runs n_iter + 1 times, matching the original behavior.
    Returns the final iterate x (mutable array inputs are updated in place).
    """
    first_moment = 0.0
    second_moment = 0.0
    for step in range(n_iter + 1):
        _, grad = func(x)
        first_moment = (beta1 * first_moment) + ((1 - beta1) * grad)
        second_moment = (beta2 * second_moment) + ((1 - beta2) * (grad ** 2))
        # Bias correction compensates for the zero-initialized moments.
        corrected_v = first_moment / (1 - (beta1 ** (step + 1)))
        corrected_s = second_moment / (1 - (beta2 ** (step + 1)))
        x -= (learning_rate * corrected_v) / (np.sqrt(corrected_s) + eps)
    return x
class TestDriverHDF5Save(QiskitChemistryTestCase, TestDriver):
    """Round-trip test: load a QMolecule from HDF5, save it to a temp file,
    then reload it so TestDriver's shared checks run against the saved copy."""

    def setUp(self):
        super().setUp()
        driver = HDF5Driver(hdf5_input=self.get_resource_path('test_driver_hdf5.hdf5'))
        temp_qmolecule = driver.run()
        (file, self.save_file) = tempfile.mkstemp(suffix='.hdf5')
        os.close(file)  # mkstemp opens the fd; only the path is needed
        temp_qmolecule.save(self.save_file)
        # Reload from the freshly saved file; shared tests use self.qmolecule.
        driver = HDF5Driver(hdf5_input=self.save_file)
        self.qmolecule = driver.run()

    def tearDown(self):
        try:
            os.remove(self.save_file)
        except OSError:
            # Best-effort cleanup; the file may not exist if setUp failed.
            pass
def parse_args():
    """Build the CLI option parser and parse sys.argv.

    Options: -f/--file (target path), the store-true flags -a/--append,
    -Q/--quote and -s/--addspace, and -c/--content (text to write).
    Returns optparse's (options, positional_args) pair.
    """
    parser = optparse.OptionParser()
    parser.add_option('-f', '--file', dest='file_path')
    # The three boolean flags share identical configuration.
    for short_opt, long_opt in (('-a', '--append'), ('-Q', '--quote'), ('-s', '--addspace')):
        parser.add_option(short_opt, long_opt, action='store_true', default=False)
    parser.add_option('-c', '--content', dest='content')
    return parser.parse_args()
def upgrade(saveddata_engine):
    """Rebuild the damagePatterns table via a temp-table copy.

    Creates damagePatternsTemp (DDL held in the module-level `tmpTable`),
    copies every row across, drops the old table, and renames the temp
    table back — the SQLite idiom for altering a table's schema.
    """
    saveddata_engine.execute(tmpTable)
    saveddata_engine.execute('INSERT INTO damagePatternsTemp (ID, name, emAmount, thermalAmount, kineticAmount, explosiveAmount, ownerID, created, modified) SELECT ID, name, emAmount, thermalAmount, kineticAmount, explosiveAmount, ownerID, created, modified FROM damagePatterns')
    saveddata_engine.execute('DROP TABLE damagePatterns')
    saveddata_engine.execute('ALTER TABLE damagePatternsTemp RENAME TO damagePatterns')
.parametrize('bin_op', [pytest.param((lambda a, b: (a + b)), id='add'), pytest.param((lambda a, b: (a - b)), id='sub'), pytest.param((lambda a, b: (a * b)), id='mul'), pytest.param(_div, id='div')])
def test_binopt_scalar(all_qevo, bin_op):
    """Binary ops between a QobjEvo and a scalar must match applying the same
    op to the evaluated Qobj at every test time.  Both operand orders are
    checked, except scalar/QobjEvo for division."""
    obj = all_qevo
    scalar = (0.5 + 1j)
    for t in TESTTIMES:
        as_qevo = bin_op(obj, scalar)(t)
        as_qobj = bin_op(obj(t), scalar)
        _assert_qobj_almost_eq(as_qevo, as_qobj)
        if (bin_op is not _div):
            # Reversed operand order (scalar op QobjEvo).
            as_qevo = bin_op(scalar, obj)(t)
            as_qobj = bin_op(scalar, obj(t))
            _assert_qobj_almost_eq(as_qevo, as_qobj)
class DepositfilesCom(BaseAccount):
    """Depositfiles.com account plugin (pyLoad).

    NOTE(review): the URL string literals passed to self.load() in both
    methods have been scrubbed from this copy, leaving unterminated quotes;
    restore them from the upstream plugin before use.
    """
    __name__ = 'DepositfilesCom'
    __type__ = 'account'
    __version__ = '0.39'
    __status__ = 'testing'
    __description__ = 'Depositfiles.com account plugin'
    __license__ = 'GPLv3'
    __authors__ = [('mkaay', ''), ('stickell', 'l.'), ('Walter Purcaro', '')]

    def grab_info(self, user, password, data):
        # NOTE(review): URL literal scrubbed — this call is syntactically broken.
        html = self.load('
        # Parse the German "gold access until" date from the account page.
        validuntil = re.search('Sie haben Gold Zugang bis: <b>(.*?)</b></div>', html).group(1)
        validuntil = time.mktime(time.strptime(validuntil, '%Y-%m-%d %H:%M:%S'))
        return {'validuntil': validuntil, 'trafficleft': (- 1)}

    def signin(self, user, password, data):
        # NOTE(review): URL literal scrubbed — this call is syntactically broken.
        html = self.load(' get={'return': '/de/gold/payment.php'}, post={'login': user, 'password': password})
        # German error banner indicates wrong credentials.
        if ('<div class="error_message">Sie haben eine falsche Benutzername-Passwort-Kombination verwendet.</div>' in html):
            self.fail_login()
def cross_validation(edge_embs, edge_labels):
    """5-fold cross-validated link prediction with a LinearSVC per fold.

    edge_embs / edge_labels map a seed node to its per-edge embeddings and
    binary labels.  Folds are split over seed nodes; returns
    (mean AUC, mean MRR) across folds.  Uses module-level `seed` and
    `max_iter` globals.
    """
    (auc, mrr) = ([], [])
    (seed_nodes, num_nodes) = (np.array(list(edge_embs.keys())), len(edge_embs))
    skf = KFold(n_splits=5, shuffle=True, random_state=seed)
    # KFold only needs element counts, hence the zero dummies.
    for (fold, (train_idx, test_idx)) in enumerate(skf.split(np.zeros((num_nodes, 1)), np.zeros(num_nodes))):
        print(f'Start Evaluation Fold {fold}!')
        (train_edge_embs, test_edge_embs, train_edge_labels, test_edge_labels) = ([], [], [], [])
        for each in train_idx:
            train_edge_embs.append(edge_embs[seed_nodes[each]])
            train_edge_labels.append(edge_labels[seed_nodes[each]])
        for each in test_idx:
            test_edge_embs.append(edge_embs[seed_nodes[each]])
            test_edge_labels.append(edge_labels[seed_nodes[each]])
        # Flatten per-node groups into one array per split.
        (train_edge_embs, test_edge_embs, train_edge_labels, test_edge_labels) = (np.concatenate(train_edge_embs), np.concatenate(test_edge_embs), np.concatenate(train_edge_labels), np.concatenate(test_edge_labels))
        clf = LinearSVC(random_state=seed, max_iter=max_iter)
        clf.fit(train_edge_embs, train_edge_labels)
        preds = clf.predict(test_edge_embs)
        auc.append(roc_auc_score(test_edge_labels, preds))
        confidence = clf.decision_function(test_edge_embs)
        (curr_mrr, conf_num) = ([], 0)
        # MRR: within each seed node's slice of edges, rank by decreasing
        # confidence and take the reciprocal rank of its best positive edge.
        for each in test_idx:
            test_edge_conf = np.argsort((- confidence[conf_num:(conf_num + len(edge_labels[seed_nodes[each]]))]))
            rank = np.empty_like(test_edge_conf)
            rank[test_edge_conf] = np.arange(len(test_edge_conf))
            curr_mrr.append((1 / (1 + np.min(rank[np.argwhere((edge_labels[seed_nodes[each]] == 1)).flatten()]))))
            conf_num += len(rank)
        mrr.append(np.mean(curr_mrr))
        # Every confidence value must have been consumed exactly once.
        assert (conf_num == len(confidence))
    return (np.mean(auc), np.mean(mrr))
class SpMatrix(_PackedMatrixBase, _sp_matrix.SpMatrix):
    """Symmetric packed matrix, optionally sized at construction time."""

    def __init__(self, num_rows=None, resize_type=_matrix_common.MatrixResizeType.SET_ZERO):
        """Create the matrix; if num_rows is given, resize immediately.

        Raises:
            ValueError: if num_rows is not a non-negative integer.
        """
        super(SpMatrix, self).__init__()
        if num_rows is None:
            return
        # Validate before touching the underlying storage.
        if not (isinstance(num_rows, int) and num_rows >= 0):
            raise ValueError('num_rows should be a non-negative integer.')
        self.resize_(num_rows, resize_type)

    def clone(self):
        """Return a same-sized deep copy of this matrix."""
        duplicate = SpMatrix(len(self))
        return duplicate.copy_from_sp_(self)
def EfficientNetB0(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """Instantiate the EfficientNet-B0 architecture.

    B0 uses width/depth coefficients of 1.0, a 224px default resolution and
    dropout rate 0.2; all other arguments are forwarded to EfficientNet.
    """
    return EfficientNet(
        1.0,            # width coefficient
        1.0,            # depth coefficient
        224,            # default input resolution
        0.2,            # dropout rate
        model_name='efficientnet-b0',
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        input_shape=input_shape,
        pooling=pooling,
        classes=classes,
        **kwargs,
    )
def cast_waveunit(unit, force_match=True):
    """Normalize a wavespace unit string to one of 'nm', 'nm_vac', 'cm-1'.

    If the unit is unrecognized: raise ValueError when force_match is True,
    otherwise return the unit unchanged.
    """
    if unit in WAVELEN_UNITS:
        return 'nm'
    if unit in WAVELENVAC_UNITS:
        return 'nm_vac'
    if unit in WAVENUM_UNITS:
        return 'cm-1'
    if force_match:
        known_units = (WAVELEN_UNITS + WAVELENVAC_UNITS) + WAVENUM_UNITS
        raise ValueError('Unknown wavespace unit: {0}. Should be one of {1}'.format(unit, known_units))
    return unit
class Effect744(BaseEffect):
    """Passive effect: scales CPU-Management modules' 'duration' attribute
    by the container's 'scanspeedBonus', per skill level when the container
    is a skill."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill containers scale by level; anything else counts as level 1.
        if 'skill' in context:
            level = container.level
        else:
            level = 1
        boost_amount = container.getModifiedItemAttr('scanspeedBonus') * level
        requires_cpu_mgmt = lambda mod: mod.item.requiresSkill('CPU Management')
        fit.modules.filteredItemBoost(requires_cpu_mgmt, 'duration', boost_amount, **kwargs)
def target():
    """End-to-end pywebio pin-widget test coroutine.

    Renders one widget of every pin type, exercises server-side updates,
    then waits for the (external) driver to change each widget once and
    asserts that pin values, pin_wait_change events and pin_on_change
    callbacks all agree.
    """
    options = ['A', 'B', 'C']
    # --- render one widget of every pin type -----------------------------
    put_input('input', label='input')
    put_textarea('textarea', label='textarea', rows=3, code=None, maxlength=10, minlength=20, value=None, placeholder='placeholder', readonly=False, help_text='help_text')
    put_textarea('code', label='code', rows=4, code=True, maxlength=10, minlength=20, value=None, placeholder='placeholder', readonly=False, help_text='help_text')
    put_select('select', options=options, label='Input pin widget')
    put_select('select_multiple', options=options, label='select-multiple', multiple=True, value=None, help_text='help_text')
    put_checkbox('checkbox', options=options, label='checkbox', inline=False, value=None, help_text='help_text')
    put_checkbox('checkbox_inline', options=options, label='checkbox_inline', inline=True, value=None, help_text='help_text')
    put_radio('radio', options=options, label='radio', inline=False, value=None, help_text='help_text')
    put_radio('radio_inline', options=options, label='radio_inline', inline=True, value='B', help_text='help_text')
    put_actions('actions', buttons=['action_a', 'action_b'], label='actions')
    # --- server-side updates of already-rendered widgets ------------------
    pin_update('input', help_text='This is help text')
    pin_update('select_multiple', value=['B', 'C'])
    pin.radio = 'B'
    # Both access styles (subscript and attribute) must see the new value.
    assert ((yield pin['radio']) == (yield pin.radio) == 'B')
    names = ['input', 'textarea', 'code', 'select', 'select_multiple', 'checkbox', 'checkbox_inline', 'radio', 'radio_inline', 'actions']
    values = {}
    on_change_values = {}
    for name in names:
        # name=name binds the loop variable eagerly (late-binding closure fix).
        pin_on_change(name, (lambda val, name=name: on_change_values.__setitem__(name, val)))
    # Block until the driver has changed every widget at least once.
    while (len(names) != len(values)):
        info = (yield pin_wait_change(*names))
        values[info['name']] = info['value']
    # Final pin value, last wait_change event and last on_change callback
    # must all agree for every widget.
    for name in names:
        assert ((yield pin[name]) == values.get(name) == on_change_values.get(name))
        put_text(name, values.get(name), on_change_values.get(name))
    put_text(PASSED_TEXT)
class CmdLearnSpell(Command):
    """
    Learn a spell by (partial) name.

    Usage:
      learnspell <spell name>

    Matches the argument (case-insensitively, minimum 3 characters) against
    the names in SPELLS and adds the unique match to the caller's known
    spells.

    NOTE: You will almost definitely want to replace this with your own
    system for learning spells, perhaps tied to character advancement or
    finding items in the game world that spells can be learned from.
    """
    # BUGFIX: the note above previously sat as a dead bare-string statement
    # at the very end of func(); it is documentation and belongs here.
    key = 'learnspell'
    help_category = 'magic'

    def func(self):
        """Resolve the partial spell name and teach it to the caller."""
        spell_list = sorted(SPELLS.keys())
        args = self.args.lower()
        args = args.strip(' ')
        caller = self.caller
        spell_to_learn = []
        # Require at least 3 characters to avoid overly broad matches.
        if (not args) or (len(args) < 3):
            caller.msg('Usage: learnspell <spell name>')
            return
        # Collect every spell whose name contains the argument.
        for spell in spell_list:
            if args in spell.lower():
                spell_to_learn.append(spell)
        if spell_to_learn == []:
            caller.msg('There is no spell with that name.')
            return
        if len(spell_to_learn) > 1:
            # Ambiguous — ask the player to disambiguate.
            matched_spells = ', '.join(spell_to_learn)
            caller.msg('Which spell do you mean: %s?' % matched_spells)
            return
        # Exactly one match remains at this point.
        spell_to_learn = spell_to_learn[0]
        if spell_to_learn not in self.caller.db.spells_known:
            caller.db.spells_known.append(spell_to_learn)
            caller.msg("You learn the spell '%s'!" % spell_to_learn)
        else:
            caller.msg("You already know the spell '%s'!" % spell_to_learn)
@dataclass(frozen=True)
class ReceiveWithdrawExpired(AuthenticatedSenderStateChange):
    """State change emitted when a WithdrawExpired message is received.

    BUGFIX: the ``@dataclass(frozen=True)`` decorator was garbled to a bare
    ``(frozen=True)`` in this source, and the two channel accessors below
    lacked their ``@property`` decorators — both restored here so the class
    is a valid frozen dataclass and the accessors read as attributes.
    """
    message_identifier: MessageID
    canonical_identifier: CanonicalIdentifier
    total_withdraw: WithdrawAmount
    expiration: BlockExpiration
    nonce: Nonce
    participant: Address

    @property
    def channel_identifier(self) -> ChannelID:
        # Convenience accessor into the canonical identifier.
        return self.canonical_identifier.channel_identifier

    @property
    def token_network_address(self) -> TokenNetworkAddress:
        # Convenience accessor into the canonical identifier.
        return self.canonical_identifier.token_network_address
def host_tuple(url: QUrl) -> HostTupleType:
    """Return a ``(scheme, host, port)`` tuple for a QUrl.

    When the URL carries no explicit port (QUrl.port() returns -1), the
    scheme's well-known default port is substituted.

    Raises:
        ValueError: if the URL has no host, or has no port and the scheme
            has no known default.
    """
    ensure_valid(url)
    (scheme, host, port) = (url.scheme(), url.host(), url.port())
    assert scheme
    if (not host):
        raise ValueError('Got URL {} without host.'.format(url.toDisplayString()))
    if (port == (- 1)):
        # BUGFIX: the dict keys were garbled in this source ("{' 80, ' 443,");
        # restored to the standard scheme -> default-port mapping.
        port_mapping = {'http': 80, 'https': 443, 'ftp': 21}
        try:
            port = port_mapping[scheme]
        except KeyError:
            raise ValueError('Got URL {} with unknown port.'.format(url.toDisplayString()))
    return (scheme, host, port)
def test_AddValueToZero_simple_weights():
    """AddValueToZero targeting weights shifts every weight by the value."""
    original = skcriteria.mkdm(
        matrix=[[1, 0, 3], [0, 5, 6]],
        objectives=[min, max, min],
        weights=[1, 2, 0],
    )
    shifted = skcriteria.mkdm(
        matrix=[[1, 0, 3], [0, 5, 6]],
        objectives=[min, max, min],
        weights=[1.5, 2.5, 0.5],
    )
    transformer = AddValueToZero(value=0.5, target='weights')
    assert transformer.transform(original).equals(shifted)
def main():
    """CLI entry point: two-stage training.

    Stage 1 pre-trains the encoder with a self-supervised entropy loss on
    noise-augmented view pairs; stage 2 trains the supervised classifier
    heads with cross-entropy, evaluating on the test set near the end.
    """
    learning_rate = 0.001
    # ---- argument parsing ------------------------------------------------
    parser = argparse.ArgumentParser('Self-Supervised')
    parser.add_argument('--tau', type=float, default=1.0, metavar='LR')
    parser.add_argument('--EPS', type=float, default=1e-05, help='episillon')
    parser.add_argument('--weight-decay', type=float, default=1.5e-06, help='weight decay (default: 1e-4)')
    parser.add_argument('--lam1', type=float, default=0.0, metavar='LR')
    parser.add_argument('--lam2', type=float, default=1.0, metavar='LR')
    parser.add_argument('--local_crops_number', type=int, default=12)
    parser.add_argument('--min1', type=float, default=0.4, metavar='LR')
    parser.add_argument('--max1', type=float, default=1.0, metavar='LR')
    parser.add_argument('--min2', type=float, default=0.05, metavar='LR')
    parser.add_argument('--max2', type=float, default=0.4, metavar='LR')
    parser.add_argument('--gpu', type=int, default=1, metavar='gpu')
    parser.add_argument('--eval', type=str, default='no', metavar='gpu')
    parser.add_argument('--model', choices=['MLP', 'LeNet', 'ResNet18', 'ResNet50', 'ResNet101', 'RNN', 'GRU', 'LSTM', 'BiLSTM', 'CNN+GRU', 'ViT'])
    args = parser.parse_args()
    # Crop scale ranges are packed into tuples consumed by the loss/augment code.
    args.global_crops_scale = (args.min1, args.max1)
    args.local_crops_scale = (args.min2, args.max2)
    criterion = EntLoss(args, 0.0, 0.5)
    root = './Data/'
    (unsupervised_train_loader, supervised_train_loader, test_dataloader, model) = load_unsupervised_data_n_model(args.model, root)
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model.to(device)
    # ---- stage 1: self-supervised encoder pre-training (100 epochs) ------
    print('Self-supervised encoder training')
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=args.weight_decay)
    for epoch in range(100):
        total_loss = 0
        kl_loss = 0
        eh_loss = 0
        he_loss = 0
        kde_loss = 0
        for data in unsupervised_train_loader:
            (x, y) = data
            (x, y) = (x.to(device), y.to(device))
            # Two independently noised views of the same batch
            # (noise std drawn uniformly at random per batch).
            x1 = gaussian_noise(x, random.uniform(0, 2.0))
            x2 = gaussian_noise(x, random.uniform(0.1, 2.0))
            (feat_x1, feat_x2) = model(x1, x2)
            # criterion returns a dict of loss components.
            loss = criterion(feat_x1, feat_x2)
            loss_kl = loss['kl']
            loss_eh = loss['eh']
            loss_he = loss['he']
            loss_kde = loss['kde']
            # Only 'final-kde' is backpropagated; the rest are logged.
            loss = loss['final-kde']
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.data
            kl_loss += loss_kl.data
            eh_loss += loss_eh.data
            he_loss += loss_he.data
            kde_loss += loss_kde.data
        print('epoch [{}/{}], total loss:{:.4f},kl loss:{:.4f},eh loss:{:.4f},he loss:{:.4f},kde loss:{:.4f}'.format((epoch + 1), 100, total_loss, kl_loss, eh_loss, he_loss, kde_loss))

    def test():
        # Evaluate both classifier heads on the held-out test set.
        model.eval()
        (correct_1, correct_2) = (0, 0)
        total = 0
        with torch.no_grad():
            for data in test_dataloader:
                (x, y) = data
                (x, y) = (x.to(device), y.to(device))
                (y1, y2) = model(x, x, flag='supervised')
                (_, pred_1) = torch.max(y1.data, 1)
                (_, pred_2) = torch.max(y2.data, 1)
                total += y.size(0)
                correct_1 += (pred_1 == y).sum().item()
                correct_2 += (pred_2 == y).sum().item()
        print('Test accuracy: {:.2f}%, {:.2f}%'.format(((100 * correct_1) / total), ((100 * correct_2) / total)))

    # ---- stage 2: supervised classifier training (300 epochs) ------------
    # Only the classifier head's parameters are optimized here.
    print('Supervised classifier training')
    optimizer_supervised = torch.optim.Adam(model.classifier.parameters(), lr=learning_rate, weight_decay=1e-05)
    ce_criterion = nn.CrossEntropyLoss()
    for epoch in range(300):
        model.train()
        total_loss = 0
        for data in supervised_train_loader:
            (x, y) = data
            x = Variable(x).to(device)
            y = y.type(torch.LongTensor)
            y = y.to(device)
            (y1, y2) = model(x, x, flag='supervised')
            # Sum of both heads' cross-entropy losses.
            loss = (ce_criterion(y1, y) + ce_criterion(y2, y))
            optimizer_supervised.zero_grad()
            loss.backward()
            optimizer_supervised.step()
            total_loss += loss.data
        print('epoch [{}/{}], loss:{:.6f}'.format((epoch + 1), 300, total_loss))
        # Only evaluate during the final epochs to save time.
        if (epoch > 250):
            test()
    return
class GasMeter(GasMeterAPI):
    """Tracks remaining, returned and refunded gas during EVM execution."""

    start_gas: int = None
    gas_refunded: int = None
    gas_remaining: int = None
    logger = get_extended_debug_logger('eth.gas.GasMeter')

    def __init__(self, start_gas: int, refund_strategy: RefundStrategy=default_refund_strategy) -> None:
        """Initialize with a validated starting gas budget and zero refunds."""
        validate_uint256(start_gas, title='Start Gas')
        self.refund_strategy = refund_strategy
        self.start_gas = start_gas
        self.gas_remaining = start_gas
        self.gas_refunded = 0

    def consume_gas(self, amount: int, reason: str) -> None:
        """Deduct gas; raise OutOfGas when the budget is exceeded."""
        if amount < 0:
            raise ValidationError('Gas consumption amount must be positive')
        if amount > self.gas_remaining:
            raise OutOfGas(f'Out of gas: Needed {amount} - Remaining {self.gas_remaining} - Reason: {reason}')
        self.gas_remaining -= amount

    def return_gas(self, amount: int) -> None:
        """Credit gas back to the remaining budget."""
        if amount < 0:
            raise ValidationError('Gas return amount must be positive')
        self.gas_remaining += amount

    def refund_gas(self, amount: int) -> None:
        """Accumulate a refund via the configured refund strategy."""
        self.gas_refunded = self.refund_strategy(self.gas_refunded, amount)
class ST_plus_TR_block_cross(nn.Module):
    """Spatial (actor) + temporal transformer block with cross decoding.

    Encodes the input along the actor axis (with a clustering encoder that
    also returns an auxiliary loss) and along the temporal axis, cross-decodes
    the two memories in both orientations, sums them, and finally decodes the
    external query against the combined memory.
    """

    def __init__(self, config, embed_feat):
        super(ST_plus_TR_block_cross, self).__init__()
        self.embed_features = embed_feat
        # Actor-axis encoder with clustering attention (returns an extra loss).
        encoder_layer_actor = TransformerEncoderLayer_cluster(self.embed_features, config.Nhead, total_size=config.total_size, window_size=config.window_size, dropout=config.dropout_porb, normalize_before=True)
        encoder_norm_actor = nn.LayerNorm(self.embed_features)
        self.encoder_actor = TransformerEncoder_cluster(encoder_layer_actor, num_layers=config.num_encoder_layers, norm=encoder_norm_actor)
        # Temporal-axis encoder (standard transformer encoder).
        encoder_layer_temp = TransformerEncoderLayer(self.embed_features, config.Nhead, dropout=config.dropout_porb, normalize_before=True)
        encoder_norm_temp = nn.LayerNorm(self.embed_features)
        self.encoder_temp = TransformerEncoder(encoder_layer_temp, num_layers=config.num_encoder_layers, norm=encoder_norm_temp)
        # Cross decoder 1: actor memory attends to temporal memory (actor layout).
        decoder_actor1 = TransformerDecoderLayer2(self.embed_features, config.Nhead, dropout=config.dropout_porb, normalize_before=True)
        decoder_norm_actor1 = nn.LayerNorm(self.embed_features)
        self.decoder_actor1 = TransformerDecoder(decoder_actor1, num_layers=config.num_decoder_layers, norm=decoder_norm_actor1)
        # Cross decoder 2: same pair, temporal layout.
        decoder_actor2 = TransformerDecoderLayer2(self.embed_features, config.Nhead, dropout=config.dropout_porb, normalize_before=True)
        decoder_norm_actor2 = nn.LayerNorm(self.embed_features)
        self.decoder_actor2 = TransformerDecoder(decoder_actor2, num_layers=config.num_decoder_layers, norm=decoder_norm_actor2)
        # Final decoder: external query attends to the combined memory.
        decoder_temp = TransformerDecoderLayer2(self.embed_features, config.Nhead, dropout=config.dropout_porb, normalize_before=True)
        decoder_norm_temp = nn.LayerNorm(self.embed_features)
        self.decoder_temp = TransformerDecoder(decoder_temp, num_layers=config.num_decoder_layers, norm=decoder_norm_temp)

    def forward(self, x, query):
        """Run the block.

        Args:
            x: tensor of shape (N, B, T, C) — presumably actors x batch x
               time x channels; TODO confirm axis semantics against callers.
            query: tensor of shape (tgt_len, bsz, dim).

        Returns:
            (memory, tgt, loss): combined memory of shape (N, B, T, C), the
            decoded query of shape (tgt_len, bsz, dim), and the clustering
            encoder's auxiliary loss.
        """
        (N, B, T, C) = x.shape
        (tgt_len, bsz, dim) = query.shape
        # Actor layout: sequence axis N, batch axis B*T.
        actor_o = x.reshape(N, (B * T), (- 1))
        (memory_actor, loss) = self.encoder_actor(actor_o)
        # Temporal layout: sequence axis T, batch axis B*N.
        temp_o = x.permute(2, 1, 0, 3).contiguous().reshape(T, (B * N), (- 1))
        memory_temp = self.encoder_temp(temp_o)
        # Bring temporal memory into actor layout for cross decoder 1.
        memory_temp = memory_temp.reshape(T, B, N, (- 1)).permute(2, 1, 0, 3).contiguous().reshape(N, (B * T), (- 1))
        memory1 = self.decoder_actor1(memory_actor, memory_temp)
        # Swap both memories into temporal layout for cross decoder 2.
        memory_actor = memory_actor.reshape(N, B, T, (- 1)).permute(2, 1, 0, 3).contiguous().reshape(T, (B * N), (- 1))
        memory_temp = memory_temp.reshape(N, B, T, (- 1)).permute(2, 1, 0, 3).contiguous().reshape(T, (B * N), (- 1))
        memory2 = self.decoder_actor2(memory_actor, memory_temp)
        # Back to the common (N, B, T, C) layout and sum the two paths.
        memory1 = memory1.reshape(N, B, T, (- 1))
        memory2 = memory2.reshape(T, B, N, (- 1)).permute(2, 1, 0, 3).contiguous().reshape(N, B, T, (- 1))
        memory = (memory1 + memory2)
        # Decode the external query against the combined memory.
        tgt = self.decoder_temp(query, memory)
        tgt = tgt.reshape(tgt_len, bsz, dim)
        return (memory, tgt, loss)
class QlOsPosix(QlOs):
    """POSIX OS layer for Qiling: syscall dispatch, hook registry, fd table.

    NOTE(review): several decorators in this chunk were garbled by extraction
    — the bare ``.setter`` / ``.getter`` lines below are almost certainly
    property setters/getters (e.g. ``@stdin.setter``), ``getNameFromErrorCode``
    is presumably a ``@staticmethod`` (it takes no self), and ``fd``/``shm``/
    ``msq`` are presumably ``@property`` accessors. Confirm against upstream
    before use.
    """

    def __init__(self, ql: Qiling):
        super().__init__(ql)
        self.ql = ql
        # One sigaction slot per signal number.
        self.sigaction_act = ([0] * 256)
        # [KERNEL] profile section: emulated process identity.
        conf = self.profile['KERNEL']
        self.uid = self.euid = conf.getint('uid')
        self.gid = self.egid = conf.getint('gid')
        self.pid = conf.getint('pid')
        # [NETWORK] profile section.
        conf = self.profile['NETWORK']
        self.ipv6 = conf.getboolean('ipv6')
        self.bindtolocalhost = conf.getboolean('bindtolocalhost')
        self.ifrname_ovr = conf.get('ifrname_override')
        # User-registered syscall hooks, keyed by name or number, per stage.
        self.posix_syscall_hooks = {QL_INTERCEPT.CALL: {}, QL_INTERCEPT.ENTER: {}, QL_INTERCEPT.EXIT: {}}
        # Register that carries the syscall number, per architecture.
        self.__syscall_id_reg = {QL_ARCH.ARM64: UC_ARM64_REG_X8, QL_ARCH.ARM: UC_ARM_REG_R7, QL_ARCH.MIPS: UC_MIPS_REG_V0, QL_ARCH.X86: UC_X86_REG_EAX, QL_ARCH.X8664: UC_X86_REG_RAX, QL_ARCH.RISCV: UC_RISCV_REG_A7, QL_ARCH.RISCV64: UC_RISCV_REG_A7, QL_ARCH.PPC: UC_PPC_REG_0}[self.ql.arch.type]
        # macOS/arm64 and QNX/arm deviate from the default syscall register.
        if ((self.ql.arch.type == QL_ARCH.ARM64) and (self.type == QL_OS.MACOS)):
            self.__syscall_id_reg = UC_ARM64_REG_X16
        elif ((self.ql.arch.type == QL_ARCH.ARM) and (self.type == QL_OS.QNX)):
            self.__syscall_id_reg = UC_ARM_REG_R12
        # Calling convention used to read syscall args / set return values.
        self.__syscall_cc: QlCC = {QL_ARCH.ARM64: aarch64, QL_ARCH.ARM: aarch32, QL_ARCH.MIPS: mipso32, QL_ARCH.X86: intel32, QL_ARCH.X8664: intel64, QL_ARCH.RISCV: riscv32, QL_ARCH.RISCV64: riscv64, QL_ARCH.PPC: ppc}[self.ql.arch.type](self.ql.arch)
        # Maps syscall numbers to names for the current arch/OS.
        self.syscall_mapper = self.__get_syscall_mapper(self.ql.arch.type)
        # File descriptor table; slots 0-2 are wired to the std streams below.
        self._fd = QlFileDes()
        self.stdin = self._stdin
        self.stdout = self._stdout
        self.stderr = self._stderr
        # SysV shared memory and message queue emulation state.
        self._shm = QlShm()
        self._msq = QlMsq()

    def __get_syscall_mapper(self, archtype: QL_ARCH):
        """Load the per-OS syscall-number -> name mapper for this arch."""
        qlos_path = f'.os.{self.type.name.lower()}.map_syscall'
        qlos_func = 'get_syscall_mapper'
        func = ql_get_module_function(qlos_path, qlos_func)
        return func(archtype)

    # NOTE(review): garbled decorator — likely '@stdin.setter'.
    .setter
    def stdin(self, stream: TextIO) -> None:
        # Keep fd 0 in sync with the stdin stream.
        self._stdin = stream
        self._fd[0] = stream

    # NOTE(review): garbled decorator — likely '@stdout.setter'.
    .setter
    def stdout(self, stream: TextIO) -> None:
        # Keep fd 1 in sync with the stdout stream.
        self._stdout = stream
        self._fd[1] = stream

    # NOTE(review): garbled decorator — likely '@stderr.setter'.
    .setter
    def stderr(self, stream: TextIO) -> None:
        # Keep fd 2 in sync with the stderr stream.
        self._stderr = stream
        self._fd[2] = stream

    # NOTE(review): garbled decorator — likely '@property' (root getter).
    .getter
    def root(self) -> bool:
        """Whether the emulated process has effective root identity."""
        return ((self.euid == 0) and (self.egid == 0))

    # NOTE(review): garbled decorator — likely '@root.setter'.
    .setter
    def root(self, enabled: bool) -> None:
        # Toggling root swaps effective ids between 0 and the real ids.
        self.euid = (0 if enabled else self.uid)
        self.egid = (0 if enabled else self.gid)

    def set_syscall(self, target: Union[(int, str)], handler: Callable, intercept: QL_INTERCEPT=QL_INTERCEPT.CALL):
        """Register a syscall hook by number or (prefixed) name."""
        if (type(target) is str):
            # Names are stored with the common syscall prefix applied.
            target = f'{SYSCALL_PREF}{target}'
        self.posix_syscall_hooks[intercept][target] = handler

    def set_api(self, target: str, handler: Callable, intercept: QL_INTERCEPT=QL_INTERCEPT.CALL):
        """Register an API hook; drivers use the base implementation."""
        if self.ql.loader.is_driver:
            super().set_api(target, handler, intercept)
        else:
            self.function_hook.add_function_hook(target, handler, intercept)

    # NOTE(review): takes no self — presumably a '@staticmethod' upstream;
    # it is invoked as QlOsPosix.getNameFromErrorCode(...) below.
    def getNameFromErrorCode(ret: int) -> str:
        """Format a syscall return value, appending the errno name if any."""
        if (type(ret) is not int):
            return '?'
        return f"{ret:#x}{(f' ({errors[(- ret)]})' if ((- ret) in errors) else f'')}"

    def load_syscall(self):
        """Resolve and execute the current syscall, honoring user hooks.

        Resolution order for the handler: user CALL hook (by name, then by
        number), then the per-OS implementation module, then the shared
        posix implementation. ENTER hooks may rewrite the parameters; EXIT
        hooks may rewrite the return value.
        """
        syscall_id = self.get_syscall()
        syscall_name = self.syscall_mapper(syscall_id)
        hooks_dict = self.posix_syscall_hooks[QL_INTERCEPT.ENTER]
        onenter_hook = (hooks_dict.get(syscall_name) or hooks_dict.get(syscall_id))
        hooks_dict = self.posix_syscall_hooks[QL_INTERCEPT.EXIT]
        onexit_hook = (hooks_dict.get(syscall_name) or hooks_dict.get(syscall_id))
        hooks_dict = self.posix_syscall_hooks[QL_INTERCEPT.CALL]
        syscall_hook = (hooks_dict.get(syscall_name) or hooks_dict.get(syscall_id))
        if (not syscall_hook):
            # No user override: fall back to the built-in implementations.
            def __get_os_module(osname: str):
                return ql_get_module(f'.os.{osname.lower()}.syscall')
            os_syscalls = __get_os_module(self.type.name)
            posix_syscalls = __get_os_module('posix')
            syscall_hook = (getattr(os_syscalls, syscall_name, None) or getattr(posix_syscalls, syscall_name, None))
        if syscall_hook:
            syscall_name = syscall_hook.__name__
            # Derive the argument count from the handler's signature
            # (skipping the leading 'ql' parameter) and read raw args.
            param_names = tuple(signature(syscall_hook).parameters.values())
            param_names = [info.name for info in param_names[1:] if (info.kind == Parameter.POSITIONAL_OR_KEYWORD)]
            params = [self.__syscall_cc.getRawParam(i) for i in range(len(param_names))]
            try:
                if onenter_hook:
                    # ENTER hook may replace the parameter list.
                    overrides = onenter_hook(self.ql, *params)
                    if (overrides is not None):
                        (_, params) = overrides
                retval = syscall_hook(self.ql, *params)
                if onexit_hook:
                    # EXIT hook may replace the return value.
                    override = onexit_hook(self.ql, *params, retval)
                    if (override is not None):
                        retval = override
                if (retval is not None):
                    self.__syscall_cc.setReturnValue(retval)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                self.ql.log.exception(f'Syscall ERROR: {syscall_name} DEBUG: {e}')
                raise e
            # Strip the common prefix for logging purposes.
            syscall_basename = syscall_name[(len(SYSCALL_PREF) if syscall_name.startswith(SYSCALL_PREF) else 0):]
            args = []
            for (name, value) in zip(param_names, params):
                # Drop the redundant '<basename>_' prefix from arg names.
                if name.startswith(f'{syscall_basename}_'):
                    name = name.partition('_')[(- 1)]
                args.append((name, f'{value:#x}'))
            sret = QlOsPosix.getNameFromErrorCode(retval)
            self.utils.print_function(self.ql.arch.regs.arch_pc, syscall_basename, args, sret, False)
            self.stats.log_api_call(self.ql.arch.regs.arch_pc, syscall_name, dict(zip(param_names, params)), retval, None)
        else:
            self.ql.log.warning(f'{self.ql.arch.regs.arch_pc:#x}: syscall {syscall_name} number = {syscall_id:#x}({syscall_id:d}) not implemented')
            if self.ql.debug_stop:
                raise QlErrorSyscallNotFound(f'Syscall not found: {syscall_name}')

    def get_syscall(self) -> int:
        """Return the current syscall number.

        On ARM, the number may be encoded as the SVC immediate in the
        instruction just executed (OABI-style); otherwise it is read from
        the architecture's syscall register.
        """
        if (self.ql.arch.type == QL_ARCH.ARM):
            # Thumb SVC is 2 bytes; ARM SVC is a full word.
            isize = (2 if self.ql.arch.is_thumb else self.ql.arch.pointersize)
            ibytes = self.ql.mem.read_ptr((self.ql.arch.regs.arch_pc - isize), isize)
            # Mask off the opcode byte, keeping the immediate field.
            svc_imm = (ibytes & ((1 << ((isize - 1) * 8)) - 1))
            # 9437184 == 0x900000 — presumably the ARM OABI syscall base;
            # TODO confirm against upstream.
            if (svc_imm >= 9437184):
                return (svc_imm - 9437184)
            if (svc_imm > 0):
                return svc_imm
        return self.ql.arch.regs.read(self.__syscall_id_reg)

    def set_syscall_return(self, retval: int):
        """Write a syscall return value via the calling convention."""
        self.__syscall_cc.setReturnValue(retval)

    # NOTE(review): likely a '@property' upstream (decorator missing).
    def fd(self):
        """File descriptor table."""
        return self._fd

    # NOTE(review): likely a '@property' upstream (decorator missing).
    def shm(self):
        """SysV shared-memory state."""
        return self._shm

    # NOTE(review): likely a '@property' upstream (decorator missing).
    def msq(self):
        """SysV message-queue state."""
        return self._msq
def set_flat_params(model, flat_params, trainable=False):
    """Copy values from a flat 1-D tensor back into a model's parameters.

    Parameters are filled in ``model.parameters()`` order. When ``trainable``
    is True, parameters with ``requires_grad == False`` are skipped (they are
    assumed absent from ``flat_params``).
    """
    offset = 0
    for param in model.parameters():
        if trainable and not param.requires_grad:
            # Frozen parameter: not present in the flat vector.
            continue
        count = int(param.numel())
        chunk = flat_params[offset:offset + count]
        param.data.copy_(chunk.view(param.size()))
        offset += count
class Effect6724(BaseEffect):
    """Passive hull effect: scales the 'duration' of modules requiring
    Remote Armor Repair Systems by the ship's eliteBonusLogistics3
    attribute, per Logistics Cruisers skill."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        bonus = src.getModifiedItemAttr('eliteBonusLogistics3')
        matches_rar_skill = lambda mod: mod.item.requiresSkill('Remote Armor Repair Systems')
        fit.modules.filteredItemBoost(matches_rar_skill, 'duration', bonus, skill='Logistics Cruisers', **kwargs)
def _orient(array, wcs):
    """Reorder a 3-D array and its WCS into (spectral, lat, lon) axis order.

    Returns the (possibly transposed) array together with a WCS rebuilt in
    longitude/latitude/spectral sub-axis order. Raises ValueError for inputs
    that are not 3-D or whose WCS does not describe exactly two celestial
    axes and one spectral axis (or contains stokes).
    """
    if array.ndim != 3:
        raise ValueError('Input array must be 3-dimensional')
    if wcs.wcs.naxis != 3:
        raise ValueError('Input WCS must be 3-dimensional')
    wcs = wcs_utils.diagonal_wcs_to_cdelt(_fix_spectral(wcs))
    # Axis types come back in WCS order; reverse into numpy axis order.
    axtypes = wcs.get_axis_types()[::-1]
    types = [a['coordinate_type'] for a in axtypes]
    n_celestial = types.count('celestial')
    if n_celestial == 0:
        raise ValueError('No celestial axes found in WCS')
    if n_celestial != 2:
        raise ValueError('WCS should contain 2 celestial dimensions but contains {0}'.format(n_celestial))
    n_spectral = types.count('spectral')
    if n_spectral == 0:
        raise ValueError('No spectral axes found in WCS')
    if n_spectral != 1:
        raise ValueError('WCS should contain one spectral dimension but contains {0}'.format(n_spectral))
    # Celestial axis numbers (1 = latitude, 0 = longitude); None elsewhere.
    nums = [a['number'] if a['coordinate_type'] == 'celestial' else None for a in axtypes]
    if 'stokes' in types:
        raise ValueError('Input WCS should not contain stokes')
    transpose_order = [types.index('spectral'), nums.index(1), nums.index(0)]
    if transpose_order == [0, 1, 2]:
        # Already in the desired order — avoid a needless transpose.
        result_array = array
    else:
        result_array = array.transpose(transpose_order)
    result_wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_SPECTRAL])
    return (result_array, result_wcs)
class Package(models.Model):
    """Django model describing a tracked software package / feed entry.

    NOTE(review): ``link_rss`` is garbled in this source — its returned
    f-string literal (presumably an RSS feed URL built from ``self.link``)
    was truncated during extraction and swallowed the ``class Meta:`` header
    that follows. Restore the original literal and the Meta header before use.
    """
    # Soft-delete / visibility flag.
    is_active = models.BooleanField(verbose_name=_('Is active'), default=True)
    name = models.CharField(verbose_name=_('Name'), max_length=255)
    description = models.TextField(verbose_name=_('Description'), blank=True)
    # Canonical URL of the package.
    link = models.URLField(verbose_name=_('URL'), max_length=255)

    def __str__(self):
        return self.name

    def link_rss(self):
        # NOTE(review): truncated literal — unterminated f-string below.
        return f' class Meta():
        verbose_name = _('Package')
        verbose_name_plural = _('Packages')
# NOTE(review): garbled decorator — presumably '@six.add_metaclass(abc.ABCMeta)'.
_metaclass(abc.ABCMeta)
class BaseGraph(object):
    """Abstract base for state-machine diagram generators.

    Subclasses build and style a graph object from the machine's markup
    (states, transitions, nested children). The abstract-ish methods below
    had their bodies stripped in this source; docstrings are used as their
    bodies here so the class parses — confirm implementations upstream.
    """

    def __init__(self, machine):
        self.machine = machine
        self.fsm_graph = None
        # Build the initial graph immediately.
        self.generate()

    def generate(self):
        """Build self.fsm_graph from the machine (body absent in this source)."""

    def set_previous_transition(self, src, dst):
        """Highlight the transition src -> dst (body absent in this source)."""

    def reset_styling(self):
        """Reset all node/edge styling (body absent in this source)."""

    def set_node_style(self, state, style):
        """Apply a named style to a state's node (body absent in this source)."""

    def get_graph(self, title=None, roi_state=None):
        """Return the rendered graph object (body absent in this source)."""

    def _convert_state_attributes(self, state):
        """Build a node label for a state, optionally appending its tags,
        enter/exit callbacks and timeout info ('\\l' is graphviz left-align)."""
        label = state.get('label', state['name'])
        if self.machine.show_state_attributes:
            if ('tags' in state):
                label += ((' [' + ', '.join(state['tags'])) + ']')
            if ('on_enter' in state):
                label += ('\\l- enter:\\l + ' + '\\l + '.join(state['on_enter']))
            if ('on_exit' in state):
                label += ('\\l- exit:\\l + ' + '\\l + '.join(state['on_exit']))
            if ('timeout' in state):
                label += (((('\\l- timeout(' + state['timeout']) + 's) -> (') + ', '.join(state['on_timeout'])) + ')')
        return (label + '\\l')

    def _get_state_names(self, state):
        """Yield fully-qualified state names, flattening nested containers
        and resolving enum states via their dotted path."""
        if isinstance(state, (list, tuple, set)):
            for res in state:
                for inner in self._get_state_names(res):
                    (yield inner)
        else:
            (yield (self.machine.state_cls.separator.join(self.machine._get_enum_path(state)) if hasattr(state, 'name') else state))

    def _transition_label(self, tran):
        """Build an edge label: trigger name, '[internal]' marker for
        dest-less transitions, and condition/unless guards when enabled."""
        edge_label = tran.get('label', tran['trigger'])
        if ('dest' not in tran):
            edge_label += ' [internal]'
        if (self.machine.show_conditions and any(((prop in tran) for prop in ['conditions', 'unless']))):
            # 'unless' guards are rendered negated ('!cond').
            edge_label = '{edge_label} [{conditions}]'.format(edge_label=edge_label, conditions=' & '.join((tran.get('conditions', []) + [('!' + u) for u in tran.get('unless', [])])))
        return edge_label

    def _get_global_name(self, path):
        """Descend into nested machines along `path` and return the
        innermost machine's global name."""
        if path:
            state = path.pop(0)
            with self.machine(state):
                return self._get_global_name(path)
        else:
            return self.machine.get_global_name()

    def _flatten(self, *lists):
        """Recursively flatten nested lists/tuples, mapping enum members
        to their names."""
        return (e for a in lists for e in (self._flatten(*a) if isinstance(a, (tuple, list)) else ((a.name if hasattr(a, 'name') else a),)))

    def _get_elements(self):
        """Collect (states, transitions) from the machine's markup config.

        Walks the nested markup breadth-first, qualifying nested state and
        transition names with their parent path, and synthesizes an unnamed
        transition for each nested state's initial child.
        """
        states = []
        transitions = []
        try:
            markup = self.machine.get_markup_config()
            queue = [([], markup)]
            while queue:
                (prefix, scope) = queue.pop(0)
                for transition in scope.get('transitions', []):
                    if prefix:
                        # Qualify source/dest names with the parent path.
                        tran = copy.copy(transition)
                        tran['source'] = self.machine.state_cls.separator.join((prefix + [tran['source']]))
                        if ('dest' in tran):
                            tran['dest'] = self.machine.state_cls.separator.join((prefix + [tran['dest']]))
                    else:
                        tran = transition
                    transitions.append(tran)
                for state in (scope.get('children', []) + scope.get('states', [])):
                    if (not prefix):
                        sta = state
                        states.append(sta)
                    # Synthesize an edge into the nested initial state.
                    ini = state.get('initial', [])
                    if (not isinstance(ini, list)):
                        ini = (ini.name if hasattr(ini, 'name') else ini)
                        tran = dict(trigger='', source=self.machine.state_cls.separator.join((prefix + [state['name']])), dest=self.machine.state_cls.separator.join((prefix + [state['name'], ini])))
                        transitions.append(tran)
                    if state.get('children', []):
                        queue.append(((prefix + [state['name']]), state))
        except KeyError:
            _LOGGER.error('Graph creation incomplete!')
        return (states, transitions)
class keep_wl():
    """Per-sample bookkeeping of loss values and weights kept on the GPU.

    Buffers are sized from labels.shape[0]; __call__ writes detached values
    at the given sample indices.
    """

    def __init__(self, labels):
        n_samples = labels.shape[0]
        self.loss = torch.zeros(n_samples, 1, dtype=torch.float).cuda(non_blocking=True)
        self.weight = torch.zeros(n_samples, dtype=torch.float).cuda(non_blocking=True)

    def __call__(self, epoch_loss=None, epoch_weight=None, index=None):
        # Store detached copies so no autograd graph is retained.
        self.loss[index] = epoch_loss.detach().data
        if epoch_weight is not None:
            self.weight[index] = epoch_weight.detach().data
def weight_pad(sim: QuantizationSimModel, layer_bw_dict: Dict[(str, WeightPaddingParams)]):
    """Simulate weight padding on a quantization-sim model.

    For each wrapped layer whose target kernel bitwidth exceeds its simulated
    bitwidth: re-grid the weight encodings at the lower simulated bitwidth,
    quantize-dequantize the weights through those encodings, write the result
    back into the wrapped module, then recompute the stored encodings for the
    target bitwidth.

    NOTE(review): ``layer_bw_dict[layer_name]`` raises KeyError for wrappers
    missing from the dict — confirm every wrapper is expected to have an entry.
    """
    for (layer_name, layer) in sim.quant_wrappers():
        bw_values = layer_bw_dict[layer_name]
        param_quant_dict = layer.param_quantizers
        # Only pad layers that have a weight quantizer and whose target
        # kernel bitwidth is strictly above the simulated one.
        if (('weight' in param_quant_dict) and (bw_values.target_kernel_bw > bw_values.simulated_bw)):
            param_weight_quant = param_quant_dict['weight']
            if (not param_weight_quant.enabled):
                continue  # disabled quantizer: nothing to pad
            layer_weight = layer._module_to_wrap.weight
            encoding_list = param_weight_quant.encoding
            if isinstance(encoding_list, libpymo.TfEncoding):
                # Normalize a per-tensor encoding into a one-element list.
                encoding_list = [param_weight_quant.encoding]
            # Recompute the quantization grid at the lower simulated bitwidth.
            computed_encodings = [recompute_grid_params(curr_encoding, bw_values.simulated_bw, use_symmetric_encoding=param_weight_quant.use_symmetric_encodings) for curr_encoding in encoding_list]
            param_weight_quant.encoding = computed_encodings
            # Quant-dequant the weights through the simulated-bw encodings
            # and bake the result into the wrapped module's weight tensor.
            quant_dequant_weight = param_weight_quant.quantize_dequantize(layer_weight, MAP_ROUND_MODE_TO_PYMO['nearest'])
            layer._module_to_wrap.weight.data = quant_dequant_weight
            # Finally, rescale the stored encodings for the target kernel bw.
            param_weight_quant.encoding = recompute_encodings(param_weight_quant, bw_values)
class NodeLaunchActor():
    """Ray actor that launches the per-node worker processes of a
    torch.distributed-style training job and waits for them to finish."""

    def run(self, master_addr, master_port, node_rank, dist_world_size, args):
        """Spawn ``args.nproc_per_node`` training processes on this node.

        Sets MASTER_ADDR/MASTER_PORT/WORLD_SIZE plus per-process RANK and
        LOCAL_RANK in each child's environment, then waits for all children.

        Raises:
            ValueError: on inconsistent --no_python/--use_env/--module flags.
            subprocess.CalledProcessError: if any child exits non-zero.
        """
        processes = []
        current_env = os.environ.copy()
        current_env['MASTER_ADDR'] = master_addr
        current_env['MASTER_PORT'] = str(master_port)
        current_env['WORLD_SIZE'] = str(dist_world_size)
        # Cap OMP threads by default so N processes don't oversubscribe CPUs.
        if ('OMP_NUM_THREADS' not in os.environ) and (args.nproc_per_node > 1):
            current_env['OMP_NUM_THREADS'] = str(1)
            print('\nSetting OMP_NUM_THREADS environment variable for each process to be {} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n'.format(current_env['OMP_NUM_THREADS']))
        for local_rank in range(0, args.nproc_per_node):
            # Global rank = node offset + local slot.
            dist_rank = (args.nproc_per_node * node_rank) + local_rank
            current_env['RANK'] = str(dist_rank)
            current_env['LOCAL_RANK'] = str(local_rank)
            with_python = not args.no_python
            cmd = []
            if with_python:
                cmd = [sys.executable, '-u']
                if args.module:
                    cmd.append('-m')
            else:
                if not args.use_env:
                    raise ValueError("When using the '--no_python' flag, you must also set the '--use_env' flag.")
                if args.module:
                    # BUGFIX: message previously read "flagand" (missing space).
                    raise ValueError("Don't use both the '--no_python' flag and the '--module' flag at the same time.")
            cmd.append(args.training_script)
            if not args.use_env:
                cmd.append('--local_rank={}'.format(local_rank))
            cmd.extend(args.training_script_args)
            process = subprocess.Popen(cmd, env=current_env)
            processes.append(process)
        # Wait for every child; surface the first failure as an exception.
        for process in processes:
            process.wait()
            if process.returncode != 0:
                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)

    def get_node_ip(self):
        """Return this node's IP address as seen by Ray."""
        return ray.services.get_node_ip_address()

    def find_free_port(self):
        """Return a currently-free TCP port on this node."""
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            s.bind(('', 0))
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            return s.getsockname()[1]
def test_submit_forms_by_get(app, client):
    """The crawler submits exactly one GET form once and follows its onward link."""
    rules = PERMISSIVE_HYPERLINKS_ONLY_RULE_SET + SUBMIT_GET_FORMS_RULE_SET
    crawler = Crawler(client=client, initial_paths=['/'], rules=rules)
    crawler.crawl()

    requested_forms = [node for node in crawler.graph.get_nodes_by_source(FORM) if node.requested]
    assert len(requested_forms) == 1
    (form,) = requested_forms

    # The form must have been submitted exactly once with parameters.
    entries = lookup_requests(app, path=form.path, method=form.method)
    submissions = [entry for entry in entries if entry.params]
    assert len(submissions) == 1

    assert '/form-submitted-by-get-onward-link' in crawler.graph.visited_paths
def handle_long_project_instruments_request(**kwargs) -> Any:
    """Mock endpoint handler: return three instruments as (status, headers, json)."""
    headers = kwargs['headers']
    instruments = [
        ('form_1', 'Form 1'),
        ('form_2', 'Form 2'),
        ('form_3', 'Form 3'),
    ]
    resp = [{'instrument_name': name, 'instrument_label': label} for (name, label) in instruments]
    return (201, headers, json.dumps(resp))
def test_run_tests_in_workers_error_traceback():
    """A worker's exception must surface with its nested call chain intact."""
    def target_fn():
        def inner_2():
            def inner_1():
                raise ValueError('42')
            inner_1()
        inner_2()

    try:
        run_tests_in_workers(target=target_fn, num_workers=1)
    except ValueError:
        _, _, tb = sys.exc_info()
        frames = traceback.format_tb(tb)
        # Compare from the innermost frame outward.
        innermost_first = ('inner_1', 'inner_2', 'target_fn')
        for fname, frame_text in zip(innermost_first, frames[::-1]):
            assert fname in frame_text
class ViewProviderAsmGroup(ViewProviderAsmBase):
    """FreeCAD view provider for assembly group objects: exposes the Group
    members as tree children and supports reordering within the group."""

    def claimChildren(self):
        # Tree children are exactly the object's Group members.
        return getattr(self.ViewObject.Object, 'Group', [])

    def doubleClicked(self, _vobj):
        # No edit mode on double-click.
        return False

    def canDropObject(self, _child):
        # Dropping external objects into the group is not allowed.
        return False

    def canReplaceObject(self, _oldObj, newObj):
        # Only objects already inside this group may take another's place.
        return (newObj in self.ViewObject.Object.Group)

    def replaceObject(self, oldObj, newObj):
        # Moves newObj to oldObj's position in the group ordering.
        # NOTE(review): oldObj is NOT removed here — only newObj is
        # repositioned; confirm this is the intended replace semantics.
        try:
            children = self.ViewObject.Object.Group
            old_idx = children.index(oldObj)
            new_idx = children.index(newObj)
            del children[new_idx]
            children.insert(old_idx, newObj)
            editGroup(self.ViewObject.Object, children)
            return True
        except Exception:
            # Best effort: any index/edit failure aborts the operation.
            return False

    def canReorderObject(self, obj, before):
        # Both objects must belong to this group.
        return ((before in self.ViewObject.Object.Group) and (obj in self.ViewObject.Object.Group))

    def reorderObjects(self, objs, before):
        # Delegates to the module-level reorderObjects() helper (same name;
        # the method's local scope does not shadow the global here).
        children = reorderObjects(self.ViewObject.Object.Group, objs, before)
        if (not children):
            return False
        editGroup(self.ViewObject.Object, children)
        return True
def tankSection(fit):
    """Build the multi-line EHP/resists text summary for a fit.

    Produces four lines: total EHP with per-damage-pattern EHP values,
    then one line per tank layer (shield/armor/hull) with its EHP and
    per-damage-type resist percentages.
    """
    # EHP per layer in tankTypes order; zeros when the fit has no EHP data.
    ehp = ([fit.ehp[tank] for tank in tankTypes] if (fit.ehp is not None) else [0, 0, 0])
    ehp.append(sum(ehp))  # total EHP appended as the 4th element
    ehpStr = [formatAmount(ehpVal, 3, 0, 9) for ehpVal in ehp]
    # Resist fraction per layer and damage type: 1 - resonance attribute.
    resists = {tankType: [(1 - fit.ship.getModifiedItemAttr(s)) for s in resonanceNames[tankType]] for tankType in tankTypes}
    # Total EHP of the hull against each predefined damage pattern.
    ehpAgainstDamageType = [sum(pattern.calculateEhp(fit.ship).values()) for pattern in damagePatterns]
    ehpAgainstDamageTypeStr = [formatAmount(ehpVal, 3, 0, 9) for ehpVal in ehpAgainstDamageType]

    def generalOutput():
        # Row 0: EHP amounts per damage pattern; rows 1-3: resist percentages.
        rowNames = ['EHP']
        rowNames.extend(RRTypes.names(postProcessor=(lambda v: v.capitalize())))
        colNames = DmgTypes.names(short=True, postProcessor=(lambda v: (' ' + v.capitalize())))
        colNames[0] = colNames[0][1:]  # first column needs no leading space
        outputScheme = []
        for (index, rowName) in enumerate(rowNames):
            row = (rowName + ': {:>} (')
            # Row 0 substitutes raw amounts; later rows substitute percentages.
            subsValue = (' {:.0%},' if (index > 0) else ' {:>},')
            row += ''.join([((colName + ':') + subsValue) for colName in colNames])
            # Drop the trailing comma and close the parenthesis.
            row = (row[:(- 1)] + ')\n')
            outputScheme.append(row)
        # ehpStr indices: 0=shield, 1=armor, 2=hull, 3=total.
        return (((outputScheme[0].format(ehpStr[3], *ehpAgainstDamageTypeStr) + outputScheme[1].format(ehpStr[0], *resists['shield'])) + outputScheme[2].format(ehpStr[1], *resists['armor'])) + outputScheme[3].format(ehpStr[2], *resists['hull']))
    return generalOutput()
class TestChangeOrganizationDetails(ApiTestCase):
    """API tests for updating organization settings via PUT."""

    def test_changeinvoiceemail(self):
        """Toggling invoice_email on then off is reflected in the response."""
        self.login(ADMIN_ACCESS_USER)
        for flag in (True, False):
            response = self.putJsonResponse(Organization, params=dict(orgname=ORGANIZATION), data=dict(invoice_email=flag))
            self.assertEqual(flag, response['invoice_email'])

    def test_changemail(self):
        """Clearing the contact email is accepted and echoed back."""
        self.login(ADMIN_ACCESS_USER)
        response = self.putJsonResponse(Organization, params=dict(orgname=ORGANIZATION), data=dict(email=''))
        self.assertEqual('', response['email'])