code
stringlengths
281
23.7M
def node_error_pass(
    get_error: Callable[[BMGraphBuilder, bn.BMGNode], Optional[BMGError]]
) -> GraphFixer:
    """Wrap a per-node error check into a GraphFixer.

    The returned pass runs ``get_error`` over every ancestor node of the
    graph and collects any non-None results into an ErrorReport.  It never
    mutates the graph, so the "made progress" flag is always False.
    """

    def error_pass(bmg: BMGraphBuilder) -> GraphFixerResult:
        report = ErrorReport()
        for node in bmg.all_ancestor_nodes():
            problem = get_error(bmg, node)
            if problem is not None:
                report.add_error(problem)
        # Diagnostic-only pass: graph unchanged, hence False.
        return bmg, False, report

    return error_pass
def test_overlapping_schemas(hydra_restore_singletons: Any) -> None:
    """A group schema must override the overlapping part of the primary schema.

    First load: only the top-level Config schema applies, so plugin stays at
    its '???' placeholders and is typed as the base Plugin.  Second load:
    '+plugin=concrete' merges the ConcretePlugin node in, re-typing the
    subtree and enabling validation.
    """
    cs = ConfigStore.instance()
    cs.store(name='config', node=Config)
    cs.store(group='plugin', name='concrete', node=ConcretePlugin)
    config_loader = ConfigLoaderImpl(config_search_path=create_config_search_path(None))
    cfg = config_loader.load_configuration(config_name='config', overrides=[], run_mode=RunMode.RUN)
    # Drop the hydra node so the comparison only covers user config.
    with open_dict(cfg):
        del cfg['hydra']
    assert (cfg == {'plugin': {'name': '???', 'params': '???'}})
    assert (OmegaConf.get_type(cfg.plugin) == Plugin)
    cfg = config_loader.load_configuration(config_name='config', overrides=['+plugin=concrete'], run_mode=RunMode.RUN)
    with open_dict(cfg):
        del cfg['hydra']
    assert (cfg == {'plugin': {'name': 'foobar_plugin', 'params': {'foo': 10}}})
    assert (OmegaConf.get_type(cfg.plugin) == ConcretePlugin)
    assert (OmegaConf.get_type(cfg.plugin.params) == ConcretePlugin.FoobarParams)
    # Once typed, assigning a non-conforming value must be rejected.
    with raises(ValidationError):
        cfg.plugin = 10
def export_xml(color_list, export_grid=False):
    """Serialize a color list to Pencil palette XML.

    Each channel value is padded with spaces after its closing quote so
    columns line up for values shorter than three digits.
    """
    colors, names = get_export_color_list(color_list, export_grid=export_grid)
    pieces = ['<!DOCTYPE PencilPalette>\n<palette>\n']
    for color, name in zip(colors, names):
        r, g, b = color.rgb
        pad_r, pad_g, pad_b = (' ' * (3 - len(str(v))) for v in (r, g, b))
        pieces.append(
            " <colour red='{}'{} green='{}'{} blue='{}'{} alpha='255' name='{}'/>\n".format(
                r, pad_r, g, pad_g, b, pad_b, name
            )
        )
    pieces.append('</palette>\n')
    return ''.join(pieces)
class UserConfigItem():
    """Mutable record describing one user-configurable setting."""

    def __init__(self, desc: str = None) -> None:
        self.default_value = None
        self.is_optional = False
        self.item_type = 'str'
        self.desc = desc
        self.value = None
        # Tracks whether the user explicitly assigned a value.
        self.user_set = False

    def clone(self):
        """Return a copy of this item; user_set stays False on the copy."""
        duplicate = UserConfigItem()
        for attr in ('default_value', 'is_optional', 'desc', 'item_type', 'value'):
            setattr(duplicate, attr, getattr(self, attr))
        return duplicate
class OptionPlotoptionsColumnrangeSonificationContexttracksMappingLowpassFrequency(Options):
    """Accessors for the columnrange sonification lowpass-frequency mapping.

    NOTE(review): the source defined each getter/setter pair as two plain
    methods with the same name, so the second definition silently replaced
    the first and attribute assignment never reached _config.  The bodies
    match the framework's @property/@setter pattern; decorators restored —
    confirm against the original module.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def extractTranlatioWordpressCom(item):
    """Map a feed item to a release message by tag.

    Returns None when the title has no chapter/volume info or is a preview;
    False when no known tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # tag -> (series name, translation type); first match wins.
    releases = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (name, tl_type) in releases.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type
            )
    return False
class AddArticleForm(forms.Form):
    """Django form for creating an article.

    Auto-fills the abstract from rendered content, picks a random cover when
    none is supplied, and computes the word count from the raw content.
    """
    title = forms.CharField(error_messages={'required': ''})
    content = forms.CharField(error_messages={'required': ''})
    abstract = forms.CharField(required=False)
    cover_id = forms.CharField(required=False)
    category = forms.IntegerField(required=False)
    status = forms.IntegerField(required=False)
    pwd = forms.CharField(required=False)
    recommend = forms.BooleanField(required=False)
    word = forms.IntegerField(required=False)

    def clean_abstract(self):
        # Default the abstract to the first 90 chars of the markdown-rendered
        # content text when the user left it blank.
        abstract = self.cleaned_data['abstract']
        if abstract:
            return abstract
        content = self.cleaned_data.get('content')
        if content:
            abstract = PyQuery(markdown(content)).text()[:90]
        return abstract

    def clean_cover_id(self):
        # Pick a random existing cover when none was supplied.
        # NOTE(review): random.choice over the values() queryset assumes the
        # Cover table is non-empty — an empty table would raise IndexError.
        cover_id = self.cleaned_data['cover_id']
        if cover_id:
            return cover_id
        cover_set = Cover.objects.all().values('nid')
        cover_id = random.choice(cover_set)['nid']
        return cover_id

    def clean_word(self):
        # Word count is the raw content length (0 when content is missing).
        return len(self.cleaned_data.get('content', ''))
@_ocx_not_available
@_not_currently_in_session
def test_GetRealDataForCodesAsStream(entrypoint):
    """Smoke-test the real-data streaming API for a single stock code.

    Pulls the first few events off the stream, cancels it, then checks every
    event is an OnReceiveRealData for the requested code.

    NOTE(review): the source showed the two marker names as bare expression
    statements (almost certainly decorators stripped by decompilation);
    restored as decorators — confirm against the original module.
    """
    from koapy import KiwoomOpenApiPlusRealType

    code = '005930'
    realtype_name = ''
    code_list = [code]
    fid_list = KiwoomOpenApiPlusRealType.get_fids_by_realtype_name(realtype_name)
    opt_type = '0'
    stream = entrypoint.GetRealDataForCodesAsStream(code_list, fid_list, opt_type, screen_no=None, infer_fids=True, readable_names=True, fast_parse=False)
    check_count = 10
    events = itertools.islice(stream, check_count)
    events = list(events)
    # Stop the server-side stream before asserting on the captured events.
    stream.cancel()
    for event in events:
        assert (event.name == 'OnReceiveRealData')
        assert (event.arguments[0].string_value == code)
        assert (event.arguments[1].string_value in [''])
def test_python_random_determinism():
    """A seeded random.Random must reproduce a fixed randint sequence."""
    rng = random.Random()
    rng.seed('theseed', version=2)
    # Sequence recorded once; any drift means CPython's PRNG changed.
    for expected in (72, 90, 41, 16, 11):
        assert (rng.randint(0, 100) == expected)
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
    """LLDP beacon test where per-port config relies on top-level defaults."""

    CONFIG = '\n lldp_beacon:\n send_interval: 5\n max_per_interval: 5\n interfaces:\n %(port_1)d:\n native_vlan: 100\n lldp_beacon:\n enable: True\n org_tlvs:\n - {oui: 0x12bb, subtype: 2, info: ""}\n'

    def test_untagged(self):
        """Capture one LLDP beacon and verify the mandatory and org TLVs."""
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = 'ether proto 0x88cc'  # LLDP ethertype
        # Wait up to three beacon intervals (send_interval is 5s).
        timeout = (5 * 3)
        tcpdump_txt = self.tcpdump_helper(first_host, tcpdump_filter, [(lambda : first_host.cmd(('sleep %u' % timeout)))], timeout=timeout, vflags='-vv', packets=1)
        # Each regex must appear somewhere in the decoded capture.
        for lldp_required in (('%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC), 'Application type \\[voice\\] \\(0x01\\), Flags \\[Tagged\\]Vlan id 50', 'System Name TLV \\(5\\), length 8: faucet-1', ('Port Description TLV \\(4\\), length [1-9]: b%u' % self.port_map['port_1'])):
            self.assertTrue(re.search(lldp_required, tcpdump_txt), msg=('%s: %s' % (lldp_required, tcpdump_txt)))
def test_pagination(setup_test_data, client, monkeypatch, helpers):
    """Exercise limit/page and sort/order handling of the agencies endpoint.

    setup_test_data is an unused-but-required fixture that seeds the three
    test agencies the expected payloads below refer to.
    """
    helpers.mock_current_fiscal_year(monkeypatch)
    # Page 1 with limit=1 -> only Test Agency 2.
    resp = client.get((url + '?limit=1'))
    assert (resp.status_code == status.HTTP_200_OK)
    response = resp.json()
    assert (len(response['results']) == 1)
    expected_results = [{'agency_name': 'Test Agency 2', 'abbreviation': 'XYZ', 'toptier_code': '987', 'agency_id': 2, 'current_total_budget_authority_amount': 100.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-{(CURRENT_LAST_PERIOD + 1):02}-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 18.6, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 12.0, 'missing_tas_accounts_count': 1}, 'obligation_difference': 0.0, 'unlinked_contract_award_count': 40, 'unlinked_assistance_award_count': 60, 'assurance_statement_url': assurance_statement_2}]
    assert (response['results'] == expected_results)
    # Page 2 with limit=1 -> Test Agency 3.
    resp = client.get((url + '?limit=1&page=2'))
    assert (resp.status_code == status.HTTP_200_OK)
    response = resp.json()
    assert (len(response['results']) == 1)
    expected_results = [{'agency_name': 'Test Agency 3', 'abbreviation': 'AAA', 'toptier_code': '001', 'agency_id': 3, 'current_total_budget_authority_amount': 10.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-07-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 20.0, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 0.0, 'missing_tas_accounts_count': 0}, 'obligation_difference': 10.0, 'unlinked_contract_award_count': 400, 'unlinked_assistance_award_count': 600, 'assurance_statement_url': assurance_statement_3}]
    assert (response['results'] == expected_results)
    # Descending sort on obligation_difference: agency 3, 2, then the
    # all-None agency 1 last.
    resp = client.get((url + '?sort=obligation_difference&order=desc'))
    assert (resp.status_code == status.HTTP_200_OK)
    response = resp.json()
    assert (len(response['results']) == 3)
    expected_results = [{'agency_name': 'Test Agency 3', 'abbreviation': 'AAA', 'toptier_code': '001', 'agency_id': 3, 'current_total_budget_authority_amount': 10.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-{(CURRENT_LAST_PERIOD + 1):02}-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 20.0, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 0.0, 'missing_tas_accounts_count': 0}, 'obligation_difference': 10.0, 'unlinked_contract_award_count': 400, 'unlinked_assistance_award_count': 600, 'assurance_statement_url': assurance_statement_3}, {'agency_name': 'Test Agency 2', 'abbreviation': 'XYZ', 'toptier_code': '987', 'agency_id': 2, 'current_total_budget_authority_amount': 100.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-{(CURRENT_LAST_PERIOD + 1):02}-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 18.6, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 12.0, 'missing_tas_accounts_count': 1}, 'obligation_difference': 0.0, 'unlinked_contract_award_count': 40, 'unlinked_assistance_award_count': 60, 'assurance_statement_url': assurance_statement_2}, {'agency_name': 'Test Agency', 'abbreviation': 'ABC', 'toptier_code': '123', 'agency_id': 1, 'current_total_budget_authority_amount': None, 'recent_publication_date': None, 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': None, 'tas_accounts_total': None, 'tas_obligation_not_in_gtas_total': None, 'missing_tas_accounts_count': None}, 'obligation_difference': None, 'unlinked_contract_award_count': None, 'unlinked_assistance_award_count': None, 'assurance_statement_url': None}]
    assert (response['results'] == expected_results)
    # Ascending sort on unlinked_assistance_award_count: 60, 600, None-last.
    resp = client.get((url + '?sort=unlinked_assistance_award_count&order=asc'))
    assert (resp.status_code == status.HTTP_200_OK)
    response = resp.json()
    assert (len(response['results']) == 3)
    expected_results = [{'agency_name': 'Test Agency 2', 'abbreviation': 'XYZ', 'toptier_code': '987', 'agency_id': 2, 'current_total_budget_authority_amount': 100.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-{(CURRENT_LAST_PERIOD + 1):02}-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 18.6, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 12.0, 'missing_tas_accounts_count': 1}, 'obligation_difference': 0.0, 'unlinked_contract_award_count': 40, 'unlinked_assistance_award_count': 60, 'assurance_statement_url': assurance_statement_2}, {'agency_name': 'Test Agency 3', 'abbreviation': 'AAA', 'toptier_code': '001', 'agency_id': 3, 'current_total_budget_authority_amount': 10.0, 'recent_publication_date': f'{CURRENT_FISCAL_YEAR}-{(CURRENT_LAST_PERIOD + 1):02}-07T00:00:00Z', 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': 20.0, 'tas_accounts_total': 100.0, 'tas_obligation_not_in_gtas_total': 0.0, 'missing_tas_accounts_count': 0}, 'obligation_difference': 10.0, 'unlinked_contract_award_count': 400, 'unlinked_assistance_award_count': 600, 'assurance_statement_url': assurance_statement_3}, {'agency_name': 'Test Agency', 'abbreviation': 'ABC', 'toptier_code': '123', 'agency_id': 1, 'current_total_budget_authority_amount': None, 'recent_publication_date': None, 'recent_publication_date_certified': False, 'tas_account_discrepancies_totals': {'gtas_obligation_total': None, 'tas_accounts_total': None, 'tas_obligation_not_in_gtas_total': None, 'missing_tas_accounts_count': None}, 'obligation_difference': None, 'unlinked_contract_award_count': None, 'unlinked_assistance_award_count': None, 'assurance_statement_url': None}]
    assert (response['results'] == expected_results)
class OptionPlotoptionsTimelineSonificationContexttracksMappingHighpassFrequency(Options):
    """Accessors for the timeline sonification highpass-frequency mapping.

    NOTE(review): the source defined each getter/setter pair as two plain
    methods with the same name, so the second definition silently replaced
    the first and attribute assignment never reached _config.  The bodies
    match the framework's @property/@setter pattern; decorators restored —
    confirm against the original module.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class THBattleFaithBootstrap(BootstrapAction):
    """Game bootstrap for THBattleFaith.

    Handles seating/role assignment, character selection, opening hands, an
    optional redraw for everyone after the first player, then drives the
    round-robin turn loop.
    """
    game: 'THBattleFaith'

    def __init__(self, params: Dict[(str, Any)], items: Dict[(Player, List[GameItem])], players: BatchList[Player]):
        self.source = self.target = None
        self.params = params
        self.items = items
        self.players = players

    def apply_action(self) -> bool:
        g = self.game
        params = self.params
        pl = self.players
        g.deck = Deck(g)
        g.roles = {}
        (H, M) = (THBFaithRole.HAKUREI, THBFaithRole.MORIYA)
        if params['random_seat']:
            # Shuffle seats deterministically from the shared game seed so
            # all peers compute the same ordering.
            orig_pl = BatchList(pl)
            seed = get_seed_for(g, pl)
            random.Random(seed).shuffle(pl)
            g.emit_event('reseat', (orig_pl, pl))
            # Pick one of two faction layouts, doubled so a random rotation
            # (offset s) can be taken as a contiguous 6-seat window.
            L = [[H, H, M, M, H, M], [H, M, H, M, H, M]]
            rnd = random.Random(get_seed_for(g, pl))
            L = (rnd.choice(L) * 2)
            s = rnd.randrange(0, 6)
            rl = L[s:(s + 6)]
            del L, s, rnd
        else:
            rl = [H, M, H, M, H, M]
        # Assign and publicly reveal every role.
        for (p, role) in zip(pl, rl):
            g.roles[p] = PlayerRole(THBFaithRole)
            g.roles[p].set(role)
            g.process_action(RevealRole(p, g.roles[p], pl))
        g.forces = {H: BatchList(), M: BatchList()}
        g.pool = {H: BatchList(), M: BatchList()}
        for p in pl:
            g.forces[g.roles[p].get()].append(p)
        # Dice roll decides who acts first.
        roll_rst = roll(g, pl, self.items)
        from . import characters
        chars = characters.get_characters('common', 'faith')
        (choices, _) = build_choices(g, pl, self.items, chars, spec={p: {'num': 4} for p in pl})
        # Each player orders their 4 candidates; top pick is played, second
        # pick goes face-up into the faction's replacement pool.
        rst = g.user_input(pl, SortCharacterInputlet(g, choices, 2), timeout=30, type='all')
        g.players = BatchList([Character(p) for p in pl])
        first: Character
        for (i, ch) in enumerate(g.players):
            p = ch.player
            (a, b) = [choices[p][i] for i in rst[p][:2]]
            ch = g.switch_character(ch, a)
            if (p is roll_rst[0]):
                first = ch
                first_index = i
            g.players[i] = ch
            b.chosen = None
            g.forces[g.roles[p].get()].reveal(b)
            g.pool[g.roles[p].get()].append(b)
        order = BatchList(range(len(pl))).rotate_to(first_index)
        g.emit_event('game_begin', g)
        for ch in g.players:
            g.process_action(DistributeCards(ch, amount=4))
        # Everyone except the first player may opt to redraw their hand.
        reordered = g.players.rotate_to(first)
        rst = g.user_input(reordered[1:], ChooseOptionInputlet(DeathHandler(g), (False, True)), type='all')
        for p in reordered[1:]:
            (rst.get(p) and g.process_action(RedrawCards(p, p)))
        # Main loop: cycle seats starting from the first player; 6000
        # iterations is a hard safety cap (1000 rounds of 6 seats).
        for (i, idx) in enumerate(cycle(order)):
            if (i >= 6000):
                break
            g.round = ((i // 6) + 1)
            ch = g.players[idx]
            if ch.dead:
                continue
            try:
                g.process_action(PlayerTurn(ch))
            except InterruptActionFlow:
                pass
        return True
class L2Distance(Loss):
    """L2 loss between two arrays of identical shape.

    get_grad returns the elementwise difference (divided by the batch size,
    i.e. the first dimension, when normalize=True); get_loss is the sum of
    squares of that gradient.
    """

    def __init__(self, *, normalize: bool=True):
        # normalize: scale the gradient by 1/batch_size.
        self.normalize = normalize

    def __call__(self, guesses: Floats2d, truths: Floats2d) -> Tuple[(Floats2d, float)]:
        """Return (gradient, loss) for one batch."""
        return (self.get_grad(guesses, truths), self.get_loss(guesses, truths))

    def _check_shapes(self, guesses: Floats2d, truths: Floats2d) -> None:
        # Shared validation (previously duplicated verbatim in get_grad and
        # get_loss); error message unchanged.
        if (guesses.shape != truths.shape):
            err = f'Cannot calculate L2 distance: mismatched shapes: {guesses.shape} vs {truths.shape}.'
            raise ValueError(err)

    def get_grad(self, guesses: Floats2d, truths: Floats2d) -> Floats2d:
        """Gradient of the loss w.r.t. guesses.  Raises ValueError on shape mismatch."""
        self._check_shapes(guesses, truths)
        difference = (guesses - truths)
        if self.normalize:
            difference = (difference / guesses.shape[0])
        return difference

    def get_loss(self, guesses: Floats2d, truths: Floats2d) -> float:
        """Scalar loss; shape validation happens inside get_grad."""
        d_truth = self.get_grad(guesses, truths)
        return (d_truth ** 2).sum()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
    """802.1X test where RADIUS Filter-Id dynamically selects the port ACL.

    User 1 authenticates into an allow-ICMP ACL, user 2 into a deny-ICMP
    ACL; both ACLs permit ARP so hosts stay learnable.
    """

    DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}}, {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}, {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}}, {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}}, {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}}, {'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}}]

    # Supplicant config for the user mapped (via Filter-Id) to accept_acl.
    wpasupplicant_conf_1 = '\nap_scan=0\nnetwork={\n key_mgmt=IEEE8021X\n eap=MD5\n identity="filter_id_user_accept"\n password="accept_pass"\n}\n '

    # Supplicant config for the user mapped (via Filter-Id) to deny_acl.
    wpasupplicant_conf_2 = '\nap_scan=0\nnetwork={\n key_mgmt=IEEE8021X\n eap=MD5\n identity="filter_id_user_deny"\n password="deny_pass"\n}\n '

    CONFIG_GLOBAL = '\nvlans:\n 100:\n description: "untagged"\nacls:\n accept_acl:\n dot1x_assigned: True\n rules:\n - rule:\n dl_type: 0x800 # Allow ICMP / IPv4\n ip_proto: 1\n actions:\n allow: True\n - rule:\n dl_type: 0x0806 # ARP Packets\n actions:\n allow: True\n deny_acl:\n dot1x_assigned: True\n rules:\n - rule:\n dl_type: 0x800 # Deny ICMP / IPv4\n ip_proto: 1\n actions:\n allow: False\n - rule:\n dl_type: 0x0806 # ARP Packets\n actions:\n allow: True\n '

    CONFIG = '\n dot1x:\n nfv_intf: NFV_INTF\n nfv_sw_port: %(port_4)d\n radius_ip: 127.0.0.1\n radius_port: RADIUS_PORT\n radius_secret: SECRET\n interfaces:\n %(port_1)d:\n name: b1\n description: "b1"\n native_vlan: 100\n # 802.1x client.\n dot1x: True\n dot1x_dyn_acl: True\n %(port_2)d:\n name: b2\n description: "b2"\n native_vlan: 100\n # 802.1X client.\n dot1x: True\n dot1x_dyn_acl: True\n %(port_3)d:\n name: b3\n description: "b3"\n native_vlan: 100\n # ping host.\n %(port_4)d:\n name: b4\n description: "b4"\n output_only: True\n # "NFV host - interface used by controller."\n '

    def test_untagged(self):
        """Before auth both hosts are blocked; after auth the dynamic ACLs
        let user 1 ping and still block user 2's ICMP."""
        port_no1 = self.port_map['port_1']
        port_no2 = self.port_map['port_2']
        # Pre-auth: no traffic passes for either supplicant.
        self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
        self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
        self.assertTrue(self.try_8021x(self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.assertTrue(self.try_8021x(self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
        # Post-auth: accept_acl permits ICMP for host 1; deny_acl blocks it
        # for host 2.
        self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
        self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
        self.post_test_checks()
def create_award_type_aliases(client, config):
    """Create one filtered Elasticsearch alias per award-type bucket.

    Alias names are '<query_alias_prefix>-<award_type>'; each alias filters
    the index down to its bucket's award type codes.
    """
    for award_type, award_type_codes in INDEX_ALIASES_TO_AWARD_TYPES.items():
        alias_name = f"{config['query_alias_prefix']}-{award_type}"
        if config['verbose']:
            logger.info(format_log(
                f"Putting alias '{alias_name}' on {config['index_name']} with award codes {award_type_codes}",
                action='ES Alias',
            ))
        put_alias(
            client,
            config['index_name'],
            alias_name,
            {'filter': {'terms': {'type': award_type_codes}}},
        )
class TestSyncMimeliteServers():
    """Tests for the synchronous MimeLite server/client training loop.

    NOTE(review): the source contained a bare `.parametrize('num_clients',
    [10, 1])` expression before test_broadcast_message — a syntax error that
    is clearly a stripped decorator; restored as @pytest.mark.parametrize.
    Confirm pytest is imported in the original module.
    """

    def _fake_client(self, dataset=None):
        # Build a MimeLite client; fabricate a tiny random dataset when the
        # caller does not supply one.
        if (dataset is None):
            dataset = [torch.rand(5, 2) for _ in range(3)]
            dataset = DatasetFromList(dataset)
            dataset = DummyUserData(dataset, SampleNet(SampleFC()))
        clnt = MimeLiteClient(dataset=dataset, **OmegaConf.structured(MimeLiteClientConfig(optimizer=LocalOptimizerSGDConfig(lr=0.2, momentum=0.9))))
        return clnt

    def test_mimelite_training(self):
        """Run 5 synchronous rounds with two clients and pin the final weight."""
        dataset1 = [torch.tensor([[0.6], [0.4]]), torch.tensor([[0.2]])]
        dataset2 = [torch.tensor([[0.1], [0.8]])]
        dataset1 = DatasetFromList(dataset1)
        dataset1 = DummyUserData(dataset1, SampleNet(SampleFC()))
        clnt1 = self._fake_client(dataset1)
        dataset2 = DatasetFromList(dataset2)
        dataset2 = DummyUserData(dataset2, SampleNet(SampleFC()))
        clnt2 = self._fake_client(dataset2)
        clients = [clnt1, clnt2]
        server_model = SampleNet(SampleFC())
        server = instantiate(SyncMimeLiteServerConfig(server_optimizer=FedAvgWithLROptimizerConfig(lr=1.0, momentum=0.9)), global_model=server_model)
        for _ in range(5):
            server.init_round()
            broadcast_message = server.broadcast_message_to_clients(clients)
            for clnt in clients:
                (delta, weight) = clnt.generate_local_update(broadcast_message)
                server.receive_update_from_client(Message(delta, weight))
            server.step()
        assert torch.allclose(server_model.fl_get_module().fc, torch.tensor([2.30543])), 'Model parameter does not match after 5 rounds'

    def _create_fake_clients(self, num_clients) -> List[MimeLiteClient]:
        return [self._fake_client() for _ in range(num_clients)]

    @pytest.mark.parametrize('num_clients', [10, 1])
    def test_broadcast_message(self, num_clients) -> None:
        """Broadcast must carry the global model and the server optimizer state."""
        server_model = SampleNet(create_model_with_value(0))
        server = instantiate(SyncMimeLiteServerConfig(), global_model=server_model)
        server.init_round()
        clients = self._create_fake_clients(num_clients)
        server_state_message = server.broadcast_message_to_clients(clients)
        assertIsInstance(server_state_message, Message)
        assertEqual(server_model, server_state_message.model)
        error_msg = verify_optimizer_state_dict_equal(server._optimizer.state_dict()['state'], server_state_message.server_opt_state)
        assertEmpty(error_msg, msg=error_msg)
class JSONTranslator():
    """Falcon middleware that decodes JSON requests and encodes JSON responses."""

    def process_request(self, req, resp):
        """Parse the request body into req.context.doc, or raise 400."""
        if req.content_length in (None, 0):
            # Nothing to parse (no body advertised).
            return
        body = req.bounded_stream.read()
        if not body:
            raise falcon.HTTPBadRequest(
                title='Empty request body',
                description='A valid JSON document is required.',
            )
        try:
            req.context.doc = json.loads(body.decode('utf-8'))
        except (ValueError, UnicodeDecodeError):
            raise falcon.HTTPBadRequest(
                title='Malformed JSON',
                description='Could not decode the request body. The JSON was incorrect or not encoded as UTF-8.',
            )

    def process_response(self, req, resp, resource, req_succeeded):
        """Serialize resp.context.result (when present) into the body."""
        if not hasattr(resp.context, 'result'):
            return
        resp.text = json.dumps(resp.context.result)
class TestMatrixStoreBuildEndToEnd(TestMatrixStoreBuild):
    """Runs the MatrixStore build against a real SQLite file on disk.

    NOTE(review): create_matrixstore and tearDownClass take `cls` but had no
    @classmethod decorator in the source — almost certainly stripped during
    extraction (tearDownClass is a classmethod by unittest contract);
    restored here.
    """

    @classmethod
    def create_matrixstore(cls, data_factory, end_date, number_of_months):
        cls.tempdir = tempfile.mkdtemp()
        cls.data_file = import_test_data_full(cls.tempdir, data_factory, end_date, months=number_of_months)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tempdir)

    def setUp(self):
        if (not os.path.exists(self.data_file)):
            raise RuntimeError('No SQLite file created')
        self.connection = sqlite3.connect(self.data_file)

    def tearDown(self):
        self.connection.close()

    def test_same_file_produced_by_import_test_data_fast(self):
        """The fast in-memory import must dump byte-identically to the full one."""
        # Temporarily drop the cached sqlite3 module so the private
        # sqlite3.dump._iterdump helper can be imported directly, then
        # restore the cache.
        orig_sqlite3 = sys.modules.pop('sqlite3')
        from sqlite3.dump import _iterdump
        sys.modules['sqlite3'] = orig_sqlite3
        other_connection = sqlite3.connect(':memory:')
        import_test_data_fast(other_connection, self.data_factory, self.end_date, self.number_of_months)
        db_dump = list(_iterdump(self.connection))
        other_db_dump = list(_iterdump(other_connection))
        self.assertEqual(db_dump, other_db_dump)
class DuschinskyResult():
    """Result container for a Duschinsky analysis.

    NOTE(review): the bare class-level annotations strongly suggest this was
    decorated with @dataclass (or attrs) in the original module — as written
    the class has no generated __init__; confirm upstream.
    """
    J: np.ndarray  # Duschinsky rotation matrix
    K: np.ndarray  # displacement vector (units set by the constants below)
    ref: DuschinskyRef

    def to_K_unitless(self, wavenums):
        """Scale K by a frequency-dependent factor to a dimensionless shift.

        wavenums: vibrational wavenumbers in cm^-1 (converted to m^-1 below).
        Relies on module-level constants HBAR, C, BOHR2M, AMU2KG_SQRT.
        """
        wavenums_m = (wavenums * 100.0)  # cm^-1 -> m^-1
        conv = ((1 / np.sqrt((HBAR / ((((2 * np.pi) * wavenums_m) * C) * (BOHR2M ** 2))))) * AMU2KG_SQRT)
        K_unitless = (self.K * conv)
        return K_unitless
class OptionSeriesVennSonificationDefaultinstrumentoptionsActivewhen(Options):
    """Accessors for the venn sonification default-instrument activeWhen options.

    NOTE(review): the source defined each getter/setter pair as two plain
    methods with the same name, so the second definition silently replaced
    the first and attribute assignment never reached _config.  The bodies
    match the framework's @property/@setter pattern; decorators restored —
    confirm against the original module.
    """

    @property
    def crossingDown(self):
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class CosmTradeDialogues(Model, BaseCosmTradeDialogues):
    """Skill model keeping track of cosm_trade dialogues for this agent."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the model, then the dialogues base with our address."""
        Model.__init__(self, **kwargs)

        def infer_initial_role(message: Message, receiver_address: Address) -> Dialogue.Role:
            # This skill always participates in the AGENT role, regardless
            # of who sent the first message.
            return CosmTradeDialogue.Role.AGENT

        BaseCosmTradeDialogues.__init__(
            self,
            self_address=self.context.agent_address,
            role_from_first_message=infer_initial_role,
            dialogue_class=CosmTradeDialogue,
        )
@pytest.mark.skipif(('pandas' not in sys.modules), reason='Pandas is not installed.')
def test_union():
    """Union-typed task input must serialize each variant with its own format.

    NOTE(review): the source began with a bare `.skipif(...)` expression — a
    syntax error that is clearly a stripped `@pytest.mark.skipif` decorator;
    restored here.  The inner `t1` is passed to get_serializable as a task,
    which presumably also had a stripped @task decorator — confirm upstream.
    """
    import pandas as pd

    def t1(data: Annotated[(Union[(np.ndarray, pd.DataFrame, Sequence)], 'some annotation')]):
        print(data)
    task_spec = get_serializable(OrderedDict(), serialization_settings, t1)
    variants = task_spec.template.interface.inputs['data'].type.union_type.variants
    assert (variants[0].blob.format == 'NumpyArray')
    assert (variants[1].structured_dataset_type.format == '')
    assert (variants[2].blob.format == FlytePickleTransformer.PYTHON_PICKLE_FORMAT)
def test_parse_color() -> None:
    """parse_color rejects malformed color specs and accepts known-good ones."""
    # Each of these must raise ValueError.
    for bad in ('', 'aaa', 'midnightblack', '#123', '#12345', '#1234567'):
        with pytest.raises(ValueError):
            staticmaps.parse_color(bad)
    # Each of these must parse without raising.
    for good in ('0x1a2b3c', '0x1A2B3C', '#1a2b3c', '0x1A2B3C', '0x1A2B3C4D', 'black', 'RED', 'Green', 'transparent'):
        staticmaps.parse_color(good)
class RateTestCase(unittest.TestCase):
    """Unit tests for Rate: timebase/ntsc validation, defaults, and XML I/O.

    NOTE(review): several assertions read cm.exception.message, which plain
    Python 3 exceptions do not provide — presumably Rate raises a custom
    TypeError subclass (or this suite targets Python 2); confirm upstream.
    """

    # --- timebase: defaults to '25', must be a str, None resets to default ---
    def test_timebase_argument_skipped(self):
        r = Rate()
        self.assertEqual(r.timebase, '25')

    def test_timebase_argument_is_None(self):
        r = Rate(timebase=None)
        self.assertEqual(r.timebase, '25')

    def test_timebase_attribute_is_set_to_None(self):
        r = Rate(timebase='25')
        r.timebase = None
        self.assertEqual(r.timebase, '25')

    def test_timebase_argument_is_not_a_str_value(self):
        with self.assertRaises(TypeError) as cm:
            Rate(timebase=24)
        self.assertEqual(cm.exception.message, 'Rate.timebase should be a str, not int')

    def test_timebase_attribute_is_not_a_str(self):
        r = Rate(timebase='12')
        with self.assertRaises(TypeError) as cm:
            r.timebase = 15
        self.assertEqual(cm.exception.message, 'Rate.timebase should be a str, not int')

    def test_timebase_argument_is_working_properly(self):
        r = Rate(timebase='12')
        self.assertEqual('12', r.timebase)

    def test_timebase_attribute_is_working_properly(self):
        r = Rate(timebase='12')
        r.timebase = '15'
        self.assertEqual('15', r.timebase)

    # --- ntsc: defaults to False, must be a bool, None resets to default ---
    def test_ntsc_argument_is_skipped(self):
        r = Rate()
        self.assertFalse(r.ntsc)

    def test_ntsc_argument_is_None(self):
        r = Rate()
        self.assertFalse(r.ntsc)

    def test_ntsc_attribute_is_set_to_None(self):
        r = Rate(ntsc=True)
        self.assertTrue(r.ntsc)
        r.ntsc = None
        self.assertFalse(r.ntsc)

    def test_ntsc_argument_is_not_a_bool_value(self):
        with self.assertRaises(TypeError) as cm:
            r = Rate(ntsc='not a bool value')
        self.assertEqual(str(cm.exception), 'Rate.ntsc should be a bool value, not str')

    def test_ntsc_attribute_is_set_to_a_non_bool_value(self):
        r = Rate(ntsc=True)
        with self.assertRaises(TypeError) as cm:
            r.ntsc = 'not a bool value'
        self.assertEqual(str(cm.exception), 'Rate.ntsc should be a bool value, not str')

    def test_ntsc_argument_is_working_properly(self):
        r = Rate(ntsc=True)
        self.assertEqual(r.ntsc, True)

    def test_ntsc_attribute_is_working_properly(self):
        r = Rate(ntsc=True)
        self.assertEqual(r.ntsc, True)
        r.ntsc = False
        self.assertEqual(r.ntsc, False)

    # --- XML round-trip ---
    def test_to_xml_method_is_working_properly(self):
        r = Rate(timebase='25', ntsc=False)
        self.assertEqual(r.to_xml(), '<rate>\n  <timebase>25</timebase>\n  <ntsc>FALSE</ntsc>\n</rate>')

    def test_from_xml_method_is_working_properly(self):
        # from_xml must overwrite both fields from the parsed element.
        r = Rate(timebase='24', ntsc=False)
        from xml.etree import ElementTree
        rate_node = ElementTree.Element('rate')
        timebase_node = ElementTree.SubElement(rate_node, 'timebase')
        timebase_node.text = '25'
        ntsc_node = ElementTree.SubElement(rate_node, 'ntsc')
        ntsc_node.text = 'TRUE'
        r.from_xml(rate_node)
        self.assertEqual(r.timebase, '25')
        self.assertEqual(r.ntsc, True)
def test_adding_serviceaccount_annotations():
    """rbac.serviceAccountAnnotations must land on the ServiceAccount metadata."""
    values_yaml = '\nrbac:\n create: true\n serviceAccountAnnotations:\n eks.amazonaws.com/role-arn: arn:aws:iam:::role/k8s.clustername.namespace.serviceaccount\n'
    rendered = helm_template(values_yaml)
    annotations = rendered['serviceaccount'][uname]['metadata']['annotations']
    assert annotations['eks.amazonaws.com/role-arn'] == 'arn:aws:iam:::role/k8s.clustername.namespace.serviceaccount'
def extractScryaTranslations(item):
    """Map a Scrya feed item to a release message by tag; False if unknown."""
    chp, vol, frag = extractChapterVolFragment(item['title'])
    # (tag, series name) pairs checked in order; both Disciple tags map to
    # the same series.
    series_by_tag = (
        ("So What if It's an RPG World!?", "So What if It's an RPG World!?"),
        ('My Disciple Died Yet Again', 'My Disciple Died Yet Again'),
        ('[Disciple] Releases', 'My Disciple Died Yet Again'),
    )
    for tag, series in series_by_tag:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag)
    return False
def get_featmetainfo(desc_file, feat_name):
    """Look up the (length, type) of a feature in a '|'-separated description file.

    Each line has the form ``name|length|type``.  Returns a
    ``(feat_length, feat_type)`` tuple of strings for the first line whose
    name matches ``feat_name``, or None when no line matches.

    Fixes over the original: the file handle was leaked (no close/with) and
    the not-found case fell off the end implicitly.
    """
    with open(desc_file) as f:
        for line in f:
            # Drop the trailing newline before splitting into fields.
            fields = line.split('\n')[0].split('|')
            if fields[0] == feat_name:
                return (fields[1], fields[2])
    return None
class TorchScriptOp(OperatorInterface):
    """Benchmark wrapper that compiles a TorchScript graph from an op schema
    and runs it forward/backward."""

    def __init__(self, func_name: str):
        super(TorchScriptOp, self).__init__()
        self.func_name: str = func_name
        self.func: Callable = None       # compiled TS function, set by build()
        self.fwd_out: torch.tensor = None  # cached forward result
        self.grad_in: torch.tensor = None  # gradient fed into backward()

    def build(self, op_schema: str):
        """Parse ``op_schema`` and compile a one-call TorchScript graph for it."""
        def _extract_types(types_str: str):
            # Turn a schema arg/return list into bare type names: drop
            # alias-annotated ('*') entries, rewrite fixed dims '[N]' -> '[]',
            # and collapse all Tensor variants to plain 'Tensor'.
            types = [item for item in types_str.split(',')]
            types = [item.strip().split(' ')[0] for item in types if ('*' not in item)]
            types = [re.sub('\\[[0-9]\\]', '[]', t) for t in types]
            var_types = [(item if ('Tensor' not in item) else 'Tensor') for item in types]
            return var_types
        assert op_schema, f'TorchScriptOp {self.func_name} should have at non-empty op schema.'
        (func_name, func_signature) = op_schema.split('(', 1)
        (arg_str, output_str) = func_signature.split('->', 1)
        arg_str = arg_str.strip('() ')
        output_str = output_str.strip('() ')
        arg_types = _extract_types(arg_str)
        output_types = _extract_types(output_str)
        graph_args = []
        func_args = []
        func_schema = torch._C.parse_schema(op_schema)
        register_id = 0
        # Registers %0..%n-1: one typed graph input per schema argument.
        for data_type in arg_types:
            graph_args.append(f'%{register_id} : {data_type}')
            func_args.append(f'%{register_id}')
            register_id += 1
        func_outputs = []
        func_output_vars = []
        func_output_types = []
        for data_type in output_types:
            func_outputs.append(f'%{register_id} : {data_type}')
            func_output_vars.append(f'%{register_id}')
            func_output_types.append(data_type)
            output_var = f'%{register_id}'
            register_id += 1
        return_construct = ''
        if (len(func_outputs) > 1):
            # Multiple outputs must be packed into a tuple before returning.
            return_construct = f"%{register_id}: ({','.join(func_output_types)}) = prim::TupleConstruct({','.join(func_output_vars)})"
            output_var = f'%{register_id}'
        actual_func_name = func_schema.name
        # NOTE(review): this IR literal was collapsed to one line in the
        # extracted source; the newline layout below is reconstructed
        # (TorchScript IR is newline-sensitive) — confirm against upstream.
        ts_ir = f'''
        graph({','.join(graph_args)}):
            {','.join(func_outputs)} = {actual_func_name}({','.join(func_args)})
            {return_construct}
            return ({output_var})
        '''
        ts_graph = torch._C.parse_ir(ts_ir)
        logger.debug(f'''{self.func_name} TorchScript IR Graph: {ts_graph}''')
        cu = torch._C.CompilationUnit()
        self.func = cu.create_function(self.func_name, ts_graph)

    def cleanup(self):
        # Drop cached tensors so repeated runs don't hold on to memory.
        self.fwd_out = None
        self.grad_in = None

    def forward(self, *args, **kwargs):
        """Run the compiled function and cache its result for backward()."""
        self.fwd_out = self.func(*args, **kwargs)
        return self.fwd_out

    def create_grad(self):
        # Leaf outputs have no producing graph, so there is nothing to
        # backpropagate through.
        if (not self.fwd_out.is_leaf):
            self.grad_in = torch.ones_like(self.fwd_out)
        else:
            logger.debug(f'{self.func_name}: skipping create_grad() due to forward result is leaf tensor.')

    def backward(self):
        if (not self.fwd_out.is_leaf):
            self.fwd_out.backward(self.grad_in)
        else:
            logger.debug(f'{self.func_name}: skipping backward() due to forward result is leaf tensor.')
def install_py_deps(deps_list):
    """Install Python dependencies via pip, picking flags for the environment.

    Inside a virtualenv a plain upgrade install is used; on a system Python
    '--user' is added, plus '--break-system-packages' when the interpreter is
    EXTERNALLY-MANAGED (PEP 668) and the distro is not Guix.  Failed commands
    are recorded in the module-level install_failed_pys list.
    """
    in_virtualenv = sys.prefix != sys.base_prefix
    if in_virtualenv:
        command = [PIP_CMD, 'install', '-U']
    else:
        externally_managed = False
        if get_distro() != 'guix':
            scheme = (
                sysconfig.get_default_scheme()
                if hasattr(sysconfig, 'get_default_scheme')
                else sysconfig._get_default_scheme()  # pre-3.10 fallback
            )
            marker = os.path.join(sysconfig.get_path('stdlib', scheme), 'EXTERNALLY-MANAGED')
            externally_managed = os.path.exists(marker)
        if externally_managed:
            command = [PIP_CMD, 'install', '--user', '--break-system-packages', '-U']
        else:
            command = [PIP_CMD, 'install', '--user', '-U']
    command.extend(deps_list)
    try:
        run_command(command)
    except Exception as e:
        print('Error:', e)
        install_failed_pys.append(' '.join(command))
class DisplayPackagedMessageAction(argparse.Action):
    """argparse action that refuses the operation for packaged installs.

    Prints an explanation and exits with status 1 instead of proceeding.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        for line in (
            'E: You cannot perform this action:',
            'You installed Nautilus Terminal using a distribution package. If you encounter an issue, try to reinstall it or contact its maintainer.',
        ):
            print(line)
        sys.exit(1)
def generate_exercise(env: Environment, spec_path: Path, exercise: Path, check: bool=False):
    """Render the test file for one exercise from its Jinja template.

    Args:
        env: Jinja environment used to resolve the exercise template.
        spec_path: root directory of the canonical problem specifications.
        exercise: path to the exercise directory (its name is the slug).
        check: when True, compare the rendered output with the existing test
            file instead of overwriting it.

    Returns:
        True on success or benign skip (missing tests.toml, template, or
        canonical data); False when rendering or formatting fails.
    """
    slug = exercise.name
    meta_dir = (exercise / '.meta')
    plugins_module = None
    plugins_name = 'plugins'
    plugins_source = (meta_dir / f'{plugins_name}.py')
    try:
        # Optional per-exercise plugin module, imported dynamically and made
        # available to the template under the name "plugins".
        if plugins_source.is_file():
            plugins_spec = importlib.util.spec_from_file_location(plugins_name, plugins_source)
            plugins_module = importlib.util.module_from_spec(plugins_spec)
            sys.modules[plugins_name] = plugins_module
            plugins_spec.loader.exec_module(plugins_module)
        try:
            test_opts = TestsTOML.load((meta_dir / 'tests.toml'))
        except FileNotFoundError:
            logger.error(f'{slug}: tests.toml not found; skipping.')
            return True
        spec = load_canonical(slug, spec_path, test_opts)
        additional_tests = load_additional_tests(exercise)
        spec['additional_cases'] = additional_tests
        template_path = (exercise.relative_to('exercises') / '.meta/template.j2')
        if ('\\' in str(template_path)):
            # Jinja wants POSIX-style template names, even on Windows.
            template_path = PureWindowsPath(template_path).as_posix()
        template = env.get_template(str(template_path))
        tests_path = (exercise / f'{to_snake(slug)}_test.py')
        spec['has_error_case'] = has_error_case(spec['cases'])
        if (plugins_module is not None):
            spec[plugins_name] = plugins_module
        logger.debug(f'{slug}: attempting render')
        rendered = template.render(**spec)
        # Render into a temp file first so a failed format/check never
        # clobbers the existing test file.
        with NamedTemporaryFile('w', delete=False) as tmp:
            logger.debug(f'{slug}: writing render to tmp file {tmp.name}')
            tmpfile = Path(tmp.name)
            tmp.write(rendered)
        try:
            logger.debug(f'{slug}: formatting tmp file {tmpfile}')
            format_file(tmpfile)
        except FileNotFoundError as e:
            # format_file shells out to black; a missing binary surfaces here.
            logger.error(f'{slug}: the black utility must be installed')
            return False
        if check:
            return check_template(slug, tests_path, tmpfile)
        else:
            logger.debug(f'{slug}: moving tmp file {tmpfile}->{tests_path}')
            shutil.move(tmpfile, tests_path)
            print(f'{slug} generated at {tests_path}')
    except (TypeError, UndefinedError, SyntaxError) as e:
        logger.debug(str(e))
        logger.error(f'{slug}: generation failed')
        return False
    except TemplateNotFound as e:
        logger.debug(str(e))
        logger.info(f'{slug}: no template found; skipping')
    except FileNotFoundError as e:
        logger.debug(str(e))
        logger.info(f'{slug}: no canonical data found; skipping')
    return True
class TaskResolverMixin(object): def location(self) -> str: pass def name(self) -> str: pass def load_task(self, loader_args: List[str]) -> Task: pass def loader_args(self, settings: SerializationSettings, t: Task) -> List[str]: pass def get_all_tasks(self) -> List[Task]: pass def task_name(self, t: Task) -> Optional[str]: return None
def xx_disabled_test_integrate(): res = [] def calllater(f): res.append(f) ori = event.loop._call_soon_func foo = Foo() event.loop.integrate(calllater) foo.emit('foo', {}) foo.emit('foo', {}) assert ((len(res) == 1) and (res[0].__name__ == 'iter')) with raises(ValueError): event.loop.integrate('not a callable') event.loop._call_soon_func = ori
class MockView(): endianness = 0 sections = {} address_size = 32 def update_analysis_and_wait(self): pass def get_tags_at(self, _): return list() def get_data_var_at(self, _): return None def get_symbol_at(self, _): return None def get_function_at(self, _): return None
def show_example_dialog():
    """Render a bgfx statistics ImGui window: renderer name, frame-time
    histogram, submit times, GPU memory, and per-resource usage bars."""
    ImGui.SetNextWindowPos(ImGui.ImVec2(10.0, 70.0), ImGui.ImGuiCond_FirstUseEver)
    ImGui.SetNextWindowSize(ImGui.ImVec2(300.0, 500.0), ImGui.ImGuiCond_FirstUseEver)
    ImGui.Begin('\uf080 Statistics')  # leading glyph comes from an icon font
    renderer_name = bgfx.getRendererName(bgfx.getRendererType())
    ImGui.TextWrapped('Current renderer: {}'.format(renderer_name))
    ImGui.Separator()
    stats = bgfx.getStats()
    # Timer frequencies convert raw counter ticks to milliseconds.
    to_ms_cpu = (1000.0 / stats.cpuTimerFreq)
    to_ms_gpu = (1000.0 / stats.gpuTimerFreq)
    # Clamp away a zero delta so the FPS division below cannot blow up.
    frame_ms = max(float((stats.cpuTimeEnd - stats.cpuTimeBegin)), 1e-09)
    s_frame_time.push_sample((frame_ms * to_ms_cpu))
    # NOTE(review): the newline position inside this overlay literal was lost
    # in this view of the source — confirm against the original file.
    frame_text_overlay = f'''{s_frame_time.m_min:7.3f}ms, {s_frame_time.m_max:7.3f}ms
Avg: {s_frame_time.m_avg:7.3f}ms, {(stats.cpuTimerFreq / frame_ms):6.2f} FPS'''
    ImGui.PushStyleColor(ImGui.ImGuiCol_PlotHistogram, ImGui.ImVec4(0.0, 0.5, 0.15, 1.0))
    ImGui.PushItemWidth((- 1))  # stretch the histogram to the full width
    ImGui.PlotHistogram('', array('f', s_frame_time.m_values), 100, s_frame_time.m_offset, frame_text_overlay, 0.0, 60.0, ImGui.ImVec2(0.0, 45.0))
    ImGui.PopItemWidth()
    ImGui.PopStyleColor()
    ImGui.Text(f'Submit CPU {((stats.cpuTimeEnd - stats.cpuTimeBegin) * to_ms_cpu):.3f}, GPU {((stats.gpuTimeEnd - stats.gpuTimeBegin) * to_ms_gpu):.3f} (L: {stats.maxGpuLatency})')
    if (stats.gpuMemoryMax > 0):
        ImGui.Text(f'GPU mem: {stats.gpuMemoryUsed} / {stats.gpuMemoryMax}')
    if ImGui.CollapsingHeader('\uf12e Resources', ImGui.ImGuiTreeNodeFlags_DefaultOpen):
        caps = bgfx.getCaps()
        item_height = ImGui.GetTextLineHeightWithSpacing()
        max_width = 90.0
        ImGui.PushFont(ImGui.Font.Mono)
        ImGui.Text('Res: Num / Max')
        # One bar per bgfx resource type: current count vs device limit.
        resource_bar('DIB', 'Dynamic index buffers', stats.numDynamicIndexBuffers, caps.limits.maxDynamicIndexBuffers, max_width, item_height)
        resource_bar('DVB', 'Dynamic vertex buffers', stats.numDynamicVertexBuffers, caps.limits.maxDynamicVertexBuffers, max_width, item_height)
        resource_bar(' FB', 'Frame buffers', stats.numFrameBuffers, caps.limits.maxFrameBuffers, max_width, item_height)
        resource_bar(' IB', 'Index buffers', stats.numIndexBuffers, caps.limits.maxIndexBuffers, max_width, item_height)
        resource_bar(' OQ', 'Occlusion queries', stats.numOcclusionQueries, caps.limits.maxOcclusionQueries, max_width, item_height)
        resource_bar('  P', 'Programs', stats.numPrograms, caps.limits.maxPrograms, max_width, item_height)
        resource_bar('  S', 'Shaders', stats.numShaders, caps.limits.maxShaders, max_width, item_height)
        resource_bar('  T', 'Textures', stats.numTextures, caps.limits.maxTextures, max_width, item_height)
        resource_bar('  U', 'Uniforms', stats.numUniforms, caps.limits.maxUniforms, max_width, item_height)
        resource_bar(' VB', 'Vertex buffers', stats.numVertexBuffers, caps.limits.maxVertexBuffers, max_width, item_height)
        resource_bar(' VL', 'Vertex layouts', stats.numVertexLayouts, caps.limits.maxVertexLayouts, max_width, item_height)
        ImGui.PopFont()
    ImGui.End()
def test(): assert (len(pattern1) == 2), 'pattern1' assert (len(pattern2) == 4), 'pattern2' assert (len(pattern1[0]) == 1), 'pattern1' assert any(((pattern1[0].get(attr) == 'amazon') for attr in ('lower', 'LOWER'))), 'pattern1' assert (len(pattern1[1]) == 2), 'pattern122' assert any(((pattern1[1].get(attr) == True) for attr in ('is_title', 'IS_TITLE'))), 'pattern2' assert any(((pattern1[1].get(attr) == 'NOUN') for attr in ('pos', 'POS'))), 'pattern2' assert any(((pattern2[0].get(attr) == 'ad') for attr in ('lower', 'LOWER'))), 'pattern2' assert any(((pattern2[2].get(attr) == 'free') for attr in ('lower', 'LOWER'))), 'pattern23' assert any(((pattern2[3].get(attr) == 'NOUN') for attr in ('pos', 'POS'))), 'pattern24' assert (len(matcher(doc)) == 6), '6' __msg__.good("Well done!'-''TEXT''LOWER''SHAPE'MatcherPhraseMatcher")
class OptionSeriesWordcloudZones(Options): def className(self): return self._config_get(None) def className(self, text: str): self._config(text, js_type=False) def color(self): return self._config_get(None) def color(self, text: str): self._config(text, js_type=False) def dashStyle(self): return self._config_get(None) def dashStyle(self, text: str): self._config(text, js_type=False) def fillColor(self): return self._config_get(None) def fillColor(self, text: str): self._config(text, js_type=False)
class OptionSeriesDumbbellSonificationContexttracksMappingTremoloSpeed(Options): def mapFunction(self): return self._config_get(None) def mapFunction(self, value: Any): self._config(value, js_type=False) def mapTo(self): return self._config_get(None) def mapTo(self, text: str): self._config(text, js_type=False) def max(self): return self._config_get(None) def max(self, num: float): self._config(num, js_type=False) def min(self): return self._config_get(None) def min(self, num: float): self._config(num, js_type=False) def within(self): return self._config_get(None) def within(self, value: Any): self._config(value, js_type=False)
def test_remove_circular_dependency_no(graph_no_dependency): (nodes, _, cfg) = graph_no_dependency instructions = [inst.copy() for inst in cfg.instructions] remove_circular_dependency(cfg) assert ((nodes[0].instructions == [instructions[0]]) and (set(nodes[1].instructions[:4]) == set(instructions[1:5])) and (nodes[1].instructions[4:] == instructions[5:7]) and (nodes[2].instructions == instructions[7:]))
class GreetingWorkflowImpl(GreetingWorkflow): async def get_greeting(self): global a, b, c a.append(str(Workflow.random_uuid())) (await Workflow.sleep(1)) b.append(str(Workflow.random_uuid())) (await Workflow.sleep(1)) c.append(str(Workflow.random_uuid())) (await Workflow.sleep(1))
class AgentResource(): type: str name: str introduce: str def from_dict(d: Dict[(str, Any)]) -> Optional[AgentResource]: if (d is None): return None return AgentResource(type=d.get('type'), name=d.get('name'), introduce=d.get('introduce')) def to_dict(self) -> Dict[(str, Any)]: return dataclasses.asdict(self)
class OptionSeriesItemSonificationContexttracksPointgrouping(Options): def algorithm(self): return self._config_get('minmax') def algorithm(self, text: str): self._config(text, js_type=False) def enabled(self): return self._config_get(True) def enabled(self, flag: bool): self._config(flag, js_type=False) def groupTimespan(self): return self._config_get(15) def groupTimespan(self, num: float): self._config(num, js_type=False) def prop(self): return self._config_get('y') def prop(self, text: str): self._config(text, js_type=False)
class TestGetAsyncSearch(): ('elasticsearch.Elasticsearch') .asyncio async def test_get_async_search(self, es): es.async_search.get = mock.AsyncMock(return_value={'is_running': False, 'response': {'took': 1122, 'timed_out': False, 'hits': {'total': {'value': 1520, 'relation': 'eq'}}}}) r = runner.GetAsyncSearch() params = {'retrieve-results-for': 'search-1'} async with runner.CompositeContext(): runner.CompositeContext.put('search-1', '12345') response = (await r(es, params)) assert (response == {'weight': 1, 'unit': 'ops', 'success': True, 'stats': {'search-1': {'hits': 1520, 'hits_relation': 'eq', 'timed_out': False, 'took': 1122}}}) es.async_search.get.assert_awaited_once_with(id='12345', params={})
def _get_synteny_scale(recipe, synteny_backend): if ('blocks' in recipe): if isinstance(recipe['blocks'], six.string_types): scale = config.vals['blocks'][recipe['blocks']] else: scale = recipe['blocks'] else: scale = config.vals['blocks'][synteny_backend.infer_block_scale(recipe)] logger.info("Running withs synteny block sizes '%s'", str(scale)) return scale
def app_config_from_env(app, prefix='FLASKBB_'): for (key, value) in os.environ.items(): if key.startswith(prefix): key = key[len(prefix):] try: value = ast.literal_eval(value) except (ValueError, SyntaxError): pass app.config[key] = value return app
class UART(_UARTPrimitive): def __init__(self, device: SerialHandler=None, *, baudrate: int=9600, bits: int=8, parity: Parity=None, stop: int=1, timeout: float=1): super().__init__(device) self._set_uart_baud(baudrate) if (bits == 8): pd = 0 elif (bits == 9): pd = 3 else: raise ValueError('Invalid number of bits') if ((bits == 9) and (parity is not None)): raise ValueError('Invalid parity') if (stop not in (1, 2)): raise ValueError('Invalid number of stop bits') pd += parity.value try: self._set_uart_mode(pd, (stop - 1)) except RuntimeError: pass self._timeout = timeout def baudrate(self): return self._baudrate def in_waiting(self): return self._read_uart_status() def timeout(self): return self._timeout def deinit(self) -> None: pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.deinit() def _read_with_timeout(self, nbytes: int=None, *, line=False): if (nbytes == 0): return None start_time = time.time() data = bytearray() total_read = 0 while ((time.time() - start_time) <= self._timeout): has_char = self._read_uart_status() if has_char: char = self._read_byte() start_time = time.time() if (line and (char == 10)): break data.append(char) total_read += 1 if (nbytes and (total_read == nbytes)): break time.sleep(0.01) return (bytes(data) if data else None) def read(self, nbytes: int=None) -> Optional[bytes]: return self._read_with_timeout(nbytes) def readinto(self, buf: WriteableBuffer) -> int: nbytes = len(buf) data = self._read_with_timeout(nbytes) if (data is None): return 0 else: nbuf = len(data) for (i, c) in zip(range(nbuf), data): buf[i] = c return nbuf def readline(self) -> Optional[bytes]: return self._read_with_timeout(None, line=True) def write(self, buf: ReadableBuffer) -> int: written = 0 for (msb, lsb) in zip_longest(buf[1::2], buf[::2]): if (msb is not None): self._write_int(((msb << 8) | lsb)) written += 2 else: self._write_byte(lsb) written += 1 return written
def pull_apps(apps=None, bench_path='.', reset=False):
    """Pull the latest changes for bench apps.

    Args:
        apps: app names to update; defaults to every app in the bench.
        bench_path: path to the bench directory.
        reset: when True, hard-reset each app to its remote branch (local
            changes are discarded) instead of pulling/rebasing.

    Fixes: the user-facing "local changes" message was numbered 1./1./2.
    (now 1./2./3.) and misspelled "repositries".
    """
    from bench.bench import Bench
    from bench.utils.app import get_current_branch, get_remote
    bench = Bench(bench_path)
    rebase = '--rebase' if bench.conf.get('rebase_on_pull') else ''
    apps = apps or bench.apps
    excluded_apps = bench.excluded_apps
    if not reset:
        # Safety pass: refuse to proceed if any app has uncommitted changes.
        for app in apps:
            if app in excluded_apps:
                print(f'Skipping reset for app {app}')
                continue
            app_dir = get_repo_dir(app, bench_path=bench_path)
            if os.path.exists(os.path.join(app_dir, '.git')):
                out = subprocess.check_output('git status', shell=True, cwd=app_dir)
                out = out.decode('utf-8')
                if not re.search('nothing to commit, working (directory|tree) clean', out):
                    print(f'''
Cannot proceed with update: You have local changes in app "{app}" that are not committed.

Here are your choices:

1. Merge the {app} app manually with "git pull" / "git pull --rebase" and fix conflicts.
2. Temporarily remove your changes with "git stash" or discard them completely
with "bench update --reset" or for individual repositories "git reset --hard"
3. If your changes are helpful for others, send in a pull request via GitHub and
wait for them to be merged in the core.''')
                    sys.exit(1)
    for app in apps:
        if app in excluded_apps:
            print(f'Skipping pull for app {app}')
            continue
        app_dir = get_repo_dir(app, bench_path=bench_path)
        if os.path.exists(os.path.join(app_dir, '.git')):
            remote = get_remote(app)
            if not remote:
                # Nothing to pull from; exclude the app from future updates.
                add_to_excluded_apps_txt(app, bench_path=bench_path)
                print(f"Skipping pull for app {app}, since remote doesn't exist, and adding it to excluded apps")
                continue
            if not bench.conf.get('shallow_clone') or not reset:
                # A shallow clone cannot merge remote changes safely; fetch
                # the full history first.
                is_shallow = os.path.exists(os.path.join(app_dir, '.git', 'shallow'))
                if is_shallow:
                    s = ' to safely pull remote changes.' if not reset else ''
                    print(f'Unshallowing {app}{s}')
                    bench.run(f'git fetch {remote} --unshallow', cwd=app_dir)
            branch = get_current_branch(app, bench_path=bench_path)
            logger.log(f'pulling {app}')
            if reset:
                reset_cmd = f'git reset --hard {remote}/{branch}'
                if bench.conf.get('shallow_clone'):
                    bench.run(f'git fetch --depth=1 --no-tags {remote} {branch}', cwd=app_dir)
                    bench.run(reset_cmd, cwd=app_dir)
                    # Drop unreachable objects so shallow clones stay small.
                    bench.run('git reflog expire --all', cwd=app_dir)
                    bench.run('git gc --prune=all', cwd=app_dir)
                else:
                    bench.run('git fetch --all', cwd=app_dir)
                    bench.run(reset_cmd, cwd=app_dir)
            else:
                bench.run(f'git pull {rebase} {remote} {branch}', cwd=app_dir)
                # Remove stale bytecode so deleted/renamed modules don't linger.
                bench.run('find . -name "*.pyc" -delete', cwd=app_dir)
.parametrize('block_identifier,expected_output', ((1, 1), ((- 1), 0), ('latest', 'latest'), ('earliest', 'earliest'), ('pending', 'pending'), ('safe', 'safe'), ('finalized', 'finalized'))) def test_parse_block_identifier_int_and_string(w3, block_identifier, expected_output): block_id = parse_block_identifier(w3, block_identifier) assert (block_id == expected_output)
_test def test_parallel_logging() -> None: graph = MyDistributedGraph() graph.configure(MyDistributedConfig(output_filename=DISTRIBUTED_OUTPUT_FILENAME)) runner = ParallelRunner(graph=graph) runner.run() output_path = str((Path(runner._options.logger_config.output_directory) / Path(f'{runner._options.logger_config.recording_name}.h5'))) with h5py.File(output_path, 'r') as h5py_file: for hdf5_path in ('source_a', 'transform_c'): dataset = h5py_file[hdf5_path] assert (dataset.shape == (NUM_MESSAGES,)) dataset_nums = {int(num[0]) for num in dataset} assert (dataset_nums == set(range(NUM_MESSAGES)))
def test_aggregation_chain_fork():
    """Stats must aggregate up the STEP -> EPISODE -> EPOCH chain, with each
    input attribute forked into several outputs at step level."""

    class _EventInterface(ABC):
        # NOTE(review): these bare calls are truncated decorator remnants in
        # this view of the source (presumably epoch/episode/step stats
        # decorators applied to event1) — confirm against the original file.
        _epoch_stats(sum, input_name='attr1_sum')
        _epoch_stats(np.mean, input_name='attr2_mean')
        _episode_stats(sum, input_name='attr1_sum')
        _episode_stats(np.mean, input_name='attr2_mean')
        _step_stats(sum, input_name='attr1', output_name='attr1_sum')
        _step_stats(np.mean, input_name='attr1', output_name='attr1_mean')
        _step_stats(sum, input_name='attr2', output_name='attr2_sum')
        _step_stats(np.mean, input_name='attr2', output_name='attr2_mean')

        def event1(self, attr1, attr2):
            pass

    # Chain the aggregators: step feeds episode, episode feeds epoch.
    agg_episode = LogStatsAggregator(LogStatsLevel.EPOCH)
    agg_step = LogStatsAggregator(LogStatsLevel.EPISODE, agg_episode)
    agg_event = LogStatsAggregator(LogStatsLevel.STEP, agg_step)
    no_steps = 5
    no_episodes = 7
    for episode in range(no_episodes):
        for step in range(no_steps):
            # Two events per step: attr1 in {2, 3}, attr2 in {-2, -3}.
            agg_event.add_event(EventRecord(_EventInterface, _EventInterface.event1, dict(attr1=2.0, attr2=(- 2.0))))
            agg_event.add_event(EventRecord(_EventInterface, _EventInterface.event1, dict(attr1=3.0, attr2=(- 3.0))))
            step_stats = agg_event.reduce()
            # Step level: both attrs forked into sum and mean -> 4 entries.
            assert (len(step_stats) == 4)
            value1_sum = step_stats[(_EventInterface.event1, 'attr1_sum', None)]
            value1_mean = step_stats[(_EventInterface.event1, 'attr1_mean', None)]
            value2_sum = step_stats[(_EventInterface.event1, 'attr2_sum', None)]
            value2_mean = step_stats[(_EventInterface.event1, 'attr2_mean', None)]
            assert (value1_sum == 5.0)
            assert (value1_mean == 2.5)
            assert (value2_sum == (- 5.0))
            assert (value2_mean == (- 2.5))
        episode_stats = agg_step.reduce()
        # Episode level aggregates the step sums/means over no_steps steps.
        assert (len(episode_stats) == 2)
        value1 = episode_stats[(_EventInterface.event1, 'attr1_sum', None)]
        value2 = episode_stats[(_EventInterface.event1, 'attr2_mean', None)]
        assert (value1 == (no_steps * 5.0))
        assert (value2 == (- 2.5))
    epoch_stats = agg_episode.reduce()
    # Epoch level aggregates over all episodes.
    assert (len(epoch_stats) == 2)
    value1 = epoch_stats[(_EventInterface.event1, 'attr1_sum', None)]
    value2 = epoch_stats[(_EventInterface.event1, 'attr2_mean', None)]
    assert (value1 == ((no_episodes * no_steps) * 5.0))
    assert (value2 == (- 2.5))
def topic_card_body(topics: List[Tuple[(str, float)]]) -> HTML: html = "\n <div class='flex-row w-100' style='margin-top: 20px; flex-wrap: wrap;'>\n <div style='min-width: 400px; flex: 1 1 auto;'>\n <div class='w-100 ta_center bold'>All PDFs</div>\n <div id='siac-read-stats-topics-pc_1' class='w-100' style='height: 400px;'></div>\n </div> \n <div style='min-width: 400px; flex: 1 1 auto;'>\n <div class='w-100 ta_center bold'>Read last 7 days</div>\n <div id='siac-read-stats-topics-pc_2' class='w-100' style='height: 400px;'></div>\n </div> \n </div> \n " return html
class FourWellAnaPot(AnaPotBase): def __init__(self): V_str = 'x**4 + y**4 - 2*x**2 - 4*y**2 + x*y + 0.3*x + 0.1*y' xlim = ((- 1.75), 1.75) ylim = ((- 1.75), 1.75) minima = ((1., (- 1.), 0.0), ((- 0.), (- 1.), 0.0), ((- 1.), 1., 0.0)) super().__init__(V_str=V_str, xlim=xlim, ylim=ylim, minima=minima) def __str__(self): return 'FourWellAnaPot calculator'
def test_update_get_model(client: TestClient): response = client.patch('/models/test_model', json={'stateless': False, 'batch_size': 128, 'run_on_gpu': True}) model_update = response.json() assert (response.status_code == 200) response = client.get('/models/test_model') assert (response.status_code == 200) model_get = response.json() assert (model_update == model_get)
class Annotation():
    """Thrift struct for a span annotation (generated-style code).

    Fields: timestamp (i64), value (string), host (Endpoint struct).

    NOTE(review): ``__repr__`` uses ``dict.iteritems()``, which exists only
    on Python 2 — this class appears to be Python-2-era generated thrift
    code; regenerate rather than hand-edit.
    """

    # (field id, type, name, type args, default) — used by fastbinary.
    thrift_spec = (None, (1, TType.I64, 'timestamp', None, None), (2, TType.STRING, 'value', None, None), (3, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None))

    def __init__(self, timestamp=None, value=None, host=None):
        self.timestamp = timestamp
        self.value = value
        self.host = host

    def read(self, iprot):
        """Deserialize this struct from *iprot*."""
        # Fast path: accelerated binary protocol decodes in C.
        if ((iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated) and isinstance(iprot.trans, TTransport.CReadableTransport) and (self.thrift_spec is not None) and (fastbinary is not None)):
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk fields until STOP, skipping unknown ids/types.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if (ftype == TType.STOP):
                break
            if (fid == 1):
                if (ftype == TType.I64):
                    self.timestamp = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif (fid == 2):
                if (ftype == TType.STRING):
                    self.value = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif (fid == 3):
                if (ftype == TType.STRUCT):
                    self.host = Endpoint()
                    self.host.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot* (None fields are omitted)."""
        # Fast path: accelerated binary protocol encodes in C.
        if ((oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated) and (self.thrift_spec is not None) and (fastbinary is not None)):
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Annotation')
        if (self.timestamp is not None):
            oprot.writeFieldBegin('timestamp', TType.I64, 1)
            oprot.writeI64(self.timestamp)
            oprot.writeFieldEnd()
        if (self.value is not None):
            oprot.writeFieldBegin('value', TType.STRING, 2)
            oprot.writeString(self.value)
            oprot.writeFieldEnd()
        if (self.host is not None):
            oprot.writeFieldBegin('host', TType.STRUCT, 3)
            self.host.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields — always valid."""
        return

    def __repr__(self):
        # NOTE(review): Python 2 only (``iteritems``); breaks on Python 3.
        L = [('%s=%r' % (key, value)) for (key, value) in self.__dict__.iteritems()]
        return ('%s(%s)' % (self.__class__.__name__, ', '.join(L)))

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and (self.__dict__ == other.__dict__))

    def __ne__(self, other):
        return (not (self == other))
def update_warning_messages(database: DB, metadata_file: Path) -> None: warning_messages_from_metadata_file = json.loads(metadata_file.read_text())['codes'] warning_messages = {int(code): message for (code, message) in warning_messages_from_metadata_file.items()} models.create(database) warning_messages_table = Table(WarningMessage.__tablename__, WarningMessage.metadata) with database.make_session() as session: with session.connection() as database_connection: for (code, message) in warning_messages.items(): upsert_entry(database_connection, warning_messages_table, code, message) session.commit()
def write(self, command, method=None): if ((len(command) > 3) and (command[3] == 176) and (len(command) > 5)): command = ((((("'" + command[4]) + "'") + '(') + command[5]) + ')') command = command.encode() if (method == 'socket'): log_command = [] for i in command: if isinstance(command, str): log_command.append(command) elif isinstance(i, str): log_command.append(i) else: log_command.append(hex(i)) self.log.debug('_write: {}'.format(log_command)) py_version = check_python_version() if (py_version == 2): self.sock.sendall(''.join([chr(b) for b in command])) else: if isinstance(command, str): command = command.encode() else: command = bytes(command) self.sock.sendall(command) else: self._serial_port.reset_input_buffer() self.log.debug('_write: {}'.format([hex(i) for i in command])) self._serial_port.write(command) self._serial_port.flush()
class LogProgress(): def __init__(self, logger: logging.Logger, iterable: Iterable, updates: int=5, min_interval: int=1, time_per_it: bool=False, total: tp.Optional[int]=None, name: str='LogProgress', level: int=logging.INFO): self.iterable = iterable if (total is None): assert isinstance(iterable, Sized) total = len(iterable) self.total = total self.updates = updates self.min_interval = min_interval self.time_per_it = time_per_it self.name = name self.logger = logger self.level = level def update(self, **infos) -> bool: self._infos = infos return self._will_log def __iter__(self): self._iterator = iter(self.iterable) self._will_log = False self._index = (- 1) self._infos = {} self._begin = time.time() return self def __next__(self): if self._will_log: self._log() self._will_log = False try: value = next(self._iterator) except StopIteration: raise else: self._index += 1 if (self.updates > 0): log_every = max(self.min_interval, (self.total // self.updates)) if ((self._index >= 1) and ((self._index % log_every) == 0)): self._will_log = True return value def _log(self): self._speed = ((1 + self._index) / (time.time() - self._begin)) infos = ' | '.join((f'{k.capitalize()} {v}' for (k, v) in self._infos.items())) if (self._speed < 0.0001): speed = 'oo sec/it' elif (self.time_per_it and (self._speed < 1)): speed = f'{(1 / self._speed):.2f} sec/it' elif self.time_per_it: speed = f'{(1000 / self._speed):.1f} ms/it' elif (self._speed < 0.1): speed = f'{(1 / self._speed):.1f} sec/it' else: speed = f'{self._speed:.2f} it/sec' out = f'{self.name} | {self._index}/{self.total} | {speed}' if infos: out += (' | ' + infos) self.logger.log(self.level, out)
class HomeBrewTestCase(unittest.TestCase): def setUp(self): self.wf = Workflow() cask.wf = Workflow() brew.wf = Workflow() def test_get_all_formulae(self): result = brew.get_all_formulae() self.assertTrue((len(result) > 0)) def test_search_key_for_action(self): result = helpers.search_key_for_action({'name': 'a', 'description': 'b'}) self.assertEqual(result, 'a b') def test_brew_get_installed_packages(self): result = brew.get_installed_formulae() self.assertTrue((len(result) >= 0)) def test_brew_get_info(self): result = brew.get_info() self.assertTrue(all(((x in result) for x in ['kegs', 'files']))) def test_cask_get_all_casks(self): result = cask.get_all_casks() self.assertTrue((len(result) > 0)) def test_cask_get_installed_casks(self): result = cask.get_installed_casks() self.assertTrue((len(result) >= 0)) def test_cask_execute(self): for cmd in ['search', 'list']: result = cask.execute(cask.wf, ['brew', cmd, '--cask']) self.assertTrue((len(result) >= 0)) def test_brew_execute(self): for cmd in ['search', 'list']: result = brew.execute(brew.wf, ['brew', cmd]) self.assertTrue((len(result) > 0))
def parse_version(v): undotted = v.split('.') if (len(undotted) == 0): raise ValueError('Versio number cannot be empty') if (len(undotted) > 3): raise ValueError('Version number cannot have more than 3 dots') tag_match = re.match('([0-9]+)([a-z]+)([0-9]+)?', undotted[(- 1)]) if (tag_match is not None): (least_number, tag, tagnumber) = tag_match.groups() else: (least_number, tag, tagnumber) = (undotted[(- 1)], None, None) if (tagnumber is None): tagnumber = 0 release = (tuple(undotted[:(- 1)]) + (least_number,)) while (len(release) < 3): release = (release + (0,)) release = tuple((int(n) for n in release)) return (release, tag, int(tagnumber))
def cli() -> None: program = ArgumentParser(formatter_class=(lambda prog: HelpFormatter(prog, max_help_position=120))) program.add_argument('--torch', help=wording.get('install_dependency_help').format(dependency='torch'), choices=TORCH.keys()) program.add_argument('--onnxruntime', help=wording.get('install_dependency_help').format(dependency='onnxruntime'), choices=ONNXRUNTIMES.keys()) program.add_argument('--skip-venv', help=wording.get('skip_venv_help'), action='store_true') program.add_argument('-v', '--version', version=((metadata.get('name') + ' ') + metadata.get('version')), action='version') run(program)
class HttpWorker(Thread, HttpReq):
    """Daemon worker thread that pulls URLs from a queue and GETs them.

    Responses are routed through *flt* to the success/failure callbacks.
    In stream mode, download speed is tracked per chunk; transfers slower
    than *lowspeed_threshold* are aborted and reported as failures.
    """

    def __init__(self, tname, task_queue, flt, suc, fail, headers={}, proxy=None, proxy_policy=None, retry=3, timeout=10, logger=None, keep_alive=None, stream_mode=False, lowspeed_threshold=None):
        # NOTE(review): mutable default ``headers={}`` is shared across all
        # calls; HttpReq presumably copies it — confirm.
        HttpReq.__init__(self, headers, proxy, proxy_policy, retry, timeout, logger, tname=tname)
        Thread.__init__(self, name=tname)
        Thread.setDaemon(self, True)
        self.task_queue = task_queue
        self.logger = logger
        # Health callback polled every loop; also used to signal exit.
        self._keepalive = keep_alive
        self._exit = (lambda x: False)
        self.flt = flt
        self.f_suc = suc
        self.f_fail = fail
        self.stream_mode = stream_mode
        self.stream_speed = None
        self.lowspeed_threshold = lowspeed_threshold
        # Worst-case duration of a full request cycle (all retries).
        self.zombie_threshold = (timeout * (retry + 1))
        # Set once the worker has taken at least one task.
        self.run_once = False

    def _finish_queue(self, *args):
        """True once this worker has run a task and the queue is drained."""
        return (self.run_once and self.task_queue.empty())

    def run(self):
        self.logger.verbose(('t-%s start' % self.name))
        _stream_cb = None
        if self.stream_mode:
            self.stream_speed = speed_checker()

            def f(d):
                # Per-chunk callback: track speed, abort transfers below the
                # low-speed threshold, and keep the keepalive fresh.
                self.stream_speed.check(len(d))
                if self.lowspeed_threshold:
                    speed = self.stream_speed.calc(full=True)
                    if (0 < speed < self.lowspeed_threshold):
                        raise LowSpeedException('')
                self._keepalive(self)
            _stream_cb = f
        # Main loop: runs until the keepalive or exit hook says stop.
        while ((not self._keepalive(self)) and (not self._exit(self))):
            try:
                url = self.task_queue.get(False)
            except Empty:
                # Idle: reset the speed window so stale samples don't count.
                if self.stream_speed:
                    self.stream_speed.reset()
                time.sleep(1)
                continue
            self.run_once = True
            try:
                self.request('GET', url, self.flt, self.f_suc, self.f_fail, stream_cb=_stream_cb)
            except PoolException as ex:
                # Pool-level failure: stop this worker entirely.
                self.logger.warning(('%s-%s %s' % (i18n.THREAD, self.tname, str(ex))))
                break
            except LowSpeedException as ex:
                self.logger.warning((i18n.THREAD_SPEED_TOO_LOW % (self.tname, util.human_size(self.stream_speed.calc(full=True)), util.human_size(self.lowspeed_threshold))))
                # Report the aborted URL through the filter as a failure.
                self.flt(_FakeResponse(url), self.f_suc, self.f_fail)
            except Exception as ex:
                self.logger.warning((i18n.THREAD_UNCAUGHT_EXCEPTION % (self.tname, traceback.format_exc())))
                self.flt(_FakeResponse(url), self.f_suc, self.f_fail)
        self.logger.verbose(('t-%s exit' % self.name))
        # Final keepalive call flags this worker as exited.
        self._keepalive(self, _exit=True)
def create_js_component_class(cls, cls_name, base_class='Component.prototype'):
    """Generate the JavaScript class code for a Python Component class.

    Walks ``cls.__dict__`` and emits JS for actions, reactions, emitters,
    property defaults, validator functions, plain methods, and JSON-
    serializable constants, prefixed onto the class prototype.

    Returns:
        The generated JS source with collected metadata attached
        (``mc.attach_meta``).
    """
    assert (cls_name != 'Component')
    mc = MetaCollector(cls)
    mc.meta['std_functions'].add('op_instantiate')
    total_code = []
    funcs_code = []
    const_code = []
    # NOTE(review): this message template is never used below — the except
    # clause near the end also shadows ``err``. Confirm whether it is dead.
    err = 'Objects on JS Component classes can only be int, float, str, or a list/tuple thereof. Not %s -> %r.'
    total_code.append('\n'.join(get_class_definition(cls_name, base_class)).rstrip())
    # Top-level (undotted) class names get a "var" declaration.
    prefix = ('' if cls_name.count('.') else 'var ')
    total_code[0] = (prefix + total_code[0])
    # Short alias for the prototype, e.g. "$MyWidget.".
    prototype_prefix = (('$' + cls_name.split('.')[(- 1)]) + '.')
    total_code.append(('var %s = %s.prototype;' % (prototype_prefix[:(- 1)], cls_name)))
    class_items = cls.__dict__.items()
    if (sys.version_info < (3, 6)):
        # Pre-3.6 dicts are unordered; sort for deterministic output.
        class_items = sorted(class_items)
    for (name, val) in class_items:
        if isinstance(val, ActionDescriptor):
            # Action: transpile the function onto the prototype.
            funcname = name
            code = mc.py2js(val._func, (prototype_prefix + funcname))
            code = code.replace('super()', base_class)
            if val._func.__name__.startswith('flx_'):
                # Generic flx_ implementations get the concrete name
                # substituted for the 'flx_name' placeholder.
                subname = name
                if (name.startswith('set_') or name.startswith('_set_')):
                    subname = name[4:]
                code = code.replace('flx_name', ("'%s'" % subname))
                code = code.replace('function (', ('function %s (' % val._func.__name__))
            funcs_code.append(code.rstrip())
            funcs_code.append(((prototype_prefix + funcname) + '.nobind = true;'))
            funcs_code.append('')
        elif isinstance(val, ReactionDescriptor):
            # Reaction: also carries its mode and connection strings.
            funcname = name
            code = mc.py2js(val._func, (prototype_prefix + funcname))
            code = code.replace('super()', base_class)
            funcs_code.append(code.rstrip())
            funcs_code.append(((prototype_prefix + funcname) + '.nobind = true;'))
            funcs_code.append((((prototype_prefix + funcname) + '._mode = ') + reprs(val._mode)))
            if val._connection_strings:
                funcs_code.append((((prototype_prefix + funcname) + '._connection_strings = ') + reprs(val._connection_strings)))
            funcs_code.append('')
        elif isinstance(val, EmitterDescriptor):
            funcname = name
            code = mc.py2js(val._func, (prototype_prefix + funcname))
            code = code.replace('super()', base_class)
            funcs_code.append(code.rstrip())
            funcs_code.append(((prototype_prefix + funcname) + '.nobind = true;'))
            funcs_code.append('')
        elif isinstance(val, Attribute):
            # Attributes have no JS-side representation.
            pass
        elif isinstance(val, Property):
            # Property: emit only the JSON-encoded default value.
            default_val = json.dumps(val._default)
            t = '%s_%s_value = %s;'
            const_code.append((t % (prototype_prefix, name, default_val)))
        elif isinstance(val, classmethod):
            pass
        elif (name.startswith('__') and (name not in OK_MAGICS) and (not name.endswith('_validate'))):
            # Skip dunders that are neither whitelisted magics nor validators.
            pass
        elif (name.endswith('_validate') and hasattr(val, '__self__') and isinstance(val.__self__, Property)):
            # Property validator: emit a JS shim that delegates to the
            # property class's prototype._validate. The property class must
            # be referred to by a name resolvable in the defining module's
            # namespace, so walk the module path to find a reachable alias.
            prop_class = val.__self__.__class__
            mod_name_parts = prop_class.__module__.split('.')
            module_ns = sys.modules[cls.__module__].__dict__
            prop_class_name = prop_class.__name__
            if (prop_class_name not in module_ns):
                if (('flx' in module_ns) and (mod_name_parts[0] == 'flexx')):
                    # Common case: flexx imported as "flx".
                    prop_class_name = ('flx.' + prop_class_name)
                else:
                    # Find the deepest module alias present in the namespace
                    # from which the property class is reachable.
                    for ip in reversed(range(0, len(mod_name_parts))):
                        if (mod_name_parts[ip] in module_ns):
                            m = sys.modules['.'.join(mod_name_parts[:(ip + 1)])]
                            if (m is module_ns[mod_name_parts[ip]]):
                                for ip2 in range(ip, len(mod_name_parts)):
                                    m = sys.modules['.'.join(mod_name_parts[:(ip2 + 1)])]
                                    if (getattr(m, prop_class_name, None) is prop_class):
                                        break
                                prop_class_name = (('.'.join(mod_name_parts[ip:(ip2 + 1)]) + '.') + prop_class_name)
                                break
            t = ' = function (value) { return %s(value, %s, %s); }\n'
            code = ((prototype_prefix + name) + (t % ((prop_class_name + '.prototype._validate'), json.dumps(name[1:(- 9)]), json.dumps(val.__self__._data))))
            funcs_code.append(code)
            mc.meta['vars_unknown'].add(prop_class_name)
        elif callable(val):
            # Plain method: transpile directly.
            code = mc.py2js(val, (prototype_prefix + name))
            code = code.replace('super()', base_class)
            if val.__name__.startswith('flx_'):
                subname = (name[8:] if name.startswith('_mutate_') else name)
                code = code.replace('flx_name', ("'%s'" % subname))
            funcs_code.append(code.rstrip())
            funcs_code.append('')
        else:
            # Anything else must be a JSON-serializable constant.
            try:
                serialized = json.dumps(val)
            except Exception as err:
                # NOTE(review): this rebinds (shadows) the message template
                # ``err`` defined at the top of the function.
                raise ValueError(('Attributes on JS Component class must be JSON compatible.\n%s' % str(err)))
            const_code.append((((prototype_prefix + name) + ' = ') + serialized))
    if const_code:
        total_code.append('')
        total_code.extend(const_code)
    if funcs_code:
        total_code.append('')
        total_code.extend(funcs_code)
    total_code.append('')
    # 'flx_name' is a placeholder, not a real free variable.
    mc.meta['vars_unknown'].discard('flx_name')
    return mc.attach_meta('\n'.join(total_code))
def test_no_dict_action_wrapper():
    """The NoDictSpacesWrapper flattens dict action/observation spaces to plain gym spaces."""
    env = NoDictSpacesWrapper.wrap(GymMazeEnv(env='CartPole-v0'))

    # Action space is flattened to a plain Discrete space; the dict variant stays available.
    assert isinstance(env.action_space, spaces.Discrete)
    assert isinstance(env.action_spaces_dict, dict)

    # A flat action round-trips through action() / reverse_action().
    sampled = env.action_space.sample()
    wrapped = env.action(sampled)
    assert isinstance(wrapped, dict)
    assert wrapped['action'] == sampled
    assert env.action_space.contains(env.reverse_action(wrapped))
    assert env.reverse_action(wrapped) == sampled

    # Observation space is flattened to a Box yielding plain ndarrays.
    assert isinstance(env.observation_space, spaces.Box)
    assert isinstance(env.observation_spaces_dict, dict)
    assert isinstance(env.observation_space.sample(), np.ndarray)
    assert env.observation_space.contains(env.observation_space.sample())
    assert env.observation_space.contains(env.reset())
class Message():
    """A single diagnostic (style/metric/check/error/...) anchored to a source Location."""

    def __init__(self, location, kind, check_id, message, fatal, autofixed):
        assert isinstance(location, Location)
        assert kind in ('info', 'style', 'metric', 'check',
                        'warning', 'lex error', 'error')
        if check_id is not None:
            # Registered rule ids must exist in the relevant registry.
            assert isinstance(check_id, str)
            if kind == 'style':
                assert check_id in STYLE_RULES
            elif kind == 'metric':
                assert check_id in METRICS
        assert isinstance(message, str)
        assert isinstance(fatal, bool)
        assert isinstance(autofixed, bool)
        # Only lexer/parser errors may be fatal.
        assert (not fatal) or kind in ('lex error', 'error'), \
            'fatal=%s, kind=%s violates precondition' % (fatal, kind)

        self.location = location
        self.kind = kind
        self.check_id = check_id
        self.severity = 'medium'
        self.message = message
        self.fixed = autofixed
        self.fatal = fatal
        self.justified = False

    def __str__(self):
        return f'Message({self.location},{self.kind},{self.message!r})'

    def __lt__(self, other):
        # Messages sort by source location.
        assert isinstance(other, Message)
        return self.location < other.location

    def check_justification(self, justification):
        """Mark this message justified when a style justification covers its line."""
        if self.kind != 'style':
            return
        if not isinstance(justification, Style_Justification):
            return
        if self.location.line == justification.token.location.line:
            self.justified = True
            justification.used = True

    def to_json(self):
        """Serialize this message for machine-readable output."""
        return {'location': self.location.to_json(),
                'kind': self.kind,
                'severity': self.severity,
                'message': self.message,
                'fixable': self.fixed,
                'fatal': self.fatal}
def extractWwwPurplecarnationCom(item):
    """Parse a release feed item from www.purplecarnation.com.

    Returns None for previews or items without chapter/volume info,
    a release message for a recognised tag, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    known_tags = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (name, tl_type) in known_tags.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the leading '@pytest.mark' of the parametrize decorator was
# lost in the file (only '.parametrize(...)' remained) — restored here.
@pytest.mark.parametrize('filename', ['inforec_03.lis', 'inforec_04.lis'])
def test_inforec_unstructured(filename):
    """An unstructured information record exposes raw component blocks.

    The record is not structured, so the components are readable one by
    one, but table()/table_name() must refuse to build a table view.
    """
    path = 'data/lis/records/' + filename
    (f,) = lis.load(path)
    wellsite = f.wellsite_data()[0]
    components = wellsite.components()

    assert wellsite.isstructured() == False
    assert len(components) == 15

    # Component mnemonics/values in file order (fixed-width, space padded).
    expected = ['WN ', 'ALLO', ' ', ' ', '15/9-F-15 ',
                'CN ', 'ALLO', ' ', ' ', 'StatoilHydro',
                'SRVC', 'ALLO', ' ', ' ', 'Geoservices ']
    for i, cb in enumerate(components):
        assert cb.component == expected[i]

    # Unstructured records cannot be presented as a table.
    with pytest.raises(ValueError):
        _ = wellsite.table()
    with pytest.raises(ValueError):
        _ = wellsite.table_name()
# NOTE(review): the leading '@pytest.mark' of the django_db marker was lost
# in the file (only '.django_db' remained) — restored here.
@pytest.mark.django_db
def test_award_amount_success(client, monkeypatch, generic_account_data, unlinked_faba_account_data, helpers, elasticsearch_account_index):
    """The amounts endpoint aggregates outlay/obligation per award-type group."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_account_index)
    helpers.patch_datetime_now(monkeypatch, 2022, 12, 31)
    helpers.reset_dabs_cache()

    # Loans ('L') with def_codes A/09/10: one matching award.
    resp = helpers.post_for_amount_endpoint(client, url, ['L'], ['A', '09', '10'])
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data['award_count'] == 1
    assert resp.data['outlay'] == 222.0
    assert resp.data['obligation'] == 200.0

    # Direct payments/other ('N', 'O') with def_codes A/07/08: two awards.
    resp = helpers.post_for_amount_endpoint(client, url, ['N', 'O'], ['A', '07', '08'])
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data['award_count'] == 2
    assert resp.data['outlay'] == 334.0
    assert resp.data['obligation'] == 4.0

    # No award matches type '9' with def_code B: zeroed totals.
    resp = helpers.post_for_amount_endpoint(client, url, ['9'], ['B'])
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data['award_count'] == 0
    assert resp.data['outlay'] == 0.0
    assert resp.data['obligation'] == 0.0

    # Loan group includes face_value_of_loan in the response.
    resp = helpers.post_for_amount_endpoint(client, url, ['L', 'M', 'N', 'O', 'P'], ['07', '08'])
    assert resp.status_code == status.HTTP_200_OK
    assert resp.data['award_count'] == 2
    assert resp.data['outlay'] == 334.0
    assert resp.data['obligation'] == 4.0
    assert resp.data['face_value_of_loan'] == 7777.0
class GreenPile():
    """An ordered pile of greenthread results.

    spawn() schedules work in a GreenPool; iterating the pile yields the
    results in spawn order, blocking until each is ready.
    """

    def __init__(self, size_or_pool=1000):
        # Accept either an existing pool or a size for a fresh one.
        if isinstance(size_or_pool, GreenPool):
            self.pool = size_or_pool
        else:
            self.pool = GreenPool(size_or_pool)
        self.waiters = queue.LightQueue()
        self.counter = 0

    def spawn(self, func, *args, **kw):
        """Schedule func(*args, **kw); its result becomes available via iteration."""
        self.counter += 1
        try:
            self.waiters.put(self.pool.spawn(func, *args, **kw))
        except:  # roll the counter back on any failure, then re-raise
            self.counter -= 1
            raise

    def __iter__(self):
        return self

    def next(self):
        """Wait for and return the next result; StopIteration when none remain."""
        if self.counter == 0:
            raise StopIteration()
        return self._next()

    # Python 3 iterator protocol.
    __next__ = next

    def _next(self):
        try:
            return self.waiters.get().wait()
        finally:
            self.counter -= 1
def test_return_named_type_with_named_type_and_null_in_union():
    """Roundtrip a union of null plus two named types with return_named_type.

    NOTE(review): the original source contained a stray quote where a
    docstring had been collapsed; restored as a proper docstring.
    """
    schema = {
        'type': 'record',
        'name': 'my_record',
        'fields': [{
            'name': 'my_union',
            'type': [
                'null',
                {'name': 'foo', 'type': 'fixed', 'size': 10},
                {'name': 'bar', 'type': 'enum', 'symbols': ['A', 'B']},
            ],
        }],
    }
    records = [
        {'my_union': None},
        {'my_union': ('foo', b'')},
        {'my_union': ('bar', 'A')},
    ]
    rt_records = roundtrip(
        fastavro.parse_schema(schema),
        records,
        return_named_type=True,
        return_named_type_override=True,
    )
    # With return_named_type, union values come back as (name, value) tuples.
    assert records == rt_records
class ColabPainting(flx.PyWidget):
    """Python-side component of the collaborative painting app.

    NOTE(review): decorator text was mangled in the file (residues
    "('add_paint_for_all')" and ".reaction('connections_changed')");
    the relay/manager reaction decorators — and the action decorator on
    add_paint — are restored below. Confirm against the original app.
    """

    color = flx.ColorProp(settable=True, doc='Paint color')
    status = flx.StringProp('', settable=True, doc='Status text')

    def init(self):
        self.set_color(random.choice(COLORS))
        self.widget = ColabPaintingView(self)
        self._update_participants()

    @flx.action
    def add_paint(self, pos):
        """Broadcast a paint dot at *pos* (in this user's color) to everyone."""
        relay.add_paint_for_all(pos, self.color.hex)

    @relay.reaction('add_paint_for_all')
    def _any_user_adds_paint(self, *events):
        """Forward every user's paint events to this session's canvas."""
        for ev in events:
            self.widget.add_paint_to_canvas(ev.pos, ev.color)

    @flx.manager.reaction('connections_changed')
    def _update_participants(self, *events):
        """Keep the participant-count status line current."""
        if self.session.status:
            sessions = flx.manager.get_connections(self.session.app_name)
            n = len(sessions)
            del sessions
            self.set_status('%i persons are painting' % n)
# NOTE(review): the decorator was mangled to the bare residue
# "(bot, 'playerCollect')" — restored as the event-handler decorator
# used by the mineflayer Python bridge. Confirm against the original.
@On(bot, 'playerCollect')
def playerCollect(this, collector, collected):
    """Chat a jealous message whenever a player picks up an item entity."""
    if collector.type == 'player' and collected.type == 'object':
        # metadata[10] holds the raw item NBT for a dropped-item entity
        # (assumption from the original code — confirm for the MC version).
        raw_item = collected.metadata[10]
        item = Item.fromNotch(raw_item)
        header = ("I'm so jealous. " + collector.username) if collector.username != bot.username else 'I '
        bot.chat(f'{header} collected {item.count} {item.displayName}')
def init_adapter(model: 'PreTrainedModel', model_args: 'ModelArguments', finetuning_args: 'FinetuningArguments', is_trainable: bool, is_mergeable: bool) -> 'PreTrainedModel':
    """Attach the requested fine-tuning method (full / freeze / lora) to *model*.

    Depending on ``finetuning_args.finetuning_type`` this either upcasts the
    whole model for full fine-tuning, freezes all but selected layers, or
    wraps the model with (and optionally merges) LoRA adapters from the
    given checkpoint directories. Returns the (possibly wrapped) model.
    """
    if ((finetuning_args.finetuning_type == 'none') and is_trainable):
        raise ValueError('You cannot use finetuning_type=none while training.')
    if ((finetuning_args.finetuning_type == 'full') and is_trainable):
        logger.info('Fine-tuning method: Full')
        # Train the full model in fp32.
        model = model.float()
    if (finetuning_args.finetuning_type == 'freeze'):
        logger.info('Fine-tuning method: Freeze')
        for (name, param) in model.named_parameters():
            # Only parameters whose name matches a trainable layer keep
            # gradients (and are upcast to fp32); everything else is frozen.
            if (not any(((trainable_layer in name) for trainable_layer in finetuning_args.trainable_layers))):
                param.requires_grad_(False)
            else:
                param.data = param.data.to(torch.float32)
        if (model_args.checkpoint_dir is not None):
            assert load_trainable_params(model, model_args.checkpoint_dir[0]), 'Model checkpoint is not correctly loaded.'
    if (finetuning_args.finetuning_type == 'lora'):
        logger.info('Fine-tuning method: LoRA')
        latest_checkpoint = None
        if (model_args.checkpoint_dir is not None):
            # The first checkpoint dir must contain a LoRA weight + config.
            assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], WEIGHTS_NAME)), 'Provided path ({}) does not contain a LoRA weight.'.format(model_args.checkpoint_dir[0])
            assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], CONFIG_NAME)), 'The given checkpoint may be not a LoRA checkpoint, please specify `--finetuning_type full/freeze` instead.'
            # When resuming training (or when merging is not possible) the
            # last checkpoint is kept as a live adapter; the rest are merged.
            if ((is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable)):
                (checkpoints_to_merge, latest_checkpoint) = (model_args.checkpoint_dir[:(- 1)], model_args.checkpoint_dir[(- 1)])
            else:
                checkpoints_to_merge = model_args.checkpoint_dir
            for checkpoint in checkpoints_to_merge:
                # Merge each adapter into the base weights, in order.
                model = PeftModel.from_pretrained(model, checkpoint)
                model = model.merge_and_unload()
            if (len(checkpoints_to_merge) > 0):
                logger.info('Merged {} model checkpoint(s).'.format(len(checkpoints_to_merge)))
            if (latest_checkpoint is not None):
                # Load the remaining adapter without merging it.
                model = PeftModel.from_pretrained(model, latest_checkpoint, is_trainable=is_trainable)
        if (is_trainable and (latest_checkpoint is None)):
            # Fresh training run: create a new LoRA adapter.
            lora_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=finetuning_args.lora_rank, lora_alpha=finetuning_args.lora_alpha, lora_dropout=finetuning_args.lora_dropout, target_modules=finetuning_args.lora_target)
            model = get_peft_model(model, lora_config)
    if (model_args.checkpoint_dir is not None):
        logger.info('Loaded fine-tuned model from checkpoint(s): {}'.format(','.join(model_args.checkpoint_dir)))
    return model
def _run_scripts(args: argparse.Namespace, scripts: List[FalScript], faldbt: FalDbt):
    """Run the given fal scripts through the scheduler and raise if any failed."""
    groups = [TaskGroup(FalLocalHookTask.from_fal_script(script)) for script in scripts]
    scheduler = Scheduler(groups)
    parallel_executor(args, faldbt, scheduler)

    # Collect the scripts whose task group ended in FAILURE.
    failed_tasks: List[FalLocalHookTask] = [
        group.task for group in scheduler.filter_groups(Status.FAILURE)
    ]
    failed_script_ids = [task.build_fal_script(faldbt).id for task in failed_tasks]
    if failed_script_ids:
        raise RuntimeError(f"Error in scripts {', '.join(failed_script_ids)}")
def extractMeteorStrikeCom(item):
    """Parse a release feed item from meteorstrike.com.

    Returns None for previews or items without chapter/volume info,
    a release message for a recognised tag, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    known_tags = {
        'PRC': ('PRC', 'translated'),
        'WGM': ("World's Greatest Militia", 'translated'),
        're;s': ('RE: Survival', 'translated'),
        'FM': ('Fallen Monarch', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (name, tl_type) in known_tags.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def check_distribution():
    """Map the running distribution's codename to a supported release tag.

    Exits the program when the distribution is not supported.
    """
    codename = distro.codename().lower()
    # (known codenames, debug message, release tag) — checked in order.
    supported = (
        (BIONIC_CODE_NAMES, 'Ubuntu 18.04 detected', 'bionic'),
        (FOCAL_CODE_NAMES, 'Ubuntu 20.04 detected', 'focal'),
        (JAMMY_CODE_NAMES, 'Ubuntu 22.04 detected', 'jammy'),
        (BUSTER_CODE_NAMES, 'Debian 10 detected', 'buster'),
        (BULLSEYE_CODE_NAMES, 'Debian 11/Kali detected', 'bullseye'),
    )
    for names, message, release in supported:
        if codename in names:
            logging.debug(message)
            return release
    sys.exit(f'Your Distribution ({distro.id()} {distro.version()}) is not supported. FACT Extractor Installer requires Ubuntu 18.04/20.04/22.04, Debian 9/10, Kali or compatible!')
def normalize_grib_keys(f):
    """Wrap *f* so common GRIB key spellings are aliased to canonical names."""
    # (canonical key, accepted aliases) — applied in the original order.
    aliases = (
        ('levelist', ['level', 'levellist']),
        ('levtype', ['leveltype']),
        ('param', ['variable', 'parameter']),
        ('number', ['realization', 'realisation']),
        ('class', ['klass', 'class_']),
    )
    for canonical, names in aliases:
        f = alias_argument(canonical, names)(f)
    return f
class TestNonVendorProject(BaseAEATestCase, BaseTestCase):
    """Upgrade on an agent whose packages are all non-vendor changes nothing.

    NOTE(review): decorator text was stripped in the file — ``setup(cls)``
    strongly implies a lost ``@classmethod``, and the four bare tuples/strings
    before the test method are residues of ``@mock.patch`` decorators;
    restored below. Confirm against the original test module.
    """

    capture_log = True

    @classmethod
    def setup(cls):
        """Fetch the generic buyer agent and enter its context."""
        super(TestNonVendorProject, cls).setup()
        cls.change_directory(Path('..'))
        cls.agent_name = 'generic_buyer'
        cls.run_cli_command('fetch', 'fetchai/generic_buyer:0.30.5', '--alias', cls.agent_name)
        cls.agents.add(cls.agent_name)
        cls.set_agent_context(cls.agent_name)

    @mock.patch('aea.cli.upgrade.ItemUpgrader.is_non_vendor', True)
    @mock.patch('aea.cli.upgrade.ItemUpgrader.check_upgrade_is_required', return_value='0.99.0')
    @mock.patch('aea.cli.upgrade.ItemUpgrader.remove_item')
    @mock.patch('aea.cli.upgrade.ItemUpgrader.add_item')
    def test_non_vendor_nothing_to_upgrade(self, *mocks):
        """Upgrade must leave the dependency set unchanged for non-vendor items."""
        with cd(self.agent_name):
            base_agent_items = set(ItemRemoveHelper(self.load_mock_context()).get_agent_dependencies_with_reverse_dependencies().keys())
            self.runner.invoke(cli, ['--skip-consistency-check', 'upgrade'], standalone_mode=False, catch_exceptions=False)
            agent_items = set(ItemRemoveHelper(self.load_mock_context()).get_agent_dependencies_with_reverse_dependencies().keys())
            assert base_agent_items == agent_items
class OptionSeriesPolygonSonificationTracksMappingVolume(Options):
    """Mapping options for the volume of a polygon-series sonification track.

    NOTE(review): the duplicate getter/setter ``def`` pairs indicate stripped
    ``@property``/``@x.setter`` decorators (without them the setters shadow
    the getters); restored below.
    """

    @property
    def mapFunction(self):
        """The mapping function used for this parameter."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The point property to map this audio parameter to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped audio parameter."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped audio parameter."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The data range the mapping parameter is evaluated within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def unbuffered(proc, stream='stdout'):
    """Yield output from *proc*'s stream one line-like chunk at a time.

    Reads a single character at a time so output is delivered without
    waiting for the child's buffers to flush. Stops when the global
    ``prockilled`` flag is set or the process exits with no more output.
    """
    handle = getattr(proc, stream)
    with contextlib.closing(handle):
        while prockilled == False:  # noqa: E712 — module-level flag set elsewhere
            chars = []
            ch = handle.read(1)
            # EOF and the process has exited: nothing more to read.
            if ch == '' and proc.poll() is not None:
                break
            # Accumulate characters until a newline-like char (``newlines``
            # is a module-level set defined elsewhere).
            while ch not in newlines:
                if ch == '' and proc.poll() is not None:
                    break
                chars.append(ch)
                ch = handle.read(1)
            yield ''.join(chars)
def collect_view_data(request, second_nav, dc_dns_only=False, **context):
    """Build the common template context shared by GUI views.

    Extra keyword arguments override/extend the defaults; listeners of the
    ``view_data_collected`` signal may modify the context in place.
    """
    data = {
        'navi': get_navigation(request, second_nav, dc_dns_only=dc_dns_only),
        'load_base': get_base_template(request),
        'tasklog_cached': get_tasklog_cached(request),
        'pending_tasks': get_user_tasks(request),
        'dcs_form': DcSwitch(request, prefix='dc'),
        'submenu_auto': True,
    }
    data.update(context)
    # Give registered listeners a chance to extend the context.
    view_data_collected.send(sender='gui.utils.collect_view_data', request=request, context=data)
    return data
def sitemap_to_df(sitemap_url, max_workers=8, recursive=True):
    """Download one or more XML sitemaps into a pandas DataFrame.

    Handles three inputs: a robots.txt URL (all referenced sitemaps are
    fetched), a gzipped sitemap, or a plain sitemap / sitemap index.
    Sitemap indexes are fetched concurrently with *max_workers* threads
    when *recursive* is True. Fetch errors become rows with an ``errors``
    column instead of raising.
    """
    if sitemap_url.endswith('robots.txt'):
        # Expand robots.txt into its sitemap URLs and concatenate them all.
        return pd.concat([sitemap_to_df(sitemap, recursive=recursive) for sitemap in _sitemaps_from_robotstxt(sitemap_url)], ignore_index=True)
    if sitemap_url.endswith('xml.gz'):
        # Gzipped sitemap: request gzip explicitly and decompress on read.
        xml_text = urlopen(Request(sitemap_url, headers={'Accept-Encoding': 'gzip', 'User-Agent': ('advertools-' + version)}))
        resp_headers = xml_text.getheaders()
        xml_text = GzipFile(fileobj=xml_text)
    else:
        # `headers` is a module-level default headers dict (defined elsewhere).
        xml_text = urlopen(Request(sitemap_url, headers=headers))
        resp_headers = xml_text.getheaders()
    xml_string = xml_text.read()
    root = ElementTree.fromstring(xml_string)
    sitemap_df = pd.DataFrame()
    if ((root.tag.split('}')[(- 1)] == 'sitemapindex') and recursive):
        # Sitemap index: gather child sitemap URLs, then fetch them in parallel.
        multi_sitemap_df = pd.DataFrame()
        sitemap_url_list = []
        for elem in root:
            for el in elem:
                if ('loc' in el.tag):
                    if (el.text == sitemap_url):
                        # Guard against infinite recursion on self-referencing indexes.
                        error_df = pd.DataFrame({'sitemap': [sitemap_url], 'errors': ['WARNING: Sitemap contains a link to itself']})
                        multi_sitemap_df = pd.concat([multi_sitemap_df, error_df], ignore_index=True)
                    else:
                        sitemap_url_list.append(el.text)
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            to_do = []
            for sitemap in sitemap_url_list:
                future = executor.submit(sitemap_to_df, sitemap)
                to_do.append(future)
            done_iter = futures.as_completed(to_do)
            for future in done_iter:
                try:
                    multi_sitemap_df = pd.concat([multi_sitemap_df, future.result()], ignore_index=True)
                except Exception as e:
                    # Record the failure as a row; map the future back to its
                    # URL via the id() of the future object.
                    error_df = pd.DataFrame(dict(errors=str(e)), index=range(1))
                    future_str = hex(id(future))
                    hexes = [hex(id(f)) for f in to_do]
                    index = hexes.index(future_str)
                    error_df['sitemap'] = sitemap_url_list[index]
                    logging.warning(msg=((str(e) + ' ') + sitemap_url_list[index]))
                    multi_sitemap_df = pd.concat([multi_sitemap_df, error_df], ignore_index=True)
        return multi_sitemap_df
    else:
        # Regular sitemap: parse its <url> entries into rows.
        logging.info(msg=('Getting ' + sitemap_url))
        elem_df = _parse_sitemap(root)
        sitemap_df = pd.concat([sitemap_df, elem_df], ignore_index=True)
        # For an empty frame a list is needed to create the column.
        sitemap_df['sitemap'] = ([sitemap_url] if sitemap_df.empty else sitemap_url)
    if ('lastmod' in sitemap_df):
        try:
            sitemap_df['lastmod'] = pd.to_datetime(sitemap_df['lastmod'], utc=True)
        except Exception as e:
            # Best effort: keep the raw strings when parsing fails.
            pass
    if ('priority' in sitemap_df):
        try:
            sitemap_df['priority'] = sitemap_df['priority'].astype(float)
        except Exception as e:
            # Best effort: keep the raw values when conversion fails.
            pass
    # Attach ETag / Last-Modified response headers as columns.
    etag_lastmod = {header.lower().replace('-', '_'): val for (header, val) in resp_headers if (header.lower() in ['etag', 'last-modified'])}
    sitemap_df = sitemap_df.assign(**etag_lastmod)
    if ('last_modified' in sitemap_df):
        sitemap_df['sitemap_last_modified'] = pd.to_datetime(sitemap_df['last_modified'])
        del sitemap_df['last_modified']
    sitemap_df['sitemap_size_mb'] = ((len(xml_string) / 1024) / 1024)
    sitemap_df['download_date'] = pd.Timestamp.now(tz='UTC')
    return sitemap_df
def get_vm(filename):
    """Scan a binary file for signatures of known VM-detection tricks.

    Returns the list of trick/VM names whose signature occurs in the
    file's raw bytes (regex names first, then byte-signature names).
    """
    trk = []
    # Plain markers searched with a case-insensitive regex.
    # NOTE(review): 'WMvare' looks like a typo for 'VMware' but is kept
    # as-is to preserve the original matching behavior.
    VM_Str = {'Virtual Box': 'VBox', 'VMware': 'WMvare'}
    # Instruction-byte signatures; each is reversed before searching,
    # matching the original logic.
    VM_Sign = {'Red Pill': '\x0f\x01\r\x00\x00\x00\x00A', 'VirtualPc trick': '\x0f?\x07\x0b', 'VMware trick': 'VMXh', 'VMCheck.dll': 'EC\x00\x01', 'VMCheck.dll for VirtualPC': '\x0f?\x07\x0bCEuyyyy', 'Xen': 'XenVMM', 'Bochs & QEmu CPUID Trick': 'DMAc', 'Torpig VMM Trick': 'eiyyy%\x00\x00\x00y3E=\x00\x00\x00\x80\x0f\x95A\x8bAA', 'Torpig (UPX) VMM Trick': "QQ\x0f\x01'\x00AuO5\x02aANf%212\x83\x7fN=\x06\x80\x0f\x95A\x8bAA"}
    with open(filename, 'rb') as f:
        buf = f.read()
    # BUG FIX: the buffer is bytes, so the str patterns must be encoded
    # (latin-1 maps code points 0-255 one-to-one onto bytes) before being
    # used with re.findall()/bytes.find(); the original raised TypeError.
    for string in VM_Str:
        pattern = VM_Str[string].encode('latin-1')
        if re.findall(pattern, buf, (re.IGNORECASE | re.MULTILINE)):
            trk.append(string)
    for trick in VM_Sign:
        signature = VM_Sign[trick][::(- 1)].encode('latin-1')
        if buf.find(signature) > (- 1):
            trk.append(trick)
    return trk
# NOTE(review): the leading '@pytest.mark' of the parametrize decorator was
# lost in the file (only '.parametrize(...)' remained) — restored here.
@pytest.mark.parametrize(
    'value,expected',
    (
        ('', False),
        ('a', False),
        (1, False),
        (True, False),
        ({'a': 1, 'b': 2}, False),
        (None, False),
        (b'', False),
        (b'arst', False),
        (('a', 'b'), False),
        (['a', 'b'], False),
        ([b'a', b'b'], False),
        ([b'a', None, b'b'], False),
        (list(), False),
        ([None], False),
        (([],), False),
        (([tuple()],), False),
        ([tuple()], False),
        ((tuple(), []), False),
        ((TOPICS_EMPTY, (b'arst',)), False),
        (TOPIC_A, False),
        (TOPICS_EMPTY, False),
        (TOPICS_SINGLE_NULL, True),
        (TOPICS_MANY, True),
        (TOPICS_MANY_WITH_NULL, True),
        (NESTED_TOPICS_A, True),
        (NESTED_TOPICS_B, True),
        (NESTED_TOPICS_C, True),
        (NESTED_TOPICS_D, True),
        (NESTED_TOPICS_E, True),
    ),
)
def test_is_valid_with_nested_topic_array(value, expected):
    """Only properly nested topic arrays validate; scalars and flat values do not."""
    actual = is_valid_with_nested_topic_array(value)
    assert actual is expected
class OptionPlotoptionsHeatmapSonificationContexttracksMappingLowpassFrequency(Options):
    """Mapping options for the lowpass filter frequency of a heatmap sonification track.

    NOTE(review): the duplicate getter/setter ``def`` pairs indicate stripped
    ``@property``/``@x.setter`` decorators (without them the setters shadow
    the getters); restored below.
    """

    @property
    def mapFunction(self):
        """The mapping function used for this parameter."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """The point property to map this audio parameter to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped audio parameter."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped audio parameter."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """The data range the mapping parameter is evaluated within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class SrcInfo():
    """Location of a span of source code: file, line(s), column(s), function."""

    def __init__(self, filename, lineno, col_offset=None, end_lineno=None, end_col_offset=None, function=None):
        self.filename = filename
        self.lineno = lineno
        self.col_offset = col_offset
        self.end_lineno = end_lineno
        self.end_col_offset = end_col_offset
        self.function = function

    def __str__(self):
        # "file:line" or "file:line:col" when a column is known.
        if self.col_offset is None:
            return f'{self.filename}:{self.lineno}'
        return f'{self.filename}:{self.lineno}:{self.col_offset}'
class OptionPlotoptionsTimelineSonificationContexttracksPointgrouping(Options):
    """Point-grouping options for a timeline sonification context track.

    NOTE(review): the duplicate getter/setter ``def`` pairs indicate stripped
    ``@property``/``@x.setter`` decorators (without them the setters shadow
    the getters); restored below.
    """

    @property
    def algorithm(self):
        """The grouping algorithm. Defaults to 'minmax'."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is enabled. Defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """The timespan (milliseconds) to group points within. Defaults to 15."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """The point property used to pick points within groups. Defaults to 'y'."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the leading '@pytest.mark' of the usefixtures decorator was
# lost in the file (only '.usefixtures(...)' remained) — restored here.
@pytest.mark.usefixtures('use_tmpdir')
def test_ensemble_config_fails_on_non_sensical_refcase_file():
    """A REFCASE whose summary files are not real eclipse output must raise IOError."""
    refcase_file = 'CEST_PAS_UNE_REFCASE'
    # ASCII-art content that is clearly not a valid UNSMRY/SMSPEC payload.
    refcase_file_content = '\n_________________________________________ _____ ____________________\n\\______ \\_ _____/\\_ _____/\\_ ___ \\ / _ \\ / _____/\\_ _____/\n | _/| __)_ | __) / \\ \\/ / /_\\ \\ \\_____ \\ | __)_\n | | \\| \\ | \\ \\ \\____/ | \\/ \\ | \\\n |____|_ /_______ / \\___ / \\______ /\\____|__ /_______ //_______ /\n \\/ \\/ \\/ \\/ \\/ \\/ \\/\n'
    with open((refcase_file + '.UNSMRY'), 'w+', encoding='utf-8') as refcase_file_handler:
        refcase_file_handler.write(refcase_file_content)
    with open((refcase_file + '.SMSPEC'), 'w+', encoding='utf-8') as refcase_file_handler:
        refcase_file_handler.write(refcase_file_content)
    with pytest.raises(expected_exception=IOError, match=refcase_file):
        config_dict = {ConfigKeys.REFCASE: refcase_file}
        EnsembleConfig.from_dict(config_dict=config_dict)
def test_overview_not_authorized(application, default_settings):
    """An unauthorized request to the management overview redirects with a flash."""
    view = views.ManagementOverview.as_view('overview')
    with application.test_request_context():
        response = view()
        flashes = get_flashed_messages(with_categories=True)
        # Redirected away, with a danger-category flash explaining why.
        assert response.status_code == 302
        assert flashes[0] == ('danger', 'You are not allowed to access the management panel')
class OptionSeriesSplineSonificationContexttracksActivewhen(Options):
    """Conditions for when a spline sonification context track is active.

    NOTE(review): the duplicate getter/setter ``def`` pairs indicate stripped
    ``@property``/``@x.setter`` decorators (without them the setters shadow
    the getters); restored below.
    """

    @property
    def crossingDown(self):
        """Activate when the tracked property crosses below this value."""
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        """Activate when the tracked property crosses above this value."""
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        """Upper bound of the active range for the tracked property."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the active range for the tracked property."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """The point property the activation conditions track."""
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class FineTuningWrapper(nn.Module):
    """Wrap a pretrained trunk with a fresh head for fine-tuning.

    The trunk is reduced to a feature extractor that returns the output of
    ``feature_layer``; the head is applied to that feature map. By default
    the trunk's parameters are frozen.
    """

    def __init__(self, trunk: nn.Module, feature_layer: str, head: nn.Module, freeze_trunk: bool=True) -> None:
        super().__init__()
        # Keep only the subgraph needed to produce the requested layer.
        self.trunk: GraphModule = create_feature_extractor(trunk, [feature_layer])
        self.head = head
        self.feature_layer = feature_layer
        if freeze_trunk:
            self.freeze_trunk()

    def freeze_trunk(self) -> None:
        """Disable gradient computation for every trunk parameter."""
        for parameter in self.trunk.parameters():
            parameter.requires_grad = False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the trunk, pick out the configured feature map, apply the head."""
        feature_maps = self.trunk(x)
        return self.head(feature_maps[self.feature_layer])
class DcStorageView(APIView):
    """API view for attaching/detaching node storages to/from a datacenter.

    Storage names have the form ``zpool@hostname``.
    NOTE(review): the '@' characters were lost in the file —
    ``name.split('')`` (which always raises ValueError) and ``''.join(i)``
    are restored to ``name.split('@')`` / ``'@'.join(i)``.
    """

    serializer = DcNodeStorageSerializer
    order_by_default = ('node__hostname', 'zpool')
    order_by_field_map = {'hostname': 'node__hostname', 'zpool': 'zpool'}

    def __init__(self, request, name, data):
        super(DcStorageView, self).__init__(request)
        self.data = data
        self.name = name
        dc = request.dc

        if name:
            # Single storage addressed as "zpool@hostname".
            try:
                zpool, hostname = name.split('@')
                if not (zpool and hostname):
                    raise ValueError
            except ValueError:
                raise ObjectNotFound(model=NodeStorage)

            attrs = {'node__hostname': hostname, 'zpool': zpool}
            # POST attaches a storage that is not yet in this DC, so the
            # lookup must not be restricted to the DC.
            if request.method != 'POST':
                attrs['dc'] = dc

            ns = get_object(request, NodeStorage, attrs, sr=('node', 'storage', 'storage__owner'),
                            exists_ok=True, noexists_fail=True)
            ns.set_dc(dc)
            try:
                ns.set_dc_node(DcNode.objects.get(node=ns.node, dc=dc))
            except DcNode.DoesNotExist:
                raise PreconditionRequired(_('Compute node is not available'))
        else:
            # Listing: all storages attached to this DC.
            ns = NodeStorage.objects.filter(dc=dc).order_by(*self.order_by)
            if self.full or self.extended:
                dc_nodes = {dn.node.hostname: dn for dn in DcNode.objects.select_related('node').filter(dc=request.dc)}
                ns = ns.select_related('node', 'storage', 'storage__owner')
                for i in ns:
                    i.set_dc_node(dc_nodes.get(i.node.hostname, None))
                    i.set_dc(dc)

        self.ns = ns

    def get(self, many=False):
        """Return one storage or the list of storages in the DC."""
        if self.extended:
            serializer = ExtendedDcNodeStorageSerializer
        else:
            serializer = self.serializer

        if many or not self.name:
            if self.full or self.extended:
                if self.ns:
                    res = serializer(self.request, self.ns, many=True).data
                else:
                    res = []
            else:
                # Short listing: "zpool@hostname" identifiers only.
                res = ['@'.join(i) for i in self.ns.values_list('zpool', 'node__hostname')]
        else:
            res = serializer(self.request, self.ns).data

        return SuccessTaskResponse(self.request, res)

    def post(self):
        """Attach the storage to this DC."""
        ns, dc = self.ns, self.request.dc

        if ns.dc.filter(id=dc.id).exists():
            raise ObjectAlreadyExists(model=NodeStorage)

        ser = self.serializer(self.request, ns)
        ns.dc.add(dc)

        return SuccessTaskResponse(self.request, ser.data, obj=ns, status=status.HTTP_201_CREATED,
                                   detail_dict=ser.detail_dict(), msg=LOG_STORAGE_ATTACH)

    def delete(self):
        """Detach the storage from this DC, unless VMs or backups still use it."""
        ns, dc = self.ns, self.request.dc

        for vm in dc.vm_set.filter(node=ns.node):
            if ns.zpool in vm.get_used_disk_pools():
                raise PreconditionRequired(_('Storage is used by some VMs'))

        if dc.backup_set.filter(zpool=ns).exists():
            raise PreconditionRequired(_('Storage is used by some VM backups'))

        ser = self.serializer(self.request, ns)
        ns.dc.remove(dc)

        return SuccessTaskResponse(self.request, None, obj=ns, detail_dict=ser.detail_dict(),
                                   msg=LOG_STORAGE_DETACH)
class OptionsTableCell(OptionsWithTemplates):
    """Options for a table cell component.

    NOTE(review): the duplicate ``cssClasses``/``center`` defs indicate
    stripped ``@property``/``@x.setter`` decorators (without them the
    setters shadow the getters); restored below.
    """

    @property
    def cssClasses(self):
        """CSS classes attached to the cell. Defaults to an empty list."""
        return self._config_get([])

    @cssClasses.setter
    def cssClasses(self, values):
        self._config(values)

    @property
    def center(self):
        """Whether the cell content is horizontally centered."""
        return self._config_get()

    @center.setter
    def center(self, flag):
        if flag:
            self.component.attr['class'].add('text-center')

    def position(self, alias):
        """Add a Bootstrap text-position class, e.g. 'center' -> text-center."""
        self.component.attr['class'].add(('text-%s' % alias))

    def align(self, alias):
        """Add a Bootstrap vertical-alignment class, e.g. align-middle."""
        self.component.attr['class'].add(('align-%s' % alias))

    def padding(self, value, position='s'):
        """Add a Bootstrap padding utility class, e.g. ps-2."""
        self.component.attr['class'].add(('p%s-%s' % (position, value)))

    def margin(self, value, position='s'):
        """Add a Bootstrap margin utility class, e.g. ms-2."""
        self.component.attr['class'].add(('m%s-%s' % (position, value)))
class TestAddAndEjectComponent(AEATestCaseEmpty):
    """The echo skill can be added locally and then ejected."""

    def test_add_and_eject(self):
        """Both the add and the eject CLI commands must succeed."""
        add_result = self.add_item('skill', str(ECHO_SKILL_PUBLIC_ID), local=True)
        assert add_result.exit_code == 0

        eject_result = self.eject_item('skill', str(ECHO_SKILL_PUBLIC_ID))
        assert eject_result.exit_code == 0
class TestQuery(BodhiClientTestCase):
    """Tests for BodhiClient.query(): each keyword must map to the right request params."""

    def test_with_bugs_empty_string(self, mocker):
        """An empty ``bugs`` string is sent as None."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(builds='bodhi-2.4.0-1.fc26', bugs='')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.fc26', 'bugs': None})

    def test_with_limit(self, mocker):
        """``limit`` is translated to ``rows_per_page``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(builds='bodhi-2.4.0-1.fc26', limit=50)
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.fc26', 'rows_per_page': 50})

    def test_with_mine_false(self, mocker):
        """``mine=False`` is passed through without a ``user`` param."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(builds='bodhi-2.4.0-1.fc26', mine=False)
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.fc26', 'mine': False})

    def test_with_mine_true(self, mocker):
        """``mine=True`` adds the authenticated username as ``user``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        client.oidc.username = 'bowlofeggs'
        result = client.query(builds='bodhi-2.4.0-1.fc26', mine=True)
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.fc26', 'mine': True, 'user': 'bowlofeggs'})

    def test_with_mine_no_username(self, mocker):
        """``mine=True`` without a username raises BodhiClientException."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        client.oidc.username = None
        with pytest.raises(bindings.BodhiClientException) as exc:
            client.query(builds='bodhi-2.4.0-1.fc26', mine=True)
        assert (str(exc.value) == 'Could not get user info.')

    def test_with_package_el_build(self, mocker):
        """An EL NVR passed as ``package`` is treated as a build."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(package='bodhi-2.4.0-1.el7')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.el7'})

    def test_with_package_epel_id(self, mocker):
        """A FEDORA-EPEL update id passed as ``package`` is treated as ``updateid``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(package='FEDORA-EPEL-2017-c3b112eb9e')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'updateid': 'FEDORA-EPEL-2017-c3b112eb9e'})

    def test_with_package_fc_build(self, mocker):
        """A Fedora NVR passed as ``package`` is treated as a build."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(package='bodhi-2.4.0-1.fc26')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'builds': 'bodhi-2.4.0-1.fc26'})

    def test_with_package_fedora_id(self, mocker):
        """A FEDORA update id passed as ``package`` is treated as ``updateid``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(package='FEDORA-2017-52506b30d4')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'updateid': 'FEDORA-2017-52506b30d4'})

    def test_with_package_name(self, mocker):
        """A bare package name is sent as ``packages``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(package='bodhi')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi'})

    def test_with_release_list(self, mocker):
        """A list ``release`` is passed through as ``releases``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(packages='bodhi', release=['f27'])
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi', 'releases': ['f27']})

    def test_with_release_str(self, mocker):
        """A string ``release`` is wrapped in a list as ``releases``."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(packages='bodhi', release='f26')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi', 'releases': ['f26']})

    def test_with_type_(self, mocker):
        """``type_`` is renamed to ``type`` in the request params."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(packages='bodhi', type_='security')
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi', 'type': 'security'})

    def test_query_with_rows_per_page(self, mocker):
        """``rows_per_page`` is passed through unchanged."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(packages='bodhi', rows_per_page=10)
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi', 'rows_per_page': 10})

    def test_query_with_page(self, mocker):
        """``page`` is passed through unchanged."""
        client = bindings.BodhiClient()
        client.send_request = mocker.MagicMock(return_value='return_value')
        result = client.query(packages='bodhi', page=5)
        assert (result == 'return_value')
        client.send_request.assert_called_once_with('updates/', verb='GET', params={'packages': 'bodhi', 'page': 5})
# NOTE(review): the leading '@pytest.mark' of the parametrize decorator was
# lost in the file (only '.parametrize(...)' remained) — restored here.
@pytest.mark.parametrize('existing_content', [None, 'blublu'])
def test_temporary_save_path(tmp_path: Path, existing_content: Optional[str]) -> None:
    """temporary_save_path writes to a '.save_tmp' file and moves it into place on exit."""
    filepath = tmp_path / 'save_and_move_test.txt'
    if existing_content:
        filepath.write_text(existing_content)
    with utils.temporary_save_path(filepath) as tmp:
        assert str(tmp).endswith('.txt.save_tmp')
        tmp.write_text('12')
        if existing_content:
            # The original file is untouched while the context is open.
            assert filepath.read_text() == existing_content
    # On exit the temporary file replaces the original.
    assert filepath.read_text() == '12'
class SequenceTester(UnitTestDBBase):
    """Database-backed tests for the stalker ``Sequence`` model.

    ``setUp`` builds the minimal object graph a Sequence needs (a typed
    Project inside a Repository) and a reusable ``self.kwargs`` dict for
    constructing fresh Sequence instances in individual tests.
    """

    def setUp(self):
        """Create the project/repository fixtures and one test Sequence."""
        super(SequenceTester, self).setUp()
        from stalker import Type
        self.project_type = Type(name='Test Project Type', code='test', target_entity_type='Project')
        from stalker.db.session import DBSession
        DBSession.add(self.project_type)
        self.repository_type = Type(name='Test Type', code='test', target_entity_type='Repository')
        DBSession.add(self.repository_type)
        from stalker import Repository
        self.test_repository = Repository(name='Test Repository', code='TR', type=self.repository_type)
        DBSession.add(self.test_repository)
        from stalker import Project
        self.test_project = Project(name='Test Project 1', code='tp1', type=self.project_type, repository=self.test_repository)
        DBSession.add(self.test_project)
        self.test_project2 = Project(name='Test Project 2', code='tp2', type=self.project_type, repository=self.test_repository)
        DBSession.add(self.test_project2)
        # Shared constructor arguments; individual tests may mutate a copy-in-place
        # (e.g. test_equality changes 'name') before building more Sequences.
        self.kwargs = {'name': 'Test Sequence', 'code': 'tseq', 'description': 'A test sequence', 'project': self.test_project}
        from stalker import Sequence
        self.test_sequence = Sequence(**self.kwargs)
        DBSession.commit()

    def test___auto_name__class_attribute_is_set_to_False(self):
        """Sequence instances require an explicit name (no auto naming)."""
        from stalker import Sequence
        assert (Sequence.__auto_name__ is False)

    def test_plural_class_name(self):
        """The derived plural class name is 'Sequences'."""
        assert (self.test_sequence.plural_class_name == 'Sequences')

    def test___strictly_typed___is_False(self):
        """Sequence does not require a Type instance on construction."""
        from stalker import Sequence
        assert (Sequence.__strictly_typed__ is False)

    def test_shots_attribute_defaults_to_empty_list(self):
        """A freshly created Sequence has no shots."""
        from stalker import Sequence
        new_sequence = Sequence(**self.kwargs)
        assert (new_sequence.shots == [])

    def test_shots_attribute_is_set_None(self):
        """Assigning None to ``shots`` raises a TypeError."""
        with pytest.raises(TypeError) as cm:
            self.test_sequence.shots = None
        assert (str(cm.value) == 'Incompatible collection type: None is not list-like')

    def test_shots_attribute_is_set_to_other_than_a_list(self):
        """Assigning a non-list to ``shots`` raises a TypeError."""
        test_value = 'a string'
        with pytest.raises(TypeError) as cm:
            self.test_sequence.shots = test_value
        assert (str(cm.value) == 'Incompatible collection type: str is not list-like')

    def test_shots_attribute_is_a_list_of_other_objects(self):
        """A list containing non-Shot items raises on the first offender."""
        test_value = [1, 1.2, 'a string']
        with pytest.raises(TypeError) as cm:
            self.test_sequence.shots = test_value
        assert (str(cm.value) == 'Sequence.shots should be all stalker.models.shot.Shot instances, not int')

    def test_shots_attribute_elements_tried_to_be_set_to_non_Shot_object(self):
        """Appending a non-Shot to ``shots`` raises a TypeError."""
        test_value = 'a string'
        with pytest.raises(TypeError) as cm:
            self.test_sequence.shots.append(test_value)
        assert (str(cm.value) == 'Sequence.shots should be all stalker.models.shot.Shot instances, not str')

    def test_equality(self):
        """Sequences compare equal on identical kwargs, unequal otherwise.

        A same-kwargs Entity is not equal to a Sequence (type matters).
        """
        from stalker import Entity, Sequence
        new_seq1 = Sequence(**self.kwargs)
        new_seq2 = Sequence(**self.kwargs)
        new_entity = Entity(**self.kwargs)
        self.kwargs['name'] = 'a different sequence'
        new_seq3 = Sequence(**self.kwargs)
        assert (new_seq1 == new_seq2)
        assert (not (new_seq1 == new_seq3))
        assert (not (new_seq1 == new_entity))

    def test_inequality(self):
        """``!=`` mirrors ``==`` for Sequences and cross-type comparisons."""
        from stalker import Entity, Sequence
        new_seq1 = Sequence(**self.kwargs)
        new_seq2 = Sequence(**self.kwargs)
        new_entity = Entity(**self.kwargs)
        self.kwargs['name'] = 'a different sequence'
        new_seq3 = Sequence(**self.kwargs)
        assert (not (new_seq1 != new_seq2))
        assert (new_seq1 != new_seq3)
        assert (new_seq1 != new_entity)

    def test_ReferenceMixin_initialization(self):
        """Link references passed via kwargs end up in ``references``."""
        from stalker import Type, Link, Sequence
        link_type_1 = Type(name='Image', code='image', target_entity_type='Link')
        link1 = Link(name='Artwork 1', full_path='/mnt/M/JOBs/TEST_PROJECT', filename='a.jpg', type=link_type_1)
        link2 = Link(name='Artwork 2', full_path='/mnt/M/JOBs/TEST_PROJECT', filename='b.jbg', type=link_type_1)
        references = [link1, link2]
        self.kwargs['references'] = references
        new_sequence = Sequence(**self.kwargs)
        assert (new_sequence.references == references)

    def test_initialization_of_task_part(self):
        """Tasks parented under a Sequence appear in its ``tasks`` collection."""
        from stalker import Type, Project, Sequence, Task
        project_type = Type(name='Commercial', code='comm', target_entity_type='Project')
        new_project = Project(name='Commercial', code='comm', type=project_type, repository=self.test_repository)
        self.kwargs['project'] = new_project
        new_sequence = Sequence(**self.kwargs)
        task1 = Task(name='Modeling', status=0, project=new_project, parent=new_sequence)
        task2 = Task(name='Lighting', status=0, project=new_project, parent=new_sequence)
        tasks = [task1, task2]
        # Compare order-insensitively; collection order is not asserted.
        assert (sorted(new_sequence.tasks, key=(lambda x: x.name)) == sorted(tasks, key=(lambda x: x.name)))

    def test_ProjectMixin_initialization(self):
        """The ``project`` kwarg is stored on the ``project`` attribute."""
        from stalker import Type
        project_type = Type(name='Commercial', code='comm', target_entity_type='Project')
        from stalker import Project
        new_project = Project(name='Test Project', code='tp', type=project_type, repository=self.test_repository)
        from stalker import Sequence
        self.kwargs['project'] = new_project
        new_sequence = Sequence(**self.kwargs)
        assert (new_sequence.project == new_project)
def task_revoked_handler(sender, request, terminated, signum, expired, **kwargs): if getattr(request, 'erigonesd_knows', False): logger.info('Task %s[%s] in revoked_handler :: Already running - skipping', sender.name, request.id) return setattr(request, 'erigonesd_knows', True) task_id = request.id if terminated: detail = ('terminated (%s)' % signum) elif expired: detail = 'expired' else: detail = 'revoked' log_task_callback(task_id, detail=detail, sender_name=sender.name)
def generate_checksum(param_dict, merchant_key, salt=None): params_string = __get_param_string__(param_dict) salt = (salt if salt else __id_generator__(4)) final_string = f'{params_string}|{salt}' hasher = hashlib.sha256(final_string.encode()) hash_string = hasher.hexdigest() hash_string += salt return __encode__(hash_string, IV, merchant_key)
def extractThenovelstCom(item): (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title']) if ((not (chp or vol)) or ('preview' in item['title'].lower())): return None tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')] for (tagname, name, tl_type) in tagmap: if (tagname in item['tags']): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False