code
stringlengths
281
23.7M
class BaseSchema(SQLAlchemySchema):
    """Shared base for all model schemas: session-bound, instance-loading,
    and including relationships and foreign keys."""

    class Meta:
        sqla_session = models.db.session
        load_instance = True
        include_relationships = True
        include_fk = True

    def get_attribute(self, obj, attr, default):
        # Plain attribute names are read straight off the object; only
        # dotted paths go through marshmallow's nested traversal.
        if '.' not in attr:
            return getattr(obj, attr, default)
        return super().get_attribute(obj, attr, default)
def check_nyanko_signature(signature: str, data: str, inquiry_code: str) -> bool:
    """Validate a nyanko signature.

    The first 64 characters of *signature* are a random salt; the
    remainder is an HMAC-SHA256 hex digest of *data* keyed with
    inquiry_code + salt. Returns True when the digest matches.
    """
    salt = signature[:64]
    expected_mac = signature[64:]
    # UTF-8 concatenation equals concatenation of UTF-8 encodings.
    key = (inquiry_code + salt).encode('utf-8')
    computed_mac = hmac.new(key, data.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
    return computed_mac == expected_mac
# BUG FIX: the decorator head was lost in extraction (the line began with a
# bare `.parametrize(`, a syntax error); restored as pytest's parametrize.
@pytest.mark.parametrize(
    'system_app',
    [{'app_config': {'DEBUG': True, 'dbgpt.serve.prompt.default_user': 'dbgpt', 'dbgpt.serve.prompt.default_sys_code': 'dbgpt'}}],
    indirect=True,
)
def test_config_default_user(service: Service):
    """The service config should pick up default_user/default_sys_code from
    the (indirectly parametrized) system app configuration."""
    system_app: SystemApp = service._system_app
    assert system_app.config.get('DEBUG') is True
    assert system_app.config.get('dbgpt.serve.prompt.default_user') == 'dbgpt'
    assert service.config is not None
    assert service.config.default_user == 'dbgpt'
    assert service.config.default_sys_code == 'dbgpt'
def extractCultureasiatlBlogspotCom(item):
    """Map a cultureasiatl.blogspot.com feed item to a release message.

    Returns None for previews / items without chapter info, a release
    message for known tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    series_by_tag = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in series_by_tag.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def Check_ExprBound(proc, stmts, expr, value, option, exception=True):
    """SMT-verify that `expr` (in the context preceding `stmts` of `proc`)
    is bounded relative to `value` per `option` (GEQ/GT/LEQ/LT/EQ).

    Returns the verification result when exception=False; otherwise raises
    SchedulingError on failure and returns None on success.
    """
    assert isinstance(option, Check_ExprBound_Options)
    assert len(stmts) > 0
    ctxt = ContextExtraction(proc, stmts)
    p = ctxt.get_control_predicate()
    G = ctxt.get_pre_globenv()
    slv = SMTSolver(verbose=False)
    slv.push()
    # Assume control may reach this point.
    slv.assume(AMay(p))
    e = G(lift_e(expr))
    if option == Check_ExprBound_Options.GEQ:
        query = ADef(e >= AInt(value))
        err_msg = f'greater than or equal to {value}'
    elif option == Check_ExprBound_Options.GT:
        query = ADef(e > AInt(value))
        err_msg = f'greater than {value}'
    elif option == Check_ExprBound_Options.LEQ:
        query = ADef(e <= AInt(value))
        err_msg = f'less than or equal to {value}'
    elif option == Check_ExprBound_Options.LT:
        query = ADef(e < AInt(value))
        # BUG FIX: the LT error message previously read "greater than".
        err_msg = f'less than {value}'
    elif option == Check_ExprBound_Options.EQ:
        # BUG FIX: was ADef(e=AInt(value)) — a keyword argument, not the
        # equality comparison the other branches parallel.
        query = ADef(e == AInt(value))
        err_msg = f'equal to {value}'
    else:
        assert False, 'Bad case'
    success = slv.verify(query)
    slv.pop()
    if not exception:
        return success
    if not success:
        estr = str(expr)
        if estr[-1] == '\n':
            estr = estr[:-1]
        raise SchedulingError(f'The expression {estr} is not guaranteed to be {err_msg}.')
# NOTE(review): the decorator head was lost in extraction, leaving the bare
# expression `_tag(takes_context=False)`. `takes_context` strongly suggests a
# Django template tag registration — restored as `register.simple_tag`;
# confirm the registrar name against upstream.
@register.simple_tag(takes_context=False)
def get_admin_interface_active_date_hierarchy(changelist):
    """Return the changelist's date-hierarchy field name when a year filter
    for that field is active; otherwise return None (implicitly)."""
    date_field_name = changelist.date_hierarchy
    if not date_field_name:
        return
    params = changelist.get_filters_params()
    # Only report the hierarchy as "active" when the year level is drilled.
    if f'{date_field_name}__year' not in params:
        return
    return date_field_name
def test_update(data_client):
    """Document.update should persist partial changes to the live cluster,
    bump the version and seq_no, and leave untouched fields intact."""
    elasticsearch_repo = Repository.get('elasticsearch-dsl-py')
    v = elasticsearch_repo.meta.version
    old_seq_no = elasticsearch_repo.meta.seq_no
    # Update a nested field and add a brand-new field in a single call.
    elasticsearch_repo.update(owner={'new_name': 'elastic'}, new_field='testing-update')
    assert ('elastic' == elasticsearch_repo.owner.new_name)
    assert ('testing-update' == elasticsearch_repo.new_field)
    # The local metadata version is bumped after the update.
    assert (elasticsearch_repo.meta.version == (v + 1))
    # Re-fetch to confirm the changes were persisted server-side.
    new_version = Repository.get('elasticsearch-dsl-py')
    assert ('testing-update' == new_version.new_field)
    assert ('elastic' == new_version.owner.new_name)
    # Pre-existing nested data must survive the partial update.
    assert ('elasticsearch' == new_version.owner.name)
    assert ('seq_no' in new_version.meta)
    assert (new_version.meta.seq_no != old_seq_no)
    assert ('primary_term' in new_version.meta)
def randColor(seed_no: int = None):
    """Return a random CSS hex colour string such as '#A3F01B'.

    If *seed_no* is given, the colour is deterministic for that seed and
    the global random state is re-randomized afterwards.
    """
    # BUG FIX: the alphabet was 'ABCDEF' (6 chars) while the index below is
    # drawn from range(16), raising IndexError whenever the draw was >= 6.
    # A hex colour digit needs the full 16-character alphabet.
    letters = '0123456789ABCDEF'
    color = ['#']
    if seed_no is not None:
        random.seed(seed_no)
    for _ in range(6):
        color.append(letters[math.floor(random.random() * 16)])
    if seed_no is not None:
        # Reset to a fresh, unpredictable state for later callers.
        random.seed(None)
    return ''.join(color)
def extractDmlationsWordpressCom(item):
    """Map a dmlations.wordpress.com feed item to a release message.

    Returns None for previews / items without chapter info, a release
    message for known tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    series_by_tag = {
        'The Promise Sealed with our Lips': ('The Promise Sealed with our Lips', 'translated'),
        'RSCB': ('Rebirth of the Supreme Celestial Being', 'translated'),
        'Seizing Dreams': ('Seizing Dreams', 'translated'),
        'sd': ('Seizing Dreams', 'translated'),
        'NENH': ('New Era, New Hell', 'translated'),
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in series_by_tag.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class LinearPolarizingBeamSplitter(OpticalElement):
    """Ideal linear polarizing beam splitter with two orthogonal output
    ports.

    BUG FIX: the `@property` decorator and the setter decorator (extracted
    as the residue `_angle.setter`) were lost. Without them, the assignment
    in __init__ simply shadowed the method and the two polarizer ports were
    never created, so forward() crashed. Restored as a property pair.
    """

    def __init__(self, polarization_angle, wavelength=1):
        # Assigning through the property below also builds both ports.
        self.polarization_angle = polarization_angle

    @property
    def polarization_angle(self):
        """Polarization angle (radians) of the transmitting port."""
        return self._polarization_angle

    @polarization_angle.setter
    def polarization_angle(self, polarization_angle):
        self._polarization_angle = polarization_angle
        # Port 2 is orthogonal to port 1.
        self.polarizer_port_1 = LinearPolarizer(polarization_angle)
        self.polarizer_port_2 = LinearPolarizer(polarization_angle + (np.pi / 2))

    def forward(self, wavefront):
        """Split *wavefront* into the two orthogonally-polarized ports."""
        wf_1 = self.polarizer_port_1.forward(wavefront)
        wf_2 = self.polarizer_port_2.forward(wavefront)
        return (wf_1, wf_2)

    def backward(self, wavefront):
        raise RuntimeError('Backward propagation through PolarizingBeamSplitter not possible.')

    def mueller_matrices(self):
        """Mueller matrices of both ports, derived from their Jones matrices."""
        return (jones_to_mueller(self.polarizer_port_1.jones_matrix), jones_to_mueller(self.polarizer_port_2.jones_matrix))
def get_mission_data() -> dict[str, Any]:
    """Collect all mission segments, keyed by section name.

    Note: get_mission_segment() is consumed sequentially, so the section
    order below must match the on-disk segment order.
    """
    section_names = (
        'states',
        'requirements',
        'clear_types',
        'gamatoto',
        'nyancombo',
        'user_rank',
        'expiry',
        'preparing',
    )
    # dict comprehensions evaluate left-to-right, preserving call order.
    return {name: get_mission_segment() for name in section_names}
class TestEvAdventureTwitchCombatHandler(EvenniaCommandTestMixin, _CombatTestBase):
    """Tests for the twitch-style combat handler and its commands.

    BUG FIX / NOTE(review): several tests were preceded by stripped
    decorator residue of the form `('evennia...unrepeat', new=Mock())`,
    which is invalid syntax. They are restored here as `@patch(...)`
    (unittest.mock) so the repeat/unrepeat tickers are mocked out —
    confirm the exact decorator spelling against upstream.
    """

    def setUp(self):
        super().setUp()
        # EvenniaCommandTestMixin expects .char1 / .account to exist.
        self.char1 = self.combatant
        self.account = None
        self.combatant_combathandler = combat_twitch.EvAdventureCombatTwitchHandler.get_or_create_combathandler(self.combatant, key='combathandler')
        self.target_combathandler = combat_twitch.EvAdventureCombatTwitchHandler.get_or_create_combathandler(self.target, key='combathandler')

    def test_get_sides(self):
        sides = self.combatant_combathandler.get_sides(self.combatant)
        self.assertEqual(sides, ([self.combatant], [self.target]))

    def test_give_advantage(self):
        self.combatant_combathandler.give_advantage(self.combatant, self.target)
        self.assertTrue(self.combatant_combathandler.advantage_against[self.target])

    def test_give_disadvantage(self):
        self.combatant_combathandler.give_disadvantage(self.combatant, self.target)
        self.assertTrue(self.combatant_combathandler.disadvantage_against[self.target])

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock(return_value=999))
    def test_queue_action(self):
        # Without a dt there is no ticker.
        actiondict = {'key': 'hold'}
        self.combatant_combathandler.queue_action(actiondict)
        self.assertIsNone(self.combatant_combathandler.current_ticker_ref)
        # With a dt, the mocked repeat() hands back ticker ref 999.
        actiondict = {'key': 'hold', 'dt': 5}
        self.combatant_combathandler.queue_action(actiondict)
        self.assertEqual(self.combatant_combathandler.current_ticker_ref, 999)

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_execute_next_action(self):
        self.combatant_combathandler.action_dict = {'key': 'hold', 'dummy': 'foo', 'repeat': False}
        self.combatant_combathandler.execute_next_action()
        # A non-repeating action falls back to the fallback action dict.
        self.assertEqual(self.combatant_combathandler.action_dict, self.combatant_combathandler.fallback_action_dict)

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    def test_check_stop_combat(self):
        # Nobody left standing -> combat ends with the "noone stands" message.
        self.combatant_combathandler.get_sides = Mock(return_value=([], []))
        self.combatant_combathandler.stop_combat = Mock()
        self.combatant.hp = -1
        self.target.hp = -1
        self.combatant_combathandler.check_stop_combat()
        self.combatant.msg.assert_called_with(text=('Noone stands after the dust settles.', {}), from_obj=self.combatant)
        self.combatant_combathandler.stop_combat.assert_called()
        # One side wiped out -> combat is simply over.
        self.combatant.hp = 10
        self.target.hp = -1
        self.combatant_combathandler.get_sides = Mock(return_value=([self.combatant], []))
        self.combatant_combathandler.check_stop_combat()
        self.combatant.msg.assert_called_with(text=('The combat is over.', {}), from_obj=self.combatant)

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_hold(self):
        self.call(combat_twitch.CmdHold(), '', 'You hold back, doing nothing')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'hold'})

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_attack(self):
        self.call(combat_twitch.CmdAttack(), self.target.key, 'You attack testmonster!')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'attack', 'target': self.target, 'dt': 3, 'repeat': True})

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_stunt(self):
        boost_result = {'key': 'stunt', 'recipient': self.combatant, 'target': self.target, 'advantage': True, 'stunt_type': Ability.STR, 'defense_type': Ability.STR, 'dt': 3}
        foil_result = {'key': 'stunt', 'recipient': self.target, 'target': self.combatant, 'advantage': False, 'stunt_type': Ability.STR, 'defense_type': Ability.STR, 'dt': 3}
        # 'boost' defaults the recipient to self; naming 'me' is equivalent.
        self.call(combat_twitch.CmdStunt(), f'STR {self.target.key}', 'You prepare a stunt!', cmdstring='boost')
        self.assertEqual(self.combatant_combathandler.action_dict, boost_result)
        self.call(combat_twitch.CmdStunt(), f'STR me {self.target.key}', 'You prepare a stunt!', cmdstring='boost')
        self.assertEqual(self.combatant_combathandler.action_dict, boost_result)
        # 'foil' reverses recipient/target and removes the advantage.
        self.call(combat_twitch.CmdStunt(), f'STR {self.target.key}', 'You prepare a stunt!', cmdstring='foil')
        self.assertEqual(self.combatant_combathandler.action_dict, foil_result)
        self.call(combat_twitch.CmdStunt(), f'STR {self.target.key} me', 'You prepare a stunt!', cmdstring='foil')
        self.assertEqual(self.combatant_combathandler.action_dict, foil_result)

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_useitem(self):
        item = create.create_object(EvAdventureConsumable, key='potion', attributes=[('uses', 2)], location=self.combatant)
        # No explicit target -> used on self.
        self.call(combat_twitch.CmdUseItem(), 'potion', 'You prepare to use potion!')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'use', 'item': item, 'target': self.combatant, 'dt': 3})
        self.call(combat_twitch.CmdUseItem(), f'potion on {self.target.key}', 'You prepare to use potion!')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'use', 'item': item, 'target': self.target, 'dt': 3})

    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.unrepeat', new=Mock())
    @patch('evennia.contrib.tutorials.evadventure.combat_twitch.repeat', new=Mock())
    def test_wield(self):
        sword = create.create_object(EvAdventureWeapon, key='sword', location=self.combatant)
        runestone = create.create_object(EvAdventureWeapon, key='runestone', location=self.combatant)
        self.call(combat_twitch.CmdWield(), 'sword', 'You reach for sword!')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'wield', 'item': sword, 'dt': 3})
        self.call(combat_twitch.CmdWield(), 'runestone', 'You reach for runestone!')
        self.assertEqual(self.combatant_combathandler.action_dict, {'key': 'wield', 'item': runestone, 'dt': 3})
class BenchmarkActor(actor.RallyActor):
    """Thespian actor coordinating a benchmark race: drives the mechanic
    (cluster engine) and the driver (load generator) and relays results or
    errors back to the actor that started the race.

    BUG FIX / NOTE(review): each receive handler below carried stripped
    decorator residue reading `_retry('race control')`; restored as Rally's
    `@actor.no_retry('race control')` — confirm against upstream.
    """

    def __init__(self):
        super().__init__()
        self.cfg = None
        self.start_sender = None
        self.mechanic = None
        self.main_driver = None
        self.coordinator = None

    def receiveMsg_PoisonMessage(self, msg, sender):
        self.logger.debug('BenchmarkActor got notified of poison message [%s] (forwarding).', str(msg))
        if self.coordinator:
            self.coordinator.error = True
        self.send(self.start_sender, msg)

    def receiveUnrecognizedMessage(self, msg, sender):
        self.logger.debug('BenchmarkActor received unknown message [%s] (ignoring).', str(msg))

    @actor.no_retry('race control')
    def receiveMsg_Setup(self, msg, sender):
        self.start_sender = sender
        self.cfg = msg.cfg
        self.coordinator = BenchmarkCoordinator(msg.cfg)
        self.coordinator.setup(sources=msg.sources)
        self.logger.info('Asking mechanic to start the engine.')
        self.mechanic = self.createActor(mechanic.MechanicActor, targetActorRequirements={'coordinator': True})
        self.send(self.mechanic, mechanic.StartEngine(self.cfg, self.coordinator.metrics_store.open_context, msg.sources, msg.distribution, msg.external, msg.docker))

    @actor.no_retry('race control')
    def receiveMsg_EngineStarted(self, msg, sender):
        self.logger.info('Mechanic has started engine successfully.')
        self.coordinator.race.team_revision = msg.team_revision
        self.main_driver = self.createActor(driver.DriverActor, targetActorRequirements={'coordinator': True})
        self.logger.info('Telling driver to prepare for benchmarking.')
        self.send(self.main_driver, driver.PrepareBenchmark(self.cfg, self.coordinator.current_track))

    @actor.no_retry('race control')
    def receiveMsg_PreparationComplete(self, msg, sender):
        self.coordinator.on_preparation_complete(msg.distribution_flavor, msg.distribution_version, msg.revision)
        self.logger.info('Telling driver to start benchmark.')
        self.send(self.main_driver, driver.StartBenchmark())

    @actor.no_retry('race control')
    def receiveMsg_TaskFinished(self, msg, sender):
        self.coordinator.on_task_finished(msg.metrics)
        # Keep the mechanic's relative clock aligned with the next task.
        self.send(self.mechanic, mechanic.ResetRelativeTime(msg.next_task_scheduled_in))

    @actor.no_retry('race control')
    def receiveMsg_BenchmarkCancelled(self, msg, sender):
        self.coordinator.cancelled = True
        self.send(self.start_sender, msg)

    @actor.no_retry('race control')
    def receiveMsg_BenchmarkFailure(self, msg, sender):
        self.logger.info('Received a benchmark failure from [%s] and will forward it now.', sender)
        self.coordinator.error = True
        self.send(self.start_sender, msg)

    @actor.no_retry('race control')
    def receiveMsg_BenchmarkComplete(self, msg, sender):
        self.coordinator.on_benchmark_complete(msg.metrics)
        # The driver is no longer needed once the benchmark has finished.
        self.send(self.main_driver, thespian.actors.ActorExitRequest())
        self.main_driver = None
        self.logger.info('Asking mechanic to stop the engine.')
        self.send(self.mechanic, mechanic.StopEngine())

    @actor.no_retry('race control')
    def receiveMsg_EngineStopped(self, msg, sender):
        self.logger.info('Mechanic has stopped engine successfully.')
        self.send(self.start_sender, Success())
class OptionSeriesTreemapSonificationContexttracksMappingLowpass(Options):
    """Config wrapper for the lowpass-filter mapping of a treemap
    sonification context track.

    NOTE(review): in this generated option hierarchy such accessors are
    typically @property-decorated; the decorators appear to have been
    stripped here — confirm against the generator template.
    """

    def frequency(self) -> 'OptionSeriesTreemapSonificationContexttracksMappingLowpassFrequency':
        # Lazily materializes the nested `frequency` sub-config object.
        return self._config_sub_data('frequency', OptionSeriesTreemapSonificationContexttracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionSeriesTreemapSonificationContexttracksMappingLowpassResonance':
        # Lazily materializes the nested `resonance` sub-config object.
        return self._config_sub_data('resonance', OptionSeriesTreemapSonificationContexttracksMappingLowpassResonance)
class TestMessages(unittest.TestCase):
    """Construction and equality tests for basic OpenFlow messages."""

    def test_hello_construction(self):
        msg = ofp.message.hello()
        self.assertEqual(msg.version, ofp.OFP_VERSION)
        self.assertEqual(msg.type, ofp.OFPT_HELLO)
        self.assertEqual(msg.xid, None)
        msg = ofp.message.hello(xid=123)
        self.assertEqual(msg.xid, 123)
        # xid=0 must be preserved, not treated as "unset".
        msg = ofp.message.hello(xid=0)
        self.assertEqual(msg.xid, 0)

    def test_echo_request_construction(self):
        msg = ofp.message.echo_request(data='abc')
        self.assertEqual(msg.data, 'abc')

    def test_echo_request_invalid_length(self):
        buf = b'\x01\x02\x00\x07\x124V'
        with self.assertRaisesRegex(ofp.ProtocolError, 'Buffer too short'):
            ofp.message.echo_request.unpack(OFReader(buf))

    def test_echo_request_equality(self):
        # BUG FIX: both constructor calls read `xid=,` — the value was lost
        # in extraction (a syntax error). Any concrete shared xid restores
        # the intent; 0x12345678 matches the xid encoded in the buffer used
        # above — NOTE(review): confirm against upstream history.
        msg = ofp.message.echo_request(xid=0x12345678, data='abc')
        msg2 = ofp.message.echo_request(xid=0x12345678, data='abc')
        self.assertEqual(msg, msg2)
        # Differing xids break equality; restoring it restores equality.
        msg2.xid = 1
        self.assertNotEqual(msg, msg2)
        msg2.xid = msg.xid
        # Differing payloads break equality too.
        msg2.data = 'a'
        self.assertNotEqual(msg, msg2)
        msg2.data = msg.data
def test_remote_fetch_execution(remote):
    """fetch_execution should look the execution up by name through the
    admin client and preserve its identifier."""
    admin_execution = Execution(
        id=WorkflowExecutionIdentifier('p1', 'd1', 'n1'),
        spec=MagicMock(),
        closure=MagicMock(),
    )
    stub_client = MagicMock()
    stub_client.get_execution.return_value = admin_execution
    remote._client = stub_client
    fetched = remote.fetch_execution(name='n1')
    assert fetched.id == admin_execution.id
class BorgTest(unittest.TestCase):
    """The Borg pattern: distinct instances sharing one state."""

    def setUp(self):
        self.b1 = Borg()
        self.b2 = Borg()
        self.ib1 = YourBorg()

    def tearDown(self):
        # Restore the shared state so tests don't leak into each other.
        self.ib1.state = 'Init'

    def test_initial_borg_state_shall_be_init(self):
        b = Borg()
        self.assertEqual(b.state, 'Init')

    def test_changing_instance_attribute_shall_change_borg_state(self):
        self.b1.state = 'Running'
        # All instances, including the subclass instance, see the change.
        self.assertEqual(self.b1.state, 'Running')
        self.assertEqual(self.b2.state, 'Running')
        self.assertEqual(self.ib1.state, 'Running')

    def test_instances_shall_have_own_ids(self):
        # BUG FIX: this used assertNotEqual(a, b, c), where the third
        # argument is the failure *message* — ib1's id was never compared.
        # Check all three pairs explicitly.
        self.assertNotEqual(id(self.b1), id(self.b2))
        self.assertNotEqual(id(self.b1), id(self.ib1))
        self.assertNotEqual(id(self.b2), id(self.ib1))
def build_package(cfg: Config, pkg_path: str, build_dir: str) -> None:
    """Build the package located at *pkg_path* into *build_dir*.

    Clears any stale `dist` directory first and always restores the
    previous working directory, even on failure.
    """
    original_cwd = os.getcwd()
    os.chdir(pkg_path)
    try:
        log.info(f"Building {get_package_info('.').name}")
        # Stale artifacts would otherwise end up in the new build.
        shutil.rmtree('dist', ignore_errors=True)
        run_python_script(
            cmd=['-m', 'build', '-o', build_dir, *cfg.build_targets],
            allow_warnings=False,
        )
    finally:
        os.chdir(original_cwd)
def create_recoverable_error_file() -> None:
    """Create an empty 'recoverable error' marker file next to the
    torchelastic error file.

    Raises:
        ValueError: if the TORCHELASTIC_ERROR_FILE env var is not set.
    """
    error_file_path = os.environ.get('TORCHELASTIC_ERROR_FILE')
    if error_file_path is None:
        raise ValueError('`TORCHELASTIC_ERROR_FILE` environment variable not set')
    marker_path = os.path.join(
        os.path.dirname(error_file_path), RECOVERABLE_ERROR_FILE_NAME
    )
    # Touch/truncate: an empty file is the whole signal.
    with open(marker_path, 'w') as marker:
        marker.write('')
class HeaderSyncStrategy(BaseSyncStrategy):
    """Sync strategy that downloads only block headers.

    BUG FIX: get_sync_mode and configure_parser take `cls` but had lost
    their @classmethod decorators; restored.
    """

    @classmethod
    def get_sync_mode(cls) -> str:
        return 'header'

    @classmethod
    def configure_parser(cls, arg_group: _ArgumentGroup) -> None:
        add_sync_from_checkpoint_arg(arg_group)
        add_disable_backfill_arg(arg_group)

    async def sync(self, args: Namespace, logger: logging.Logger, chain: AsyncChainAPI, base_db: AtomicDatabaseAPI, peer_pool: BasePeerPool, event_bus: EndpointAPI, metrics_service: MetricsServiceAPI) -> None:
        """Run the header chain syncer until it finishes."""
        syncer = HeaderChainSyncer(
            chain,
            AsyncChainDB(base_db),
            cast(ETHPeerPool, peer_pool),
            enable_backfill=(not args.disable_backfill),
            checkpoint=args.sync_from_checkpoint,
        )
        async with background_asyncio_service(syncer) as manager:
            await manager.wait_finished()
def mutualInfor(x, y, norm=False):
    """Mutual information between samples x and y via a 2-D histogram.

    The bin count comes from numBins (correlation-aware optimal binning).
    When norm is True, the MI is divided by min(H(x), H(y)) so the result
    lies in [0, 1].
    """
    bXY = numBins(x.shape[0], corr=np.corrcoef(x, y)[0, 1])
    cXY = np.histogram2d(x, y, bXY)[0]
    iXY = mutual_info_score(None, None, contingency=cXY)
    if norm:
        # BUG FIX: `bins` was an undefined name here; the marginal
        # histograms must use the same bin count bXY as the joint one.
        hX = ss.entropy(np.histogram(x, bXY)[0])
        hY = ss.entropy(np.histogram(y, bXY)[0])
        iXY /= min(hX, hY)
    return iXY
class OptionPlotoptionsPictorialSonificationDefaultinstrumentoptionsMappingNoteduration(Options):
    """Note-duration mapping options for pictorial sonification.

    BUG FIX: each getter/setter pair shares one method name, which is only
    valid Python with @property / @<name>.setter; the stripped decorators
    are restored here (without them the setters silently shadowed the
    getters).
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def test_balance_wei(accounts, web3, chain):
    """Transfers move wei balances, and chain.reset() restores them."""
    balance = accounts[0].balance()
    assert web3.eth.get_balance(accounts[0].address) == balance
    accounts[1].transfer(accounts[0], '1 ether')
    # BUG FIX(review): the transferred amount was missing from this
    # assertion (`balance + )` after extraction); 1 ether == 10**18 wei —
    # confirm the original literal against upstream.
    assert accounts[0].balance() == balance + 10 ** 18
    chain.reset()
    assert web3.eth.get_balance(accounts[0].address) == balance
class CrawlerBase(unittest_utils.ForsetiTestCase):
    """Shared fixture/helpers for inventory crawler tests."""

    def setUp(self):
        self.maxDiff = None
        unittest_utils.ForsetiTestCase.setUp(self)

    def tearDown(self):
        unittest_utils.ForsetiTestCase.tearDown(self)

    def _get_resource_counts_from_storage(self, storage):
        """Tally stored items per resource type: total resources plus one
        counter per policy/metadata kind present on the item."""
        extra_counters = (
            ('iam_policy', lambda item: item.get_iam_policy()),
            ('gcs_policy', lambda item: item.get_gcs_policy()),
            ('dataset_policy', lambda item: item.get_dataset_policy()),
            ('billing_info', lambda item: item.get_billing_info()),
            ('enabled_apis', lambda item: item.get_enabled_apis()),
            ('service_config', lambda item: item.get_kubernetes_service_config()),
        )
        result_counts = {}
        for item in list(storage.mem.values()):
            counts = result_counts.setdefault(item.type(), {'resource': 0})
            counts['resource'] += 1
            for key, getter in extra_counters:
                if getter(item):
                    counts[key] = counts.setdefault(key, 0) + 1
        return result_counts

    def _run_crawler(self, config, has_org_access=True):
        """Run the crawler against mocked GCP APIs and return the counts."""
        with MemoryStorage() as storage:
            progresser = NullProgresser()
            with gcp_api_mocks.mock_gcp(has_org_access=has_org_access):
                run_crawler(storage, progresser, config, parallel=False, threads=1)
            self.assertEqual(0, progresser.errors, 'No errors should have occurred')
            return self._get_resource_counts_from_storage(storage)
class DbObjWrappers(TestCase):
    """Regression tests for storing containers that wrap db objects inside
    Attributes: repeated fetches must keep returning equal containers with
    the wrapped db object intact, including inside (default)dicts."""

    def setUp(self):
        super().setUp()
        self.dbobj1 = DefaultObject(db_key='Tester1')
        self.dbobj1.save()
        self.dbobj2 = DefaultObject(db_key='Tester2')
        self.dbobj2.save()

    def test_dbobj_hidden_obj__fail(self):
        # Containers flagged as invalid must be rejected at assignment.
        with self.assertRaises(TypeError):
            self.dbobj1.db.testarg = _InvalidContainer(self.dbobj1)

    def test_consecutive_fetch(self):
        con = _ValidContainer(self.dbobj2)
        self.dbobj1.db.testarg = con
        attrobj = self.dbobj1.attributes.get('testarg', return_obj=True)
        # Repeated .value accesses must not corrupt the cached container.
        self.assertEqual(attrobj.value, con)
        self.assertEqual(attrobj.value, con)
        self.assertEqual(attrobj.value.hidden_obj, self.dbobj2)

    def test_dbobj_hidden_obj__success(self):
        con = _ValidContainer(self.dbobj2)
        self.dbobj1.db.testarg = con
        # Three consecutive fetches must all be equal to the original and
        # still carry the hidden db object.
        res1 = self.dbobj1.db.testarg
        res2 = self.dbobj1.db.testarg
        res3 = self.dbobj1.db.testarg
        self.assertEqual(res1, res2)
        self.assertEqual(res1, res3)
        self.assertEqual(res1, con)
        self.assertEqual(res2, con)
        self.assertEqual(res1.hidden_obj, self.dbobj2)
        self.assertEqual(res2.hidden_obj, self.dbobj2)
        self.assertEqual(res3.hidden_obj, self.dbobj2)

    def test_dbobj_hidden_dict(self):
        # Containers nested inside a stored dict must survive re-fetches.
        con1 = _ValidContainer(self.dbobj2)
        con2 = _ValidContainer(self.dbobj2)
        self.dbobj1.db.dict = {}
        self.dbobj1.db.dict['key1'] = con1
        self.dbobj1.db.dict['key2'] = con2
        self.assertEqual(self.dbobj1.db.dict['key1'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dict['key1'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dict['key2'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dict['key2'].hidden_obj, self.dbobj2)

    def test_dbobj_hidden_defaultdict(self):
        # Same guarantee for containers nested two levels down in a
        # defaultdict.
        con1 = _ValidContainer(self.dbobj2)
        con2 = _ValidContainer(self.dbobj2)
        self.dbobj1.db.dfdict = defaultdict(dict)
        self.dbobj1.db.dfdict['key']['con1'] = con1
        self.dbobj1.db.dfdict['key']['con2'] = con2
        self.assertEqual(self.dbobj1.db.dfdict['key']['con1'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dfdict['key']['con1'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dfdict['key']['con2'].hidden_obj, self.dbobj2)
        self.assertEqual(self.dbobj1.db.dfdict['key']['con2'].hidden_obj, self.dbobj2)
def test_runpath_roundtrip(tmp_path, storage, surface):
    """SurfaceConfig should read a surface from a runpath, store it in an
    ensemble, write it back out, and reproduce values and geometry."""
    config = SurfaceConfig('some_name', forward_init=True, ncol=surface.ncol, nrow=surface.nrow, xori=surface.xori, yori=surface.yori, xinc=surface.xinc, yinc=surface.yinc, rotation=surface.rotation, yflip=surface.yflip, forward_init_file='input_%d', output_file=(tmp_path / 'output'), base_surface_path='base_surface')
    ensemble = storage.create_experiment(parameters=[config]).create_ensemble(name='text', ensemble_size=1)
    # Place the source surface where forward_init expects realization 0.
    surface.to_file((tmp_path / 'input_0'), fformat='irap_ascii')
    ds = config.read_from_runpath(tmp_path, 0)
    ensemble.save_parameters(config.name, 0, ds)
    # NOTE(review): repointing forward_init_file before writing looks
    # intentional (round-trip through a second filename) — confirm.
    config.forward_init_file = 'output_%d'
    config.write_to_runpath(tmp_path, 0, ensemble)
    actual_surface = xtgeo.surface_from_file((tmp_path / 'output'), fformat='irap_ascii', dtype=np.float32)
    # Values must survive the roundtrip within float32 tolerance.
    np.testing.assert_allclose(actual_surface.values, surface.values, rtol=0, atol=1e-06)
    # Geometry must match both the config and the known fixture values.
    for (prop, val) in (('ncol', 5), ('nrow', 3), ('xori', 3), ('yori', 4), ('xinc', 1), ('yinc', 2), ('yflip', 1.0), ('rotation', 10)):
        assert (getattr(config, prop) == getattr(actual_surface, prop) == val), f'Failed for: {prop}'
def test_second_consecutive_focus_requests_ignored(flash_server: FlashServer, windows: list[Window]) -> None:
    """Two consecutive focus events on the same window are both routed, but
    only the first produces a flash."""
    expected_calls = [call(WMEvent(window=windows[1], event_type=WMEventType.FOCUS_SHIFT))] * 2
    flash_server.router.route_request = MagicMock()
    with watching_windows(windows) as watchers:
        with server_running(flash_server):
            change_focus(windows[1])
            change_focus(windows[1])
    actual_calls = flash_server.router.route_request.call_args_list
    assert actual_calls[3:] == expected_calls
    # BUG FIX: this was a bare comparison expression, so the check was
    # silently never enforced.
    assert watchers[0].count_flashes() == 1
def accept_better(subject, against):
    """Return True when the request's Accept header mentions *subject*
    earlier (i.e. with higher preference) than *against*.

    A missing Accept header, or an Accept header without *subject*,
    yields False; *subject* present without *against* yields True.
    """
    if 'Accept' not in request.headers:
        return False
    accept = request.headers['Accept'].lower()
    try:
        subject_pos = accept.index(subject)
    except ValueError:
        return False
    try:
        against_pos = accept.index(against)
    except ValueError:
        return True
    return subject_pos < against_pos
def test_idx_in_2nd_array():
    """sb.idx_in_2nd_array maps every element of arr1 to its index in arr2,
    with nearest-match fallback when match=False and a ValueError when an
    exact match is required but missing."""
    arr1 = np.array([1, 6, 4.6, 3.4, 6, 1, 'Hallo', 'hallo', 'Hallo'])
    arr2 = np.array([8, 4, 1, 2, 5, 5.6, 4.6, 'Hallo', 'hallo', 6, 3.4])
    expected = np.array([2, 9, 6, 10, 9, 2, 7, 8, 7])
    assert all(sb.idx_in_2nd_array(arr1, arr2) == expected)
    # Break the exact match for 3.4: the non-strict lookup shifts to idx 1.
    arr2[-1] = 4.7
    expected[3] = 1
    assert all(sb.idx_in_2nd_array(arr1, arr2, match=False) == expected)
    # With match=True the missing exact value must raise ValueError.
    raised = False
    try:
        sb.idx_in_2nd_array(arr1, arr2, match=True)
    except ValueError:
        raised = True
    assert raised
class TestColumnRegExp(BaseCheckValueTest, ABC):
    """Check the number of values in a column that do NOT match `reg_exp`.

    BUG FIX: `metric` is accessed as an attribute below
    (`self.metric.get_result()`), so it must be a @property; the decorator
    had been stripped and is restored here.
    """

    group: ClassVar = DATA_INTEGRITY_GROUP.id
    name: ClassVar = 'RegExp Match'
    _metric: ColumnRegExpMetric
    column_name: str
    reg_exp: str

    def __init__(self, column_name: str, reg_exp: str, eq: Optional[Numeric] = None, gt: Optional[Numeric] = None, gte: Optional[Numeric] = None, is_in: Optional[List[Union[Numeric, str, bool]]] = None, lt: Optional[Numeric] = None, lte: Optional[Numeric] = None, not_eq: Optional[Numeric] = None, not_in: Optional[List[Union[Numeric, str, bool]]] = None, is_critical: bool = True):
        self.column_name = column_name
        self.reg_exp = reg_exp
        super().__init__(eq=eq, gt=gt, gte=gte, is_in=is_in, lt=lt, lte=lte, not_eq=not_eq, not_in=not_in, is_critical=is_critical)
        self._metric = ColumnRegExpMetric(column_name=self.column_name, reg_exp=self.reg_exp)

    @property
    def metric(self):
        return self._metric

    def groups(self) -> Dict[str, str]:
        # Group results by the tested feature when one is set.
        if self.column_name is not None:
            return {GroupingTypes.ByFeature.id: self.column_name}
        return {}

    def get_condition(self) -> TestValueCondition:
        """Derive the pass/fail condition: explicit user condition, else a
        reference-scaled expectation, else exactly zero mismatches."""
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        if metric_result.reference:
            # Scale the reference mismatch count to the current row count,
            # allowing 10% relative slack.
            ref_value = metric_result.reference.number_of_not_matched
            mult = metric_result.current.number_of_rows / metric_result.reference.number_of_rows
            if mult is not None:
                return TestValueCondition(eq=approx(ref_value * mult, relative=0.1), source=ValueSource.REFERENCE)
        return TestValueCondition(eq=0)

    def calculate_value_for_test(self) -> Optional[Numeric]:
        return self.metric.get_result().current.number_of_not_matched

    def get_description(self, value: Numeric) -> str:
        return f'The number of the mismatched values in the column **{self.column_name}** is {value}. The test threshold is {self.get_condition()}.'
def _perm102_bmm_checker(bmm_op: Operator, cat_op: Operator) -> bool:
    """Return True when this bmm's output can be written with strides
    directly into the concatenation (concat along dim 1 only)."""
    input_tensor = bmm_op._attrs['inputs'][0]
    output_tensor = bmm_op._attrs['outputs'][0]
    # Output must be rank-2 and keep the batch dimension of the input.
    if output_tensor._rank() != 2:
        return False
    if input_tensor._size(0) != output_tensor._size(0):
        return False
    # Only concatenation along the second dimension is supported.
    if cat_op._attrs['concat_dim'] != 1:
        return False
    return transform_strided_ops_utils.gemm_stride_checker(
        bmm_op._attrs['output_accessors'][0],
        cat_op._attrs['concat_dim'],
        get_stride_at_dim=0,
    )
def pathconf(path, os_name=os.name, isdir_fnc=os.path.isdir,
             pathconf_fnc=getattr(os, 'pathconf', None),
             pathconf_names=getattr(os, 'pathconf_names', ())):
    """Return a pathconf-style dict of filesystem limits for *path*.

    Uses os.pathconf per key when available, skipping keys the platform
    rejects with EINVAL. Otherwise falls back to hard-coded limits:
    Windows MAX_PATH-style values on 'nt', 255 elsewhere.
    """
    if pathconf_fnc and pathconf_names:
        results = {}
        for name in pathconf_names:
            try:
                results[name] = pathconf_fnc(path, name)
            except OSError as error:
                # EINVAL means "no value for this key here" — just skip it.
                if error.errno != errno.EINVAL:
                    raise
        return results
    if os_name == 'nt':
        max_path = 246 if isdir_fnc(path) else 259
    else:
        max_path = 255
    return {'PC_PATH_MAX': max_path, 'PC_NAME_MAX': max_path - len(path)}
def extractLordobsidianCom(item):
    """Map a lordobsidian.com feed item to a release message.

    Returns None for previews / items without chapter info, a release
    message for known tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # NOTE(review): the '' key below matches an empty tag name and looks
    # like a tag lost in extraction — confirm against the feed config.
    series_by_tag = {
        "Martial King's Retired Life": ("Martial King's Retired Life", 'translated'),
        '!!': ('Oh No! After I Reincarnated, My Moms Became Son-cons!', 'translated'),
        '': ('My Yandere-Succubus Daughter is Mommy-Warriors Natural Enemy', 'translated'),
        'Yandere Succubus': ('My Yandere-Succubus Daughter is Mommy-Warriors Natural Enemy', 'translated'),
        'MYSD': ('My Yandere-Succubus Daughter is Mommy-Warriors Natural Enemy', 'translated'),
        'AATG': ('Apotheosis Ascension to Godhood', 'translated'),
    }
    for tagname, (name, tl_type) in series_by_tag.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def jsConvert(data: Any, jsDataKey: Union[(str, primitives.JsDataModel)], isPyData: bool, js_funcs: Union[(list, str)]):
    """Convert a value to a JavaScript expression string.

    Python data (isPyData=True) is JSON-serialized (objects exposing
    toStr() use that instead; range objects are dumped as lists).
    Otherwise *data* is treated as a JS expression, optionally indexed
    with *jsDataKey* and wrapped in a call to *js_funcs*.
    """
    if isPyData:
        if hasattr(data, 'toStr'):
            return data.toStr()
        try:
            return json.dumps(data)
        except Exception:
            # range is not JSON-serializable; materialize it as a list.
            if isinstance(data, range):
                return json.dumps(list(data))
            raise
    expression = data
    if jsDataKey is not None:
        expression = '%s[%s]' % (expression, jsConvertData(jsDataKey, None))
    if js_funcs is not None:
        expression = '%s(%s)' % (js_funcs, expression)
    return expression
class StandardBackend(Backend):
    """Template base for optimization backends fed by (example, loss) pairs.

    Compiled from Coconut source: `_coconut.dict()` is Coconut's runtime
    dict constructor. Subclasses must implement setup_backend, tell_data
    and get_next_values; `self._params` is presumably stored by
    setup_backend — TODO confirm (not visible here).
    """

    def __init__(self, examples, params, *args, **kwargs):
        self.init_fallback_backend()
        # No parameters means nothing to optimize: expose empty values.
        if (not params):
            self.current_values = _coconut.dict()
            return
        self.setup_backend(params, *args, **kwargs)
        if examples:
            self.tell_examples(examples)
        else:
            self.current_values = _coconut.dict()

    def tell_examples(self, new_examples):
        # Feed new observations to the backend, then refresh the suggested
        # next values.
        (new_data, new_losses) = get_named_data_points_and_losses(new_examples, self._params)
        self.tell_data(new_data, new_losses)
        self.current_values = self.get_next_values()

    def setup_backend(self, params, *args, **kwargs):
        # Abstract: initialize backend state for the given parameters.
        raise NotImplementedError('StandardBackend subclasses using StandardBackend.__init__ must define a setup_backend(params, *args, **kwargs) method')

    def tell_data(self, new_data, new_losses):
        # Abstract: ingest observed data points and their losses.
        raise NotImplementedError('StandardBackend subclasses using StandardBackend.tell_examples must define a tell_data(new_data, new_losses) method')

    def get_next_values(self):
        # Abstract: produce the next parameter values to evaluate.
        raise NotImplementedError('StandardBackend subclasses using StandardBackend.tell_examples must define a get_next_values() method')
class TestWorkerAssignment():
    """Unit tests for driver.calculate_worker_assignments.

    Each case checks how sequential client ids are distributed across hosts
    and the fixed number of worker slots (cores) per host.
    """

    def test_single_host_assignment_clients_matches_cores(self):
        # One client per core: each worker slot gets exactly one client id.
        host_configs = [{'host': 'localhost', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=4)
        assert (assignments == [{'host': 'localhost', 'workers': [[0], [1], [2], [3]]}])

    def test_single_host_assignment_more_clients_than_cores(self):
        # Surplus clients double up on the earliest worker slots.
        host_configs = [{'host': 'localhost', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=6)
        assert (assignments == [{'host': 'localhost', 'workers': [[0, 1], [2, 3], [4], [5]]}])

    def test_single_host_assignment_less_clients_than_cores(self):
        # Fewer clients than cores leaves the trailing worker slots empty.
        host_configs = [{'host': 'localhost', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=2)
        assert (assignments == [{'host': 'localhost', 'workers': [[0], [1], [], []]}])

    def test_multiple_host_assignment_more_clients_than_cores(self):
        # Client ids are assigned contiguously per host, doubling up per slot.
        host_configs = [{'host': 'host-a', 'cores': 4}, {'host': 'host-b', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=16)
        assert (assignments == [{'host': 'host-a', 'workers': [[0, 1], [2, 3], [4, 5], [6, 7]]}, {'host': 'host-b', 'workers': [[8, 9], [10, 11], [12, 13], [14, 15]]}])

    def test_multiple_host_assignment_less_clients_than_cores(self):
        host_configs = [{'host': 'host-a', 'cores': 4}, {'host': 'host-b', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=4)
        assert (assignments == [{'host': 'host-a', 'workers': [[0], [1], [], []]}, {'host': 'host-b', 'workers': [[2], [3], [], []]}])

    def test_uneven_assignment_across_hosts(self):
        # 17 clients over 12 slots: earlier hosts/slots absorb the remainder first.
        host_configs = [{'host': 'host-a', 'cores': 4}, {'host': 'host-b', 'cores': 4}, {'host': 'host-c', 'cores': 4}]
        assignments = driver.calculate_worker_assignments(host_configs, client_count=17)
        assert (assignments == [{'host': 'host-a', 'workers': [[0, 1], [2, 3], [4], [5]]}, {'host': 'host-b', 'workers': [[6, 7], [8, 9], [10], [11]]}, {'host': 'host-c', 'workers': [[12, 13], [14], [15], [16]]}])
def nonlinear_poisson_bbc(solver_parameters, mesh_num, porder):
    """Solve a Poisson problem on the unit square with an EquationBC on
    boundary 1 (itself constrained by a DirichletBC at the points it shares
    with boundaries 3 and 4) and return the L2 error against the exact
    solution cos(2*pi*x)*cos(2*pi*y).

    NOTE(review): relies on Firedrake (UnitSquareMesh, EquationBC, ...);
    assumes boundary markers 1/3/4 follow UnitSquareMesh's numbering --
    confirm against the Firedrake mesh utility documentation.
    """
    mesh = UnitSquareMesh(mesh_num, mesh_num)
    V = FunctionSpace(mesh, 'CG', porder)
    u = Function(V)
    v = TestFunction(V)
    f = Function(V)
    (x, y) = SpatialCoordinate(mesh)
    # Forcing term chosen so the exact solution is cos(2*pi*x)*cos(2*pi*y).
    f.interpolate((((((- 8.0) * pi) * pi) * cos(((x * pi) * 2))) * cos(((y * pi) * 2))))
    a = ((- inner(grad(u), grad(v))) * dx)
    L = (inner(f, v) * dx)
    # Unit vector in y, used to take tangential derivatives on boundary 1.
    e2 = as_vector([0.0, 1.0])
    # Weak equation imposed on boundary 1 instead of a plain Dirichlet value.
    a1 = (((- inner(dot(grad(u), e2), dot(grad(v), e2))) + (((4 * pi) * pi) * inner(u, v))) * ds(1))
    g = Function(V).interpolate((cos(((2 * pi) * x)) * cos(((2 * pi) * y))))
    # Pin the EquationBC at the corner points boundary 1 shares with 3 and 4.
    bbc = DirichletBC(V, g, ((1, 3), (1, 4)))
    bc1 = EquationBC((a1 == 0), u, 1, bcs=[bbc])
    solve(((a - L) == 0), u, bcs=[bc1], solver_parameters=solver_parameters)
    # Reuse f to hold the exact solution; return the L2 norm of the error.
    f.interpolate((cos(((x * pi) * 2)) * cos(((y * pi) * 2))))
    return sqrt(assemble((inner((u - f), (u - f)) * dx)))
def get_parser():
    """Build the argparse parser for the Facebook Ads Library API CLI."""
    arg_parser = argparse.ArgumentParser(description='The Facebook Ads Library API CLI Utility')
    # Required connection/query options.
    arg_parser.add_argument('-t', '--access-token', help='The Facebook developer access token', required=True)
    arg_parser.add_argument('-f', '--fields', help='Fields to retrieve from the Ad Library API', required=True, type=validate_fields_param)
    arg_parser.add_argument('-s', '--search-term', help='The term you want to search for')
    arg_parser.add_argument('-c', '--country', help='Comma-separated country code (no spaces)', required=True, type=validate_country_param)
    # Optional filters and tuning knobs.
    arg_parser.add_argument('--search-page-ids', help='The specific Facebook Page you want to search')
    arg_parser.add_argument('--ad-active-status', help='Filter by the current status of the ads at the moment the script runs')
    arg_parser.add_argument('--after-date', help='Only return ads that started delivery after this date')
    arg_parser.add_argument('--batch-size', type=int, help='Batch size')
    arg_parser.add_argument('--retry-limit', type=int, help='When an error occurs, the script will abort if it fails to get the same batch this amount of times')
    arg_parser.add_argument('-v', '--verbose', action='store_true')
    # Positional action plus free-form trailing arguments for it.
    operator_names = ','.join(get_operators().keys())
    arg_parser.add_argument('action', help=f'Action to take on the ads, possible values: {operator_names}')
    arg_parser.add_argument('args', nargs=argparse.REMAINDER, help='The parameter for the specific action')
    return arg_parser
@_dataclasses.dataclass(frozen=True, kw_only=True)
class EventarcTriggerOptions(EventHandlerOptions):
    """Options for Eventarc-triggered functions.

    BUGFIX: restored the '@' prefix on the dataclass decorator; without it
    the call is a bare expression and the class never becomes a frozen,
    keyword-only dataclass.

    Attributes:
        event_type: The Eventarc event type to listen for.
        channel: Fully qualified channel resource name; when None the
            default Firebase channel in us-central1 is used.
        filters: Optional attribute filters applied to incoming events.
    """
    event_type: str
    channel: (str | None) = None
    filters: (dict[(str, str)] | None) = None

    def _endpoint(self, **kwargs) -> _manifest.ManifestEndpoint:
        """Build the deployment manifest endpoint for this trigger."""
        event_filters = {} if (self.filters is None) else self.filters
        endpoint = _manifest.ManifestEndpoint(**_typing.cast(
            _typing.Dict,
            _dataclasses.asdict(super()._endpoint(
                **kwargs, event_filters=event_filters, event_type=self.event_type))))
        assert (endpoint.eventTrigger is not None)
        # Fall back to the default Firebase channel when none is configured.
        channel = (self.channel if (self.channel is not None)
                   else 'locations/us-central1/channels/firebase')
        endpoint.eventTrigger['channel'] = channel
        return endpoint

    def _required_apis(self) -> list[_manifest.ManifestRequiredApi]:
        """APIs that must be enabled to deploy custom event functions."""
        return [_manifest.ManifestRequiredApi(
            api='eventarcpublishing.googleapis.com',
            reason='Needed for custom event functions')]
def test_enum_evolution_using_default():
    """Enum symbols absent from the reader schema resolve to its default."""
    writer_schema = {'type': 'enum', 'name': 'test', 'symbols': ['A', 'B']}
    reader_schema = {'type': 'enum', 'name': 'test', 'symbols': ['C', 'D'], 'default': 'C'}
    buffer = BytesIO()
    # Write 'A' with the old schema, then read it back with the new one.
    fastavro.writer(buffer, writer_schema, ['A'])
    buffer.seek(0)
    decoded = list(fastavro.reader(buffer, reader_schema))
    assert (decoded == ['C'])
class SearchExportApi(Api):
    """Endpoint wrapper for Zendesk's cursor-based search export API.

    Results come back as generic ``results`` objects, resolved through a
    ZendeskObjectMapping.
    """

    def __init__(self, config):
        # Modernised from the legacy two-argument super() form; behaviour
        # is identical on Python 3.
        super().__init__(config, object_type='results', endpoint=EndpointFactory('search_export'))
        self._object_mapping = ZendeskObjectMapping(self)

    def __call__(self, *args, **kwargs):
        """Execute the export search, forwarding all arguments to the endpoint."""
        return self._query_zendesk(self.endpoint, self.object_type, *args, **kwargs)
class RaindropsTest(unittest.TestCase):
    """Exercism "raindrops" kata: convert() returns 'Pling'/'Plang'/'Plong'
    for factors 3/5/7 (concatenated in that order), otherwise the number
    itself as a string."""

    def test_the_sound_for_1_is_1(self):
        self.assertEqual(convert(1), '1')

    def test_the_sound_for_3_is_pling(self):
        self.assertEqual(convert(3), 'Pling')

    def test_the_sound_for_5_is_plang(self):
        self.assertEqual(convert(5), 'Plang')

    def test_the_sound_for_7_is_plong(self):
        self.assertEqual(convert(7), 'Plong')

    def test_the_sound_for_6_is_pling_as_it_has_a_factor_3(self):
        self.assertEqual(convert(6), 'Pling')

    def test_2_to_the_power_3_does_not_make_a_raindrop_sound_as_3_is_the_exponent_not_the_base(self):
        self.assertEqual(convert(8), '8')

    def test_the_sound_for_9_is_pling_as_it_has_a_factor_3(self):
        self.assertEqual(convert(9), 'Pling')

    def test_the_sound_for_10_is_plang_as_it_has_a_factor_5(self):
        self.assertEqual(convert(10), 'Plang')

    def test_the_sound_for_14_is_plong_as_it_has_a_factor_of_7(self):
        self.assertEqual(convert(14), 'Plong')

    def test_the_sound_for_15_is_pling_plang_as_it_has_factors_3_and_5(self):
        self.assertEqual(convert(15), 'PlingPlang')

    def test_the_sound_for_21_is_pling_plong_as_it_has_factors_3_and_7(self):
        self.assertEqual(convert(21), 'PlingPlong')

    def test_the_sound_for_25_is_plang_as_it_has_a_factor_5(self):
        self.assertEqual(convert(25), 'Plang')

    def test_the_sound_for_27_is_pling_as_it_has_a_factor_3(self):
        self.assertEqual(convert(27), 'Pling')

    def test_the_sound_for_35_is_plang_plong_as_it_has_factors_5_and_7(self):
        self.assertEqual(convert(35), 'PlangPlong')

    def test_the_sound_for_49_is_plong_as_it_has_a_factor_7(self):
        self.assertEqual(convert(49), 'Plong')

    def test_the_sound_for_52_is_52(self):
        self.assertEqual(convert(52), '52')

    def test_the_sound_for_105_is_pling_plang_plong_as_it_has_factors_3_5_and_7(self):
        self.assertEqual(convert(105), 'PlingPlangPlong')

    def test_the_sound_for_3125_is_plang_as_it_has_a_factor_5(self):
        self.assertEqual(convert(3125), 'Plang')
def downgrade():
    """Alembic downgrade: convert the listed timestamp columns back to
    timezone-naive timestamps.

    For every table in ``tables_to_update`` this first rewrites the column
    values with an ``at time zone 'utc'`` cast in one UPDATE statement, then
    alters each column's type to ``timestamp without time zone``.
    """
    for table_name in tables_to_update:
        logger.info(('downgrading table: %s' % table_name))
        # Build a single UPDATE statement covering every affected column.
        sql = '\n -- Add the time zone offset\n UPDATE\n "{table_name}"\n SET\n '.format(table_name=table_name)
        for (i, column_name) in enumerate(tables_to_update[table_name]):
            if (i > 0):
                # Separate column assignments with commas.
                sql = '{sql},\n'.format(sql=sql)
            # NOTE(review): the CAST targets 'timestamp with time zone' even
            # though this is the downgrade path -- confirm this is intended
            # (the subsequent alter_column drops the time zone anyway).
            sql = '{sql}\n "{column_name}" = (\n SELECT\n CAST(aliased_table.{column_name} at time zone \'utc\' AS timestamp with time zone)\n FROM "{table_name}" as aliased_table\n where aliased_table.id = "{table_name}".id\n )'.format(sql=sql, column_name=column_name, table_name=table_name)
        op.execute(sql)
        logger.info(('raw sql completed for table: %s' % table_name))
        # Then change the column types themselves (batch mode for SQLite).
        with op.batch_alter_table(table_name) as batch_op:
            for column_name in tables_to_update[table_name]:
                batch_op.alter_column(column_name, type_=sa.DateTime(timezone=False))
        logger.info(('done downgrading table: %s' % table_name))
def testbot(request) -> TestBot:
    """Pytest fixture: configure DEBUG logging to stdout, build a TestBot
    from per-test attributes (extra_plugin_dir / extra_config / loglevel,
    looked up on the test instance, then the module, then a default),
    start it, and register its shutdown as a finalizer.
    """

    def on_finish() -> TestBot:
        # Finalizer: stop the bot once the test is done.
        bot.stop()

    # Route all log output to stdout at DEBUG, silencing noisy MARKDOWN logs.
    logger = logging.getLogger('')
    logging.getLogger('MARKDOWN').setLevel(logging.ERROR)
    logger.setLevel(logging.DEBUG)
    console_hdlr = logging.StreamHandler(sys.stdout)
    console_hdlr.setFormatter(logging.Formatter('%(levelname)-8s %(name)-25s %(message)s'))
    logger.handlers = []
    logger.addHandler(console_hdlr)

    kwargs = {}
    for (attr, default) in (('extra_plugin_dir', None), ('extra_config', None), ('loglevel', logging.DEBUG)):
        # BUGFIX: previously kwargs[attr] was only assigned when the request
        # had an 'instance' attribute, so a request without one raised
        # KeyError on the None-check below. Initialise to None first.
        kwargs[attr] = None
        if hasattr(request, 'instance'):
            kwargs[attr] = getattr(request.instance, attr, None)
        if (kwargs[attr] is None):
            kwargs[attr] = getattr(request.module, attr, default)

    bot = TestBot(**kwargs)
    bot.start()
    request.addfinalizer(on_finish)
    return bot
class CustomFieldOption(BaseObject):
    """A single selectable option of a Zendesk custom field."""

    def __init__(self, api=None, id=None, name=None, position=None, raw_name=None, url=None, value=None, **kwargs):
        self.api = api
        self.id = id
        self.name = name
        self.position = position
        self.raw_name = raw_name
        self.url = url
        self.value = value
        # Accept any extra attributes the API returns.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)
        # Attributes that were never populated should not be flagged dirty.
        for attr_name in self.to_dict():
            if getattr(self, attr_name) is not None:
                continue
            try:
                self._dirty_attributes.remove(attr_name)
            except KeyError:
                continue
def add_iteration(df: pd.DataFrame, symbol_table: TraceSymbolTable) -> pd.DataFrame:
    """Annotate trace events in *df* with the profiler iteration
    ("ProfilerStep#N") they fall into, and return a dataframe of the
    ProfilerStep events themselves.

    CPU events (stream < 0) are matched by timestamp against the
    ProfilerStep intervals; GPU events (stream > 0) inherit the iteration of
    their correlated CPU event via ``index_correlation``. Unmatched events
    get iteration -1.
    """
    s_map = pd.Series(symbol_table.sym_index)
    s_tab = pd.Series(symbol_table.sym_table)
    # Symbol ids of all names beginning with "ProfilerStep".
    profiler_step_ids = s_map[s_map.index.str.startswith('ProfilerStep')]
    # NOTE(review): sort_index() returns a new Series and the result is
    # discarded here -- as written this statement has no effect.
    profiler_step_ids.sort_index()

    def _extract_iter(profiler_step_name_id: int) -> int:
        # Parse the numeric iteration out of "ProfilerStep#N".
        s = s_tab[profiler_step_name_id]
        m = re.match('ProfilerStep\\s*#\\s*(\\d+)', s)
        return int(m.group(1))
    profiler_steps = df.loc[df['name'].isin(profiler_step_ids.values)]
    profiler_steps = profiler_steps[['ts', 'dur', 'name']].copy()
    # NOTE(review): these two column names look swapped -- 's_name' receives
    # the numeric iteration and 'iter' the string name. The positional
    # indexing below (step[3]) relies on this order, so it is internally
    # consistent; confirm before renaming.
    profiler_steps['s_name'] = profiler_steps['name'].apply(_extract_iter)
    profiler_steps['iter'] = profiler_steps['name'].apply((lambda idx: s_tab[idx]))
    profiler_steps_array = profiler_steps.to_numpy()

    def _get_profiler_step(ts: int) -> int:
        # Columns in profiler_steps_array: 0=ts, 1=dur, 2=name id, 3=iteration.
        iter = (- 1)
        for step in profiler_steps_array:
            if (step[0] <= ts < (step[0] + step[1])):
                iter = step[3]
        return iter
    # CPU events: classify by timestamp; GPU events: inherit from correlated CPU event.
    df.loc[(df['stream'].lt(0), 'iteration')] = df['ts'].apply(_get_profiler_step)
    df.loc[(df['stream'].gt(0), 'iteration')] = df['index_correlation'].apply((lambda x: (df.loc[(x, 'iteration')] if (x > 0) else (- 1))))
    df['iteration'] = pd.to_numeric(df['iteration'], downcast='integer')
    return profiler_steps
def cole_perm(inp, p_dict):
    """Cole-Cole complex permittivity model.

    Builds the horizontal and vertical complex conductivities (etaH, etaV)
    for each frequency in p_dict['freq'] from the relaxation parameters in
    *inp* and the anisotropy factor in p_dict['aniso'].
    """
    omega = (2j * np.pi) * p_dict['freq']
    # (i*omega*tau)^c relaxation term, one row per frequency.
    iwtc = np.outer(omega, inp['tau']) ** inp['c']
    jw = np.outer(omega, np.ones(2))
    eps_h = inp['eperm_8'] + (inp['eperm_0'] - inp['eperm_8']) / (1 + iwtc)
    # Vertical permittivity is scaled down by the squared anisotropy factor.
    eps_v = eps_h / p_dict['aniso'] ** 2
    eta_h = 1 / inp['rho_0'] + jw * eps_h
    eta_v = 1 / inp['rho_0'] + jw * eps_v
    return (eta_h, eta_v)
class Gaussian(Distribution):
    """A Gaussian (normal) distribution parameterised by mean and variance."""

    def __init__(self, mean, variance):
        # Default to the standard normal when parameters are omitted.
        self.mean = 0.0 if mean is None else mean
        self.variance = 1.0 if variance is None else variance
        if self.variance <= 0:
            raise ValueError('Invalid Gaussian distribution parameters. Variance should be positive.')
        self.sigma = np.sqrt(self.variance)
        # Sample the pdf over +/- 15 standard deviations around the mean.
        self.x_range_for_pdf = np.linspace((- 15.0) * self.sigma, 15.0 * self.sigma, RECURRENCE_PDF_SAMPLES) + self.mean
        self.parent = norm(loc=self.mean, scale=self.sigma)
        self.skewness = 0.0
        self.kurtosis = 0.0
        self.bounds = np.array([(- np.inf), np.inf])

    def get_description(self):
        """Human-readable summary of the distribution."""
        return (((('is a Gaussian distribution with a mean of ' + str(self.mean)) + ' and a variance of ') + str(self.variance)) + '.')

    def get_samples(self, m=None):
        """Draw m random samples (500000 when m is None)."""
        count = 500000 if m is None else m
        return self.parent.rvs(size=count)

    def get_pdf(self, points=None):
        """Probability density at *points*."""
        return self.parent.pdf(points)

    def get_cdf(self, points=None):
        """Cumulative distribution at *points*."""
        return self.parent.cdf(points)

    def get_icdf(self, xx):
        """Inverse CDF (quantile function) at *xx*."""
        return self.parent.ppf(xx)
class OptionSeriesAreasplineSonificationContexttracksMappingTremoloSpeed(Options):
    """Generated option wrapper for the Highcharts
    `series.areaspline.sonification.contextTracks.mapping.tremolo.speed` node.

    NOTE(review): each getter/setter pair below shares one name; the
    @property / @<name>.setter decorators appear to have been lost in
    extraction (as written, the second def of each pair overrides the
    first) -- confirm against the generated upstream source.
    """

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: custom mapping function.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property the speed is mapped to.
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter: data property name.
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped value.
        return self._config_get(None)

    def max(self, num: float):
        # Setter: upper bound.
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped value.
        return self._config_get(None)

    def min(self, num: float):
        # Setter: lower bound.
        self._config(num, js_type=False)

    def within(self):
        # Getter: scope the mapping is computed within.
        return self._config_get(None)

    def within(self, value: Any):
        # Setter: mapping scope.
        self._config(value, js_type=False)
class OptionPlotoptionsSolidgaugeSonificationContexttracksMappingHighpass(Options):
    """Generated option wrapper for the Highcharts
    `plotOptions.solidgauge.sonification.contextTracks.mapping.highpass` node."""

    def frequency(self) -> 'OptionPlotoptionsSolidgaugeSonificationContexttracksMappingHighpassFrequency':
        # Sub-option container for the highpass filter frequency mapping.
        return self._config_sub_data('frequency', OptionPlotoptionsSolidgaugeSonificationContexttracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsSolidgaugeSonificationContexttracksMappingHighpassResonance':
        # Sub-option container for the highpass filter resonance mapping.
        return self._config_sub_data('resonance', OptionPlotoptionsSolidgaugeSonificationContexttracksMappingHighpassResonance)
class HeadsRateModelTest(unittest.TestCase):
    """Checks the hand-written and transformed beta-Bernoulli models
    compile to the same graph."""

    def test_beta_bernoulli_conjugate_graph(self) -> None:
        _, original_graph = HeadsRateModel().run()
        _, transformed_graph = HeadsRateModelTransformed().run()
        self.assertEqual(original_graph, transformed_graph)
class TestMassEditWithFile(unittest.TestCase):
    """Tests of MassEdit.edit_file against real files in a temporary
    workspace, focusing on encoding handling and newline normalisation."""

    def setUp(self):
        self.editor = massedit.MassEdit()
        self.workspace = Workspace()
        self.file_name = os.path.join(self.workspace.top_dir, unicode('somefile.txt'))

    def tearDown(self):
        self.workspace.cleanup()

    def write_input_file(self, text, encoding=None):
        # Helper: write *text* to the test file (utf-8 unless specified).
        if (not encoding):
            encoding = 'utf-8'
        with io.open(self.file_name, 'w+', encoding=encoding) as fh:
            fh.write(text)

    def test_non_utf8_with_utf8_setting(self):
        """A cp1252-encoded file read with the default utf-8 raises and logs."""
        # NOTE(review): for this test to exercise a decode error the content
        # literal presumably contained a non-ASCII character originally
        # (pure ASCII decodes fine as utf-8) -- it may have been lost in
        # extraction; confirm against the upstream test.
        log_sink = LogInterceptor(massedit.log)
        content = unicode('This is ok\nThis not')
        self.write_input_file(content, encoding='cp1252')

        def identity(lines, _):
            # Pass-through editor function.
            for line in lines:
                (yield line)
        self.editor.append_function(identity)
        with self.assertRaises(UnicodeDecodeError):
            _ = self.editor.edit_file(self.file_name)
        self.assertIn('encoding error', log_sink.log)

    def test_handling_of_cp1252(self):
        """With the editor encoding set to cp1252, the same file round-trips."""
        encoding = 'cp1252'
        self.editor.encoding = encoding
        content = unicode('This is ok\nThis not')
        self.write_input_file(content, encoding=encoding)

        def identity(lines, _):
            for line in lines:
                (yield line)
        self.editor.append_function(identity)
        diffs = self.editor.edit_file(self.file_name)
        self.assertEqual(diffs, [])

    def test_forcing_end_of_line_for_output_files(self):
        """editor.newline forces the output newline regardless of the input's."""
        self.editor.newline = '\n'
        content = 'This is a line finishing with CRLF\r\n'
        self.write_input_file(content)

        def identity(lines, _):
            for line in lines:
                (yield line)
        self.editor.append_function(identity)
        diffs = self.editor.edit_file(self.file_name)
        self.assertEqual(diffs, [])
        with io.open(self.file_name) as f:
            f.readline()
            output_newline = f.newlines
        expected_eol = self.editor.newline
        if (expected_eol is None):
            # No explicit setting means the platform default line separator.
            expected_eol = os.linesep
        self.assertEqual(expected_eol, output_newline)
def test_assign_to_vfs_sub(cg1, vcg1):
    """Assignments between a scalar function and individual components of a
    vector function space must only touch the targeted component."""
    scalar = Function(cg1).assign(2)
    vector = Function(vcg1).assign(0)
    # Copy the scalar into component 0 only.
    vector.sub(0).assign(scalar)
    assert np.allclose(vector.sub(0).dat.data_ro, 2)
    assert np.allclose(vector.sub(1).dat.data_ro, 0)
    # Copy a component back into the scalar.
    scalar.assign(vector.sub(1))
    assert np.allclose(scalar.dat.data_ro, 0)
    # Augmented assignment from a component also works.
    scalar += vector.sub(0)
    assert np.allclose(scalar.dat.data_ro, 2)
class WhatsAppClient():
    """Thin client for sending messages through the WhatsApp Cloud API
    (Meta Graph API).

    The class attributes hold placeholder credentials that must be replaced
    with the values from your WhatsApp API setup.
    """
    # BUGFIX/NOTE(review): the base-URL string literal was corrupted in the
    # source; restored to the Graph API base the Cloud API uses -- confirm
    # the API version against your app's setup page.
    API_URL = 'https://graph.facebook.com/v17.0/'
    WHATSAPP_API_TOKEN = '<Temporary access token from your WhatsApp API Setup>'
    WHATSAPP_CLOUD_NUMBER_ID = '<Phone number ID from your WhatsApp API Setup>'

    def __init__(self):
        self.headers = {'Authorization': f'Bearer {self.WHATSAPP_API_TOKEN}', 'Content-Type': 'application/json'}
        # All requests target the phone-number-scoped endpoint.
        self.API_URL = (self.API_URL + self.WHATSAPP_CLOUD_NUMBER_ID)

    def send_text_message(self, message, phone_number):
        """Send a plain-text message to *phone_number*; returns the HTTP status code."""
        payload = {'messaging_product': 'whatsapp', 'to': phone_number, 'type': 'text', 'text': {'preview_url': False, 'body': message}}
        response = requests.post(f'{self.API_URL}/messages', json=payload, headers=self.headers)
        print(response.status_code)
        # NOTE(review): assert is stripped under `python -O`; kept for
        # interface compatibility, but an explicit raise would be sturdier.
        assert (response.status_code == 200), 'Error sending message'
        return response.status_code
class Migration(migrations.Migration):
    """Initial Django migration creating the Project model.

    NOTE(review): the field name 'desription' is misspelled, but renaming a
    field inside an already-applied migration would break migration history;
    fix it with a follow-up RenameField migration instead of editing this one.
    """
    initial = True
    dependencies = []
    operations = [migrations.CreateModel(name='Project', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateField(auto_now_add=True)), ('updated_at', models.DateField(auto_now=True)), ('title', models.CharField(max_length=100)), ('desription', models.TextField(blank=True, default='')), ('url', models.CharField(max_length=255)), ('level', models.IntegerField(choices=[(1, 'Level 1'), (2, 'Level 2')], default=1)), ('required', models.BooleanField(default=True))], options={'abstract': False})]
class TestTableTrainingTeiParser():
    """Tests for converting GROBID training TEI documents into per-line
    tagged token results (B-/I- prefixed labels, 'O' for unlabelled)."""

    def test_should_parse_single_token_labelled_training_tei_lines(self):
        # Two labelled elements on separate lines each begin their own tag.
        tei_root = _get_training_tei_with_text([E('head', TOKEN_1, E('lb')), '\n', E('p', TOKEN_2, E('lb')), '\n'])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<section>'), (TOKEN_2, 'B-<paragraph>')]])

    def test_should_parse_single_label_with_multiple_lines(self):
        # A label spanning a line break continues with an I- tag.
        tei_root = _get_training_tei_with_text([E('p', TOKEN_1, E('lb'), '\n', TOKEN_2, E('lb')), '\n'])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<paragraph>'), (TOKEN_2, 'I-<paragraph>')]])

    def test_should_output_multiple_tokens_of_each_unlabelled_lines(self):
        # Tokens outside any labelled element are tagged 'O'.
        tei_root = _get_training_tei_with_text([TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb'), '\n'])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'O'), (TOKEN_2, 'O'), (TOKEN_3, 'O'), (TOKEN_4, 'O')]])

    def test_should_parse_single_label_with_multiple_tokens_on_multiple_lines(self):
        tei_root = _get_training_tei_with_text([E('p', TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb'), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'B-<paragraph>'), (TOKEN_2, 'I-<paragraph>'), (TOKEN_3, 'I-<paragraph>'), (TOKEN_4, 'I-<paragraph>')]])

    def test_should_continue_paragraph_after_child_element(self):
        # A nested ref interrupts but does not terminate the paragraph label.
        tei_root = _get_training_tei_with_text([E('p', TOKEN_1, E('lb'), '\n', E('ref', {'type': 'biblio'}, TOKEN_2, E('lb')), '\n', TOKEN_3, E('lb'), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'B-<paragraph>'), (TOKEN_2, 'B-<citation_marker>'), (TOKEN_3, 'I-<paragraph>')]])

    # NOTE(review): the decorator prefix (likely "@pytest.mark") appears to
    # have been truncated in extraction; as written the leading
    # ".parametrize" line is not valid on its own -- confirm upstream.
    .parametrize('tei_label,element_path', list(TRAINING_XML_ELEMENT_PATH_BY_LABEL.items()))
    def test_should_parse_all_supported_labels(self, tei_label: str, element_path: Sequence[str]):
        # Build a minimal TEI document with tokens under each supported path.
        xml_writer = XmlTreeWriter(E('tei'), element_maker=E)
        xml_writer.require_path(element_path)
        xml_writer.append_all(TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb'))
        tei_root = xml_writer.root
        LOGGER.debug('tei_root: %r', etree.tostring(tei_root))
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        if ((tei_label in OTHER_LABELS) or (element_path == ROOT_TRAINING_XML_ELEMENT_PATH)):
            # "Other" labels and root-level tokens map to the <other> tag.
            assert (tag_result == [[(TOKEN_1, '<other>'), (TOKEN_2, '<other>'), (TOKEN_3, '<other>'), (TOKEN_4, '<other>')]])
        else:
            assert (tag_result == [[(TOKEN_1, f'B-{tei_label}'), (TOKEN_2, f'I-{tei_label}'), (TOKEN_3, f'I-{tei_label}'), (TOKEN_4, f'I-{tei_label}')]])
class TestThreatMappings(BaseRuleTest):
    """Validate the ATT&CK threat mappings (tactics, techniques and
    sub-techniques) declared in rule metadata against the official ATT&CK
    data bundled with the repo."""

    def test_technique_deprecations(self):
        """Flag rules that still reference revoked or deprecated techniques."""
        replacement_map = attack.load_techniques_redirect()
        revoked = list(attack.revoked)
        deprecated = list(attack.deprecated)
        for rule in self.all_rules:
            revoked_techniques = {}
            threat_mapping = rule.contents.data.threat
            if threat_mapping:
                for entry in threat_mapping:
                    for technique in (entry.technique or []):
                        if (technique.id in (revoked + deprecated)):
                            # Map the stale id to its replacement, if one exists.
                            revoked_techniques[technique.id] = replacement_map.get(technique.id, 'DEPRECATED - DO NOT USE')
            if revoked_techniques:
                old_new_mapping = '\n'.join((f'Actual: {k} -> Expected {v}' for (k, v) in revoked_techniques.items()))
                self.fail(f'''{self.rule_str(rule)} Using deprecated ATT&CK techniques: {old_new_mapping}''')

    def test_tactic_to_technique_correlations(self):
        """Check tactic/technique ids, names and reference URLs are mutually consistent."""
        for rule in self.all_rules:
            threat_mapping = (rule.contents.data.threat or [])
            if threat_mapping:
                for entry in threat_mapping:
                    tactic = entry.tactic
                    techniques = (entry.technique or [])
                    # Every technique must belong to the tactic it is filed under.
                    mismatched = [t.id for t in techniques if (t.id not in attack.matrix[tactic.name])]
                    if mismatched:
                        self.fail(f"mismatched ATT&CK techniques for rule: {self.rule_str(rule)} {', '.join(mismatched)} not under: {tactic['name']}")
                    expected_tactic = attack.tactics_map[tactic.name]
                    self.assertEqual(expected_tactic, tactic.id, f'''ATT&CK tactic mapping error for rule: {self.rule_str(rule)} expected: {expected_tactic} for {tactic.name} actual: {tactic.id}''')
                    # The tactic reference URL must end with the tactic id.
                    tactic_reference_id = tactic.reference.rstrip('/').split('/')[(- 1)]
                    self.assertEqual(tactic.id, tactic_reference_id, f'''ATT&CK tactic mapping error for rule: {self.rule_str(rule)} tactic ID {tactic.id} does not match the reference URL ID {tactic.reference}''')
                    for technique in techniques:
                        expected_technique = attack.technique_lookup[technique.id]['name']
                        self.assertEqual(expected_technique, technique.name, f'''ATT&CK technique mapping error for rule: {self.rule_str(rule)} expected: {expected_technique} for {technique.id} actual: {technique.name}''')
                        technique_reference_id = technique.reference.rstrip('/').split('/')[(- 1)]
                        self.assertEqual(technique.id, technique_reference_id, f'''ATT&CK technique mapping error for rule: {self.rule_str(rule)} technique ID {technique.id} does not match the reference URL ID {technique.reference}''')
                        sub_techniques = (technique.subtechnique or [])
                        if sub_techniques:
                            for sub_technique in sub_techniques:
                                expected_sub_technique = attack.technique_lookup[sub_technique.id]['name']
                                self.assertEqual(expected_sub_technique, sub_technique.name, f'''ATT&CK sub-technique mapping error for rule: {self.rule_str(rule)} expected: {expected_sub_technique} for {sub_technique.id} actual: {sub_technique.name}''')
                                # Sub-technique URLs end with ".../Txxxx/yyy"; join the last two parts.
                                sub_technique_reference_id = '.'.join(sub_technique.reference.rstrip('/').split('/')[(- 2):])
                                self.assertEqual(sub_technique.id, sub_technique_reference_id, f'''ATT&CK sub-technique mapping error for rule: {self.rule_str(rule)} sub-technique ID {sub_technique.id} does not match the reference URL ID {sub_technique.reference}''')

    def test_duplicated_tactics(self):
        """Each tactic may appear at most once per rule's threat mapping."""
        for rule in self.all_rules:
            threat_mapping = rule.contents.data.threat
            tactics = [t.tactic.name for t in (threat_mapping or [])]
            duplicates = sorted(set((t for t in tactics if (tactics.count(t) > 1))))
            if duplicates:
                self.fail(f'{self.rule_str(rule)} duplicate tactics defined for {duplicates}. Flatten to a single entry per tactic')
class GCRARCFileEntry():
    """One file-entry record in a GameCube RARC archive.

    Parses the entry at *offset* in *file_bytes*; the entry name is resolved
    from *string_table_bytes*.
    """

    def __init__(self, offset, file_bytes, string_table_bytes):
        self.file_id = struct.unpack_from('>H', file_bytes, (offset + 0))[0]
        self.name_hash = struct.unpack_from('>H', file_bytes, (offset + 2))[0]
        # The flags byte is the top byte of the same 32-bit word as the
        # name offset.
        self.flags = file_bytes[(offset + 4)]
        # BUGFIX: the mask operand was missing; the name offset is the low
        # 24 bits of the flags/name-offset word (RARC entry layout).
        self.name_offset = (struct.unpack_from('>I', file_bytes, (offset + 4))[0] & 0x00FFFFFF)
        self.data_offset = struct.unpack_from('>I', file_bytes, (offset + 8))[0]
        self.data_size = struct.unpack_from('>I', file_bytes, (offset + 12))[0]
        self.name = read_string_from_bytes(self.name_offset, string_table_bytes)
        self.parent_node: Optional[GCRARCNode] = None

    def emit_to_filesystem(self, dir_path: Path, file_data_offset, file_bytes):
        """Write this entry's payload to disk under *dir_path*; directories are skipped."""
        if ((self.flags & int(GCRARCFlags.IS_DIR)) != 0):
            return
        file_path = (dir_path / self.get_full_file_path())
        file_data = file_bytes[(file_data_offset + self.data_offset):((file_data_offset + self.data_offset) + self.data_size)]
        with open(file_path, 'wb') as f:
            f.write(file_data)

    def emit_config(self, level):
        """Render this entry as indented config lines; returns None for directories."""
        if ((self.flags & int(GCRARCFlags.IS_DIR)) != 0):
            return
        lines = []
        lines.append(((' ' * level) + f''' - name: "{self.name}" '''))
        lines.append(((' ' * level) + f''' file_id: 0x{self.file_id:04X} '''))
        # Compression flag distinguishes Yaz0 from Yay0 streams.
        if ((self.flags & int(GCRARCFlags.IS_COMPRESSED)) != 0):
            if ((self.flags & int(GCRARCFlags.IS_YAZ0_COMPRESSED)) != 0):
                lines.append(((' ' * level) + f''' compression: yaz0 '''))
            else:
                lines.append(((' ' * level) + f''' compression: yay0 '''))
        # Preload destination: MRAM is the default and omitted from the config.
        if ((self.flags & int(GCRARCFlags.PRELOAD_TO_MRAM)) == 0):
            if ((self.flags & int(GCRARCFlags.PRELOAD_TO_ARAM)) != 0):
                lines.append(((' ' * level) + f''' preload_type: aram '''))
            else:
                lines.append(((' ' * level) + f''' preload_type: dvd '''))
        return lines

    def get_full_file_path(self):
        """Path of this entry relative to the archive root, built from parent nodes."""
        path_components = [self.name]
        node = self.parent_node
        while (node is not None):
            path_components.insert(0, node.name)
            node = node.parent
        return Path('/'.join(path_components))
# NOTE(review): this bare string literal is almost certainly a decorator
# whose prefix (likely "@registry.reg(") was lost in extraction; as written
# it is a no-op expression statement. Confirm against the backend registry.
('cuda.gemm_rcr_permute.gen_profiler')
def gen_profiler(func_attrs, workdir, profiler_filename, dim_info_dict, extra_code=''):
    """Generate profiler sources for the CUDA gemm_rcr_permute op.

    Prepends the permute-specific extra code to *extra_code* and delegates
    to the shared gemm profiler generator.
    """
    extra_code = f'''{common_permute.EXTRA_CODE.render()} {extra_code}'''
    return common_gen_profiler(func_attrs, workdir, profiler_filename, dim_info_dict, common.SRC_TEMPLATE, PROBLEM_ARGS_TEMPLATE, extra_code=extra_code)
class DomainInspectorMeasurements(ModelNormal):
    """Generated OpenAPI model for Fastly Domain Inspector measurement buckets.

    NOTE(review): the bare `_property` and `_js_args_to_python_args` lines
    below (and the missing @classmethod on `_from_openapi_data`) look like
    decorators whose '@' prefixes were lost in extraction; in the OpenAPI
    generator's output these are decorated class-level properties /
    argument-conversion wrappers -- confirm against the generator template.
    """
    # No enum-constrained values or field validations on this model.
    allowed_values = {}
    validations = {}
    _property
    def additional_properties_type():
        # Any JSON-compatible type is accepted for undeclared properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        # Declared property name -> tuple of accepted types.
        return {'edge_requests': (int,), 'edge_resp_header_bytes': (int,), 'edge_resp_body_bytes': (int,),
                'status_1xx': (int,), 'status_2xx': (int,), 'status_3xx': (int,), 'status_4xx': (int,), 'status_5xx': (int,),
                'status_200': (int,), 'status_204': (int,), 'status_206': (int,), 'status_301': (int,), 'status_302': (int,),
                'status_304': (int,), 'status_400': (int,), 'status_401': (int,), 'status_403': (int,), 'status_404': (int,),
                'status_416': (int,), 'status_429': (int,), 'status_500': (int,), 'status_501': (int,), 'status_502': (int,),
                'status_503': (int,), 'status_504': (int,), 'status_505': (int,),
                'requests': (int,), 'resp_header_bytes': (int,), 'resp_body_bytes': (int,),
                'bereq_header_bytes': (int,), 'bereq_body_bytes': (int,),
                'edge_hit_requests': (int,), 'edge_miss_requests': (int,), 'origin_fetches': (int,),
                'origin_fetch_resp_header_bytes': (int,), 'origin_fetch_resp_body_bytes': (int,),
                'bandwidth': (int,), 'edge_hit_ratio': (float,), 'origin_offload': (float,),
                'origin_status_200': (int,), 'origin_status_204': (int,), 'origin_status_206': (int,),
                'origin_status_301': (int,), 'origin_status_302': (int,), 'origin_status_304': (int,),
                'origin_status_400': (int,), 'origin_status_401': (int,), 'origin_status_403': (int,),
                'origin_status_404': (int,), 'origin_status_416': (int,), 'origin_status_429': (int,),
                'origin_status_500': (int,), 'origin_status_501': (int,), 'origin_status_502': (int,),
                'origin_status_503': (int,), 'origin_status_504': (int,), 'origin_status_505': (int,),
                'origin_status_1xx': (int,), 'origin_status_2xx': (int,), 'origin_status_3xx': (int,),
                'origin_status_4xx': (int,), 'origin_status_5xx': (int,)}
    _property
    def discriminator():
        # This model is not polymorphic.
        return None
    # Python attribute name -> JSON key (identity mapping for this model).
    attribute_map = {'edge_requests': 'edge_requests', 'edge_resp_header_bytes': 'edge_resp_header_bytes',
                     'edge_resp_body_bytes': 'edge_resp_body_bytes', 'status_1xx': 'status_1xx',
                     'status_2xx': 'status_2xx', 'status_3xx': 'status_3xx', 'status_4xx': 'status_4xx',
                     'status_5xx': 'status_5xx', 'status_200': 'status_200', 'status_204': 'status_204',
                     'status_206': 'status_206', 'status_301': 'status_301', 'status_302': 'status_302',
                     'status_304': 'status_304', 'status_400': 'status_400', 'status_401': 'status_401',
                     'status_403': 'status_403', 'status_404': 'status_404', 'status_416': 'status_416',
                     'status_429': 'status_429', 'status_500': 'status_500', 'status_501': 'status_501',
                     'status_502': 'status_502', 'status_503': 'status_503', 'status_504': 'status_504',
                     'status_505': 'status_505', 'requests': 'requests', 'resp_header_bytes': 'resp_header_bytes',
                     'resp_body_bytes': 'resp_body_bytes', 'bereq_header_bytes': 'bereq_header_bytes',
                     'bereq_body_bytes': 'bereq_body_bytes', 'edge_hit_requests': 'edge_hit_requests',
                     'edge_miss_requests': 'edge_miss_requests', 'origin_fetches': 'origin_fetches',
                     'origin_fetch_resp_header_bytes': 'origin_fetch_resp_header_bytes',
                     'origin_fetch_resp_body_bytes': 'origin_fetch_resp_body_bytes', 'bandwidth': 'bandwidth',
                     'edge_hit_ratio': 'edge_hit_ratio', 'origin_offload': 'origin_offload',
                     'origin_status_200': 'origin_status_200', 'origin_status_204': 'origin_status_204',
                     'origin_status_206': 'origin_status_206', 'origin_status_301': 'origin_status_301',
                     'origin_status_302': 'origin_status_302', 'origin_status_304': 'origin_status_304',
                     'origin_status_400': 'origin_status_400', 'origin_status_401': 'origin_status_401',
                     'origin_status_403': 'origin_status_403', 'origin_status_404': 'origin_status_404',
                     'origin_status_416': 'origin_status_416', 'origin_status_429': 'origin_status_429',
                     'origin_status_500': 'origin_status_500', 'origin_status_501': 'origin_status_501',
                     'origin_status_502': 'origin_status_502', 'origin_status_503': 'origin_status_503',
                     'origin_status_504': 'origin_status_504', 'origin_status_505': 'origin_status_505',
                     'origin_status_1xx': 'origin_status_1xx', 'origin_status_2xx': 'origin_status_2xx',
                     'origin_status_3xx': 'origin_status_3xx', 'origin_status_4xx': 'origin_status_4xx',
                     'origin_status_5xx': 'origin_status_5xx'}
    read_only_vars = {}
    _composed_schemas = {}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from raw API data; keyword arguments only."""
        # Internal bookkeeping arguments are popped before property assignment.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally discard keys not declared on the model.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self
    # Instance attributes that are infrastructure, not model properties.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Keyword-only constructor; rejects positional args and read-only keys."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Direct construction must not set read-only attributes.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def fetch_consumption(zone_key: str='IN-PB', session: (Session | None)=None, target_datetime: (datetime | None)=None, logger: Logger=getLogger(__name__)) -> dict:
    """Fetch the live electricity consumption for Punjab (IN-PB).

    Only real-time data is available; passing a ``target_datetime`` raises
    ``NotImplementedError``.
    """
    if target_datetime:
        raise NotImplementedError('The IN-PB consumption parser is not yet able to parse past dates')
    http = session if session else requests.Session()
    payload = http.get(GENERATION_URL).json()
    # The SLDC feed reports gross generation, which this parser uses as a
    # proxy for state-wide consumption.
    return {
        'zoneKey': zone_key,
        'datetime': datetime.now(tz=ZoneInfo('Asia/Kolkata')),
        'consumption': float(payload['grossGeneration']['value']),
        'source': 'punjasldc.org',
    }
class HistoryItem(API):
    """A single generated-audio history entry from the ElevenLabs history API.

    NOTE(review): ``_validator(mode='after')`` appears to be a decorator whose
    ``@`` prefix was lost (presumably a pydantic model validator) — confirm
    against the original source.
    """
    # Identifiers and request metadata returned by the API.
    history_item_id: str
    request_id: Optional[str] = None
    voice_id: str
    text: str
    # ``date`` is derived from ``date_unix`` in ``add_computed_fields``.
    date: Optional[datetime] = None
    date_unix: int
    character_count_change_from: int
    character_count_change_to: int
    # Derived: change_to - change_from, filled in by the validator below.
    character_count_change: Optional[int] = None
    content_type: str
    settings: Optional[VoiceSettings] = None
    feedback: Optional[FeedbackItem] = None
    # Lazily-fetched audio bytes; populated on first access via ``audio``.
    _audio: Optional[bytes] = None
    _validator(mode='after')
    def add_computed_fields(self):
        """Populate the derived ``character_count_change`` and ``date`` fields."""
        change_from = self.character_count_change_from
        change_to = self.character_count_change_to
        self.character_count_change = (change_to - change_from)
        self.date = datetime.utcfromtimestamp(self.date_unix)
        return self
    def from_id(cls, history_item_id: str) -> HistoryItem:
        """Fetch a single history item by id from the API.

        NOTE(review): takes ``cls`` — presumably a ``@classmethod`` whose
        decorator was stripped.
        """
        url = f'{api_base_url_v1}/history/{history_item_id}'
        response = API.get(url).json()
        return cls(**response)
    def audio(self) -> bytes:
        """Return the item's audio, downloading and caching it on first use."""
        url = f'{api_base_url_v1}/history/{self.history_item_id}/audio'
        if (self._audio is None):
            self._audio = API.get(url).content
        return self._audio
    def delete(self):
        """Permanently delete this history item on the server."""
        API.delete(f'{api_base_url_v1}/history/{self.history_item_id}')
class Emo_gen_config():
    """Plain configuration holder for the emotion-generation step."""

    def __init__(self, config_path: str, num_processes: int=2, device: str='cuda', use_multi_device: bool=False):
        # Settings are stored verbatim; no validation happens here.
        self.config_path = config_path
        self.device = device
        self.num_processes = num_processes
        self.use_multi_device = use_multi_device

    def from_dict(cls, dataset_path: str, data: Dict[(str, any)]):
        """Build a config from *data*, resolving ``config_path`` under *dataset_path*.

        Mutates *data* in place before unpacking it into the constructor.
        NOTE(review): takes ``cls`` — presumably a ``@classmethod`` whose
        decorator was stripped; callers must pass the class explicitly.
        """
        data['config_path'] = os.path.join(dataset_path, data['config_path'])
        return cls(**data)
class OptionSeriesGaugeSonificationTracksMappingHighpass(Options):
    """Generated option wrapper for the highpass-filter mapping of a gauge
    series sonification track (Highcharts config tree)."""

    def frequency(self) -> 'OptionSeriesGaugeSonificationTracksMappingHighpassFrequency':
        """Sub-configuration for the highpass filter frequency."""
        return self._config_sub_data('frequency', OptionSeriesGaugeSonificationTracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionSeriesGaugeSonificationTracksMappingHighpassResonance':
        """Sub-configuration for the highpass filter resonance."""
        return self._config_sub_data('resonance', OptionSeriesGaugeSonificationTracksMappingHighpassResonance)
class TestPlaceholderShown(util.TestCase):
    """Tests for the ``:placeholder-shown`` CSS selector."""

    def test_placeholder_shown(self):
        """Only placeholder-capable controls whose value is effectively empty
        should match ``:placeholder-shown``."""
        # The fixture covers: elements with/without a placeholder, every input
        # type that supports placeholders, types that do not, value overrides,
        # and textarea whitespace edge cases.
        markup = '\n <!-- These have a placeholder. -->\n <input id="0" placeholder="This is some text">\n <textarea id="1" placeholder="This is some text"></textarea>\n\n <!-- These do not have a placeholder. -->\n <input id="2" placeholder="">\n <input id="3">\n\n <!-- All types that should register has having a placeholder. -->\n <input id="4" type="email" placeholder="This is some text">\n <input id="5" type="number" placeholder="This is some text">\n <input id="6" type="password" placeholder="This is some text">\n <input id="7" type="search" placeholder="This is some text">\n <input id="8" type="tel" placeholder="This is some text">\n <input id="9" type="text" placeholder="This is some text">\n <input id="10" type="url" placeholder="This is some text">\n <input id="11" type="" placeholder="This is some text">\n <input id="12" type placeholder="This is some text">\n\n <!-- Types that should not register has having a placeholder. -->\n <input id="13" type="button" placeholder="This is some text">\n <input id="14" type="checkbox" placeholder="This is some text">\n <input id="15" type="color" placeholder="This is some text">\n <input id="16" type="date" placeholder="This is some text">\n <input id="17" type="datetime-local" placeholder="This is some text">\n <input id="18" type="file" placeholder="This is some text">\n <input id="19" type="hidden" placeholder="This is some text">\n <input id="20" type="image" placeholder="This is some text">\n <input id="21" type="month" placeholder="This is some text">\n <input id="22" type="radio" placeholder="This is some text">\n <input id="23" type="range" placeholder="This is some text">\n <input id="24" type="reset" placeholder="This is some text">\n <input id="25" type="submit" placeholder="This is some text">\n <input id="26" type="time" placeholder="This is some text">\n <input id="27" type="week" placeholder="This is some text">\n\n <!-- Value will not override this instance as value is empty. -->\n <input id="28" type placeholder="This is some text" value="">\n\n <!-- Value will override this input -->\n <input id="29" type placeholder="This is some text" value="Actual value">\n\n <!-- Text area content overrides the placehold-->\n <textarea id="30" placeholder="This is some text">Value</textarea>\n <textarea id="31" placeholder="This is some text">\n\n\n </textarea>\n\n <!-- Text area is still considered empty with a single new line (does not include carriage return). -->\n <textarea id="32" placeholder="This is some text">\n </textarea>\n '
        self.assert_selector(markup, ':placeholder-shown', ['0', '1', '4', '5', '6', '7', '8', '9', '10', '11', '12', '28', '32'], flags=util.HTML)
def mocked_query_single_user(cognito_id):
    """Return canned DynamoDB rows for a single user, ignoring *cognito_id*.

    The first row is a list subscription, the second the user profile record.
    """
    return [
        {'Date subscribed': '2021-06-16T23:06:48.646688', 'GSI1PK': 'USER', 'List name': 'HSK Level 6', 'SK': 'LIST#1ebcad41-197a-123123#TRADITIONAL', 'Status': 'subscribed', 'GSI1SK': 'USER#770e2827-7666-#LIST#1ebcad41-197a-123123#TRADITIONAL', 'PK': 'USER#770e2827-7666-', 'Character set': 'traditional'},
        {'GSI1PK': 'USER', 'Date created': '2021-06-16T23:06:48.467526', 'Character set preference': 'traditional', 'SK': 'USER#770e2827-7666-', 'Email address': '', 'GSI1SK': 'USER#770e2827-7666-', 'PK': 'USER#770e2827-7666-', 'User alias': 'Not set', 'User alias pinyin': 'Not set', 'User alias emoji': 'Not set'},
    ]
def get_dupes(lastz_file, splitchar='|', pos=1, longfile=False):
    """Return the set of sequence names involved in duplicate lastz matches.

    A name is a duplicate when it matches something other than itself, or
    when its single match is not itself.
    """
    duplicates = set()
    for name, hits in get_dupe_matches(lastz_file, splitchar, pos, longfile).items():
        if len(hits) > 1:
            # Multiple hits: record the name and every non-self hit.
            for hit in hits:
                if hit != name:
                    duplicates.add(name)
                    duplicates.add(hit)
        elif name != hits[0]:
            duplicates.add(name)
    return duplicates
class OptionPlotoptionsDependencywheelLevelsDatalabels(Options):
    """Generated option wrapper for
    ``plotOptions.dependencywheel.levels.dataLabels`` (Highcharts config tree).

    NOTE(review): every option appears twice — a zero-argument reader that
    returns the documented default via ``_config_get`` and a one-argument
    writer delegating to ``_config``. The ``@property``/setter decorators
    appear to have been stripped during extraction, so the later definition
    shadows the earlier one; confirm against the generator's original output.
    """

    def align(self):
        return self._config_get('undefined')
    def align(self, text: str):
        self._config(text, js_type=False)
    def allowOverlap(self):
        return self._config_get(False)
    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self) -> 'OptionPlotoptionsDependencywheelLevelsDatalabelsAnimation':
        # Nested sub-configuration object.
        return self._config_sub_data('animation', OptionPlotoptionsDependencywheelLevelsDatalabelsAnimation)
    def backgroundColor(self):
        return self._config_get('none')
    def backgroundColor(self, text: str):
        self._config(text, js_type=False)
    def borderColor(self):
        return self._config_get(None)
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(0)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(0)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def crop(self):
        return self._config_get(False)
    def crop(self, flag: bool):
        self._config(flag, js_type=False)
    def defer(self):
        return self._config_get(True)
    def defer(self, flag: bool):
        self._config(flag, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def filter(self) -> 'OptionPlotoptionsDependencywheelLevelsDatalabelsFilter':
        # Nested sub-configuration object.
        return self._config_sub_data('filter', OptionPlotoptionsDependencywheelLevelsDatalabelsFilter)
    def format(self):
        return self._config_get('undefined')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get(None)
    def formatter(self, value: Any):
        self._config(value, js_type=False)
    def inside(self):
        return self._config_get(True)
    def inside(self, flag: bool):
        self._config(flag, js_type=False)
    def nodeFormat(self):
        return self._config_get('undefined')
    def nodeFormat(self, text: str):
        self._config(text, js_type=False)
    def nodeFormatter(self):
        return self._config_get(None)
    def nodeFormatter(self, value: Any):
        self._config(value, js_type=False)
    def nullFormat(self):
        return self._config_get(None)
    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormatter(self):
        return self._config_get(None)
    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)
    def overflow(self):
        return self._config_get('justify')
    def overflow(self, text: str):
        self._config(text, js_type=False)
    def padding(self):
        return self._config_get(5)
    def padding(self, num: float):
        self._config(num, js_type=False)
    def position(self):
        return self._config_get('center')
    def position(self, text: str):
        self._config(text, js_type=False)
    def rotation(self):
        return self._config_get(0)
    def rotation(self, num: float):
        self._config(num, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def shape(self):
        return self._config_get('square')
    def shape(self, text: str):
        self._config(text, js_type=False)
    def style(self):
        return self._config_get(None)
    def style(self, value: Any):
        self._config(value, js_type=False)
    def textPath(self) -> 'OptionPlotoptionsDependencywheelLevelsDatalabelsTextpath':
        # Nested sub-configuration object.
        return self._config_sub_data('textPath', OptionPlotoptionsDependencywheelLevelsDatalabelsTextpath)
    def useHTML(self):
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
    def verticalAlign(self):
        return self._config_get('undefined')
    def verticalAlign(self, text: str):
        self._config(text, js_type=False)
    def x(self):
        return self._config_get(0)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get('undefined')
    def y(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(6)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
def test_parent_child_span_relation(tracer: Tracer):
    """A child span must record its parent's span id, share the parent's
    trace id, and both spans must be stored after ending."""
    root = tracer.start_span('parent_operation')
    child = tracer.start_span('child_operation', parent_span_id=root.span_id)
    assert child.parent_span_id == root.span_id
    assert child.trace_id == root.trace_id
    # End in child-first order, then verify both landed in storage.
    tracer.end_span(child)
    tracer.end_span(root)
    stored_spans = tracer._get_current_storage().spans
    assert root in stored_spans
    assert child in stored_spans
class OptionPlotoptionsNetworkgraphLayoutalgorithm(Options):
    """Generated option wrapper for
    ``plotOptions.networkgraph.layoutAlgorithm`` (Highcharts config tree).

    Each option has a reader returning the documented default and a writer
    delegating to ``_config``.

    Fixed: several defaults were emitted as bare identifiers (``none``,
    ``circle``, ``euler``, ``reingold - fruchterman``), which are undefined
    names and raised ``NameError`` when the readers ran. They are now the
    string literals Highcharts documents, matching how the sibling generated
    classes quote their defaults.
    """

    def approximation(self):
        return self._config_get('none')
    def approximation(self, value: Any):
        self._config(value, js_type=False)
    def attractiveForce(self):
        return self._config_get('function (d, k) { return k * k / d; }')
    def attractiveForce(self, text: str):
        self._config(text, js_type=True)
    def enableSimulation(self):
        return self._config_get(False)
    def enableSimulation(self, flag: bool):
        self._config(flag, js_type=False)
    def friction(self):
        return self._config_get((- 0.981))
    def friction(self, num: float):
        self._config(num, js_type=False)
    def gravitationalConstant(self):
        return self._config_get(0.0625)
    def gravitationalConstant(self, num: float):
        self._config(num, js_type=False)
    def initialPositionRadius(self):
        return self._config_get(1)
    def initialPositionRadius(self, num: float):
        self._config(num, js_type=False)
    def initialPositions(self):
        return self._config_get('circle')
    def initialPositions(self, value: Any):
        self._config(value, js_type=False)
    def integration(self):
        return self._config_get('euler')
    def integration(self, value: Any):
        self._config(value, js_type=False)
    def linkLength(self):
        return self._config_get(None)
    def linkLength(self, num: float):
        self._config(num, js_type=False)
    def maxIterations(self):
        return self._config_get(1000)
    def maxIterations(self, num: float):
        self._config(num, js_type=False)
    def maxSpeed(self):
        return self._config_get(10)
    def maxSpeed(self, num: float):
        self._config(num, js_type=False)
    def repulsiveForce(self):
        return self._config_get('function (d, k) { return k * k / d; }')
    def repulsiveForce(self, text: str):
        self._config(text, js_type=True)
    def theta(self):
        return self._config_get(0.5)
    def theta(self, num: float):
        self._config(num, js_type=False)
    def type(self):
        return self._config_get('reingold-fruchterman')
    def type(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the leading '.django_db' appears to be a decorator whose
# '@pytest.mark' prefix was lost during extraction — confirm against the
# original source.
.django_db
def test_alternate_year(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
    """Agency 001 for FY2020 should report one sub-agency and one office."""
    # Point the test at the Elasticsearch transaction index fixture.
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    resp = client.get(url.format(toptier_code='001', filter='?fiscal_year=2020'))
    expected_results = {'toptier_code': '001', 'fiscal_year': 2020, 'sub_agency_count': 1, 'office_count': 1, 'messages': []}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_results)
class CustomBackendtests(DatabaseTestCase):
    """Tests for the Anitya ``CustomBackend`` version-scraping backend.

    NOTE(review): the ``homepage=``/``version_url=`` URL string literals below
    appear to have been redacted during extraction, leaving syntactically
    broken keyword arguments — restore the original URLs before running.
    """

    def setUp(self):
        """Create a distro and the fixture projects used by every test."""
        super().setUp()
        create_distro(self.session)
        self.create_project()

    def create_project(self):
        """Insert the three fixture projects (ids 1..3 in insertion order)."""
        project = models.Project(name='geany', homepage=' version_url=' regex='DEFAULT', backend=BACKEND)
        self.session.add(project)
        self.session.commit()
        # Project 2 deliberately lacks a version_url so lookups fail.
        project = models.Project(name='fake', homepage=' regex='DEFAULT', backend=BACKEND)
        self.session.add(project)
        self.session.commit()
        project = models.Project(name='subsurface', homepage=' version_url=' regex='DEFAULT', backend=BACKEND)
        self.session.add(project)
        self.session.commit()

    def test_custom_get_version(self):
        """get_version returns the latest version, or raises for bad projects."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = '1.33'
        obs = backend.CustomBackend.get_version(project)
        self.assertEqual(obs, exp)
        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(AnityaPluginException, backend.CustomBackend.get_version, project)
        pid = 3
        project = models.Project.get(self.session, pid)
        exp = '4.7.7'
        obs = backend.CustomBackend.get_version(project)
        self.assertEqual(obs, exp)

    def test_get_version_url(self):
        """get_version_url simply echoes the project's configured version_url."""
        project = models.Project(name='test', homepage=' version_url=' backend=BACKEND)
        exp = project.version_url
        obs = backend.CustomBackend.get_version_url(project)
        self.assertEqual(obs, exp)

    def test_custom_get_versions(self):
        """get_versions returns all scraped versions, or raises for bad projects."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = ['1.33']
        obs = backend.CustomBackend.get_versions(project)
        self.assertEqual(sorted(obs), exp)
        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(AnityaPluginException, backend.CustomBackend.get_version, project)
        pid = 3
        project = models.Project.get(self.session, pid)
        exp = ['3.1.1', '4.0', '4.0.1', '4.0.2', '4.0.3', '4.1', '4.2', '4.3', '4.4.0', '4.4.1', '4.4.2', '4.5.0', '4.5.1', '4.5.2', '4.5.3', '4.5.4', '4.5.5', '4.5.6', '4.6.0', '4.6.1', '4.6.2', '4.6.3', '4.6.4', '4.7.1', '4.7.2', '4.7.3', '4.7.4', '4.7.5', '4.7.6', '4.7.7']
        obs = backend.CustomBackend.get_ordered_versions(project)
        self.assertEqual(sorted(obs), exp)

    def test_custom_get_versions_unstable(self):
        """Pre-release versions (beta/rc) are included in the ordered list."""
        project = models.Project(name='grub', homepage=' version_url=' regex='DEFAULT', backend=BACKEND)
        exp = ['2.02', '2.02-beta3', '2.02-rc1', '2.02-rc2', '2.04', '2.04-rc1', '2.06-rc1']
        obs = backend.CustomBackend.get_ordered_versions(project)
        self.assertEqual(sorted(obs), exp)
class ActorCriticEvents(ABC):
    """Event interface for actor-critic training statistics.

    NOTE(review): the ``_epoch_stats(...)``/``_stats_grouping(...)`` lines
    appear to be decorators whose ``@`` prefixes were lost during extraction,
    and the (presumably empty/ellipsis) method bodies were dropped — as
    written this block is not syntactically valid; restore from the original
    source. Each event seems to declare how its values are aggregated per
    epoch (mean / nanmean) and, where present, a grouping key.
    """
    _epoch_stats(np.mean)
    def time_rollout(self, value: float):
    _epoch_stats(np.mean)
    def time_epoch(self, value: float):
    _epoch_stats(np.mean)
    def time_update(self, value: float):
    _epoch_stats(np.mean)
    def learning_rate(self, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('substep_key')
    def policy_loss(self, substep_key: int, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('substep_key')
    def policy_grad_norm(self, substep_key: int, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('substep_key')
    def policy_entropy(self, substep_key: int, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('critic_id')
    def critic_value(self, critic_id: int, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('critic_id')
    def critic_value_loss(self, critic_id: int, value: float):
    _epoch_stats(np.nanmean)
    _stats_grouping('critic_id')
    def critic_grad_norm(self, critic_id: int, value: float):
class readtext(TestCase):
    """Tests for ``util.readtext`` with str paths, Path objects, file objects
    and invalid input."""

    def _test(self, method):
        # Round-trip 'foobar' through a temp file, converting the path with
        # *method* before handing it to util.readtext; always clean up.
        try:
            with tempfile.NamedTemporaryFile('w', delete=False) as handle:
                handle.write('foobar')
            self.assertEqual(util.readtext(method(handle.name)), 'foobar')
        finally:
            os.remove(str(handle.name))

    def test_str(self):
        """A plain string path is accepted."""
        self._test(str)

    def test_path(self):
        """A pathlib.Path is accepted."""
        self._test(pathlib.Path)

    def test_file(self):
        """An open file-like object is read directly."""
        self.assertEqual(util.readtext(io.StringIO('foobar')), 'foobar')

    def test_typeerror(self):
        """Unsupported input types raise TypeError."""
        with self.assertRaises(TypeError):
            util.readtext(None)
class pyttsx():
    """Offline text-to-speech engine backed by ``pyttsx3``."""

    def __init__(self):
        self.max_chars = 5000  # maximum characters per synthesis request
        self.voices = []       # discovered voice indices, filled in run()

    def run(self, text: str, filepath: str, random_voice=False):
        """Synthesize *text* to the audio file at *filepath*.

        Raises:
            ValueError: when the pyttsx settings are unset in the config.
        """
        voice_id = settings.config['settings']['tts']['python_voice']
        voice_num = settings.config['settings']['tts']['py_voice_num']
        if (voice_id == '') or (voice_num == ''):
            voice_id = 2
            voice_num = 3
            # NOTE(review): these defaults are dead code — the raise below
            # aborts before they are used, despite the message claiming a
            # fallback. Preserved to keep the external contract unchanged.
            raise ValueError('set pyttsx values to a valid value, switching to defaults')
        voice_id = int(voice_id)
        voice_num = int(voice_num)
        # Fixed: the original loop ended with a no-op `i = (+ 1)` (likely a
        # mangled `i += 1`, which would have been useless anyway since the
        # loop variable is reassigned each iteration). Simply record the
        # available voice indices, preserving the original accumulation
        # across repeated calls.
        self.voices.extend(range(voice_num))
        if random_voice:
            voice_id = self.randomvoice()
        engine = pyttsx3.init()
        voices = engine.getProperty('voices')
        engine.setProperty('voice', voices[voice_id].id)
        engine.save_to_file(text, f'{filepath}')
        engine.runAndWait()

    def randomvoice(self):
        """Return a random index from the discovered voices."""
        return random.choice(self.voices)
class Test(unittest.TestCase):
    """Tests for fnss event schedules: construction, merging, slicing,
    generation and (de)serialization.

    NOTE(review): ``setUpClass``/``tearDownClass`` appear to be missing their
    ``@classmethod`` decorators, and the bare
    ``((TMP_DIR is None), 'Temp folder not present')`` tuples look like
    ``@unittest.skipIf`` decorators whose prefix was lost — confirm against
    the original source.
    """

    def setUpClass(cls):
        pass

    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def event_gen(self, threshold, action):
        """Event generator: pick action[0] or action[1] by comparing a random
        draw against *threshold*."""
        event_props = {}
        r = random.random()
        event_props['action'] = (action[0] if (r > threshold) else action[1])
        return event_props

    def test_event_schedule_add(self):
        """Events are kept sorted by time regardless of insertion order."""
        es = fnss.EventSchedule()
        es.add(8, {'add_order': 1}, absolute_time=True)
        es.add(5, {'add_order': 2}, absolute_time=True)
        self.assertEqual(2, es.number_of_events())
        self.assertEqual(5, es[0][0])
        self.assertEqual({'add_order': 2}, es[0][1])
        self.assertEqual(8, es[1][0])
        self.assertEqual({'add_order': 1}, es[1][1])

    def test_event_schedule_pop(self):
        """pop(0) removes and returns the earliest event first."""
        es = fnss.EventSchedule()
        es.add(8, {'add_order': 1}, absolute_time=True)
        es.add(5, {'add_order': 2}, absolute_time=True)
        (t0, e0) = es.pop(0)
        (t1, e1) = es.pop(0)
        self.assertEqual(5, t0)
        self.assertEqual(8, t1)
        self.assertEqual({'add_order': 2}, e0)
        self.assertEqual({'add_order': 1}, e1)
        self.assertEqual(0, es.number_of_events())

    def test_event_schedule_events_between(self):
        """events_between returns events in [start, end), time-ordered."""
        es = fnss.EventSchedule()
        es.add(5, {'event_order': 3}, absolute_time=True)
        es.add(4, {'event_order': 2}, absolute_time=True)
        es.add(3, {'event_order': 1}, absolute_time=True)
        es.add(7, {'event_order': 5}, absolute_time=True)
        es.add(6, {'event_order': 4}, absolute_time=True)
        es.add(8, {'event_order': 6}, absolute_time=True)
        events = es.events_between(5, 7)
        self.assertEqual(2, events.number_of_events())
        self.assertEqual(5, events[0][0])
        self.assertEqual({'event_order': 3}, events[0][1])
        self.assertEqual(6, events[1][0])
        self.assertEqual({'event_order': 4}, events[1][1])

    def test_event_schedule_add_schedule(self):
        """add_schedule merges another schedule, converting time units."""
        es1 = fnss.EventSchedule(t_unit='s')
        es1.add(3, {'event_order': 1}, absolute_time=True)
        es1.add(5, {'event_order': 3}, absolute_time=True)
        es2 = fnss.EventSchedule(t_unit='ms')
        es2.add(4000, {'event_order': 2}, absolute_time=True)
        es2.add(7000, {'event_order': 5}, absolute_time=True)
        es1.add_schedule(es2)
        self.assertEqual(4, len(es1))
        self.assertEqual('s', es1.attrib['t_unit'])
        self.assertEqual(3, es1[0][0])
        self.assertEqual(4, es1[1][0])
        self.assertEqual(5, es1[2][0])
        self.assertEqual(7, es1[3][0])

    def test_event_schedule_add_operator(self):
        """The + operator merges schedules like add_schedule, non-destructively."""
        es1 = fnss.EventSchedule(t_unit='s')
        es1.add(3, {'event_order': 1}, absolute_time=True)
        es1.add(5, {'event_order': 3}, absolute_time=True)
        es2 = fnss.EventSchedule(t_unit='ms')
        es2.add(4000, {'event_order': 2}, absolute_time=True)
        es2.add(7000, {'event_order': 5}, absolute_time=True)
        es3 = (es1 + es2)
        self.assertEqual(4, len(es3))
        self.assertEqual('s', es3.attrib['t_unit'])
        self.assertEqual(3, es3[0][0])
        self.assertEqual(4, es3[1][0])
        self.assertEqual(5, es3[2][0])
        self.assertEqual(7, es3[3][0])

    def test_event_schedule_operators(self):
        """len, slicing, deletion and iteration behave like a sequence."""
        es = fnss.EventSchedule()
        es.add(5, {'event_order': 3}, absolute_time=True)
        es.add(4, {'event_order': 2}, absolute_time=True)
        es.add(3, {'event_order': 1}, absolute_time=True)
        es.add(7, {'event_order': 5}, absolute_time=True)
        es.add(6, {'event_order': 4}, absolute_time=True)
        es.add(8, {'event_order': 6}, absolute_time=True)
        self.assertEqual(6, len(es))
        self.assertEqual(2, len(es[3:5]))
        del es[0]
        self.assertEqual(5, len(es))
        for ev in es:
            (t, _) = ev
            self.assertGreaterEqual(t, es.attrib['t_start'])
            self.assertLessEqual(t, es.attrib['t_end'])

    def test_deterministic_process_event_schedule(self):
        """A 20ms-interval deterministic process over 80s yields 4000 events."""
        action = ['read_email', 'watch_video']
        schedule = fnss.deterministic_process_event_schedule(20, 0, 80001, 'ms', self.event_gen, 0.5, action=action)
        self.assertIsNotNone(schedule)
        self.assertEqual(4000, len(schedule))
        for (time, event) in schedule:
            self.assertTrue((event['action'] in action))
            self.assertTrue((time >= 0))
            self.assertTrue((time <= 80001))

    def test_poisson_process_event_schedule(self):
        """Poisson-process events fall inside the requested time window."""
        action = ['read_email', 'watch_video']
        schedule = fnss.poisson_process_event_schedule(15, 0, 8000, 'ms', self.event_gen, 0.5, action=action)
        self.assertIsNotNone(schedule)
        for (time, event) in schedule:
            self.assertTrue((event['action'] in action))
            self.assertTrue((time >= 0))
            self.assertTrue((time <= 8000))

    ((TMP_DIR is None), 'Temp folder not present')
    def test_read_write_event_schedule(self):
        """A schedule survives an XML write/read round trip."""
        action = ['read_email', 'watch_video']
        schedule = fnss.deterministic_process_event_schedule(20, 0, 801, 'ms', self.event_gen, 0.5, action=action)
        (time, event) = schedule[2]
        tmp_es_file = path.join(TMP_DIR, 'event-schedule.xml')
        fnss.write_event_schedule(schedule, tmp_es_file)
        read_schedule = fnss.read_event_schedule(tmp_es_file)
        self.assertEqual(len(schedule), len(read_schedule))
        (read_time, read_event) = read_schedule[2]
        self.assertEqual(time, read_time)
        self.assertEqual(event, read_event)

    ((TMP_DIR is None), 'Temp folder not present')
    def test_read_write_event_schedule_special_type(self):
        """Tuple/list/dict event properties keep their types across I/O."""
        schedule = fnss.EventSchedule()
        event = {'tuple_param': (1, 2, 3), 'dict_param': {'a': 1, 'b': 2}, 'list_param': [1, 'hello', 0.3]}
        schedule.add(1, event)
        tmp_es_file = path.join(TMP_DIR, 'event-schedule-special.xml')
        fnss.write_event_schedule(schedule, tmp_es_file)
        read_schedule = fnss.read_event_schedule(tmp_es_file)
        self.assertEqual(len(schedule), len(read_schedule))
        (_, read_event) = read_schedule[0]
        self.assertEqual(event, read_event)
        self.assertEqual(tuple, type(read_event['tuple_param']))
        self.assertEqual(list, type(read_event['list_param']))
        self.assertEqual(dict, type(read_event['dict_param']))
        self.assertEqual(event['dict_param'], read_event['dict_param'])
        self.assertEqual(event['list_param'], read_event['list_param'])
        self.assertEqual(event['tuple_param'], read_event['tuple_param'])
def _crawler_factory(storage, progresser, client, parallel, threads):
    """Build a crawler, parallel or serial depending on *parallel*.

    Resources listed under the client's ``excluded_resources`` config key are
    passed through as a set in the crawler's config variables.
    """
    variables = {
        'excluded_resources': set(client.config.get('excluded_resources', [])),
    }
    if parallel:
        return ParallelCrawler(
            ParallelCrawlerConfig(storage, progresser, client, threads=threads, variables=variables))
    return Crawler(CrawlerConfig(storage, progresser, client, variables=variables))
class CMSBaseTest(TestCase):
    """Tests for FeinCMS content-type creation on the example CMS bases."""

    def test_01_simple_content_type_creation(self):
        """Content types register on creation; unknown types resolve to None."""
        self.assertEqual(ExampleCMSBase.content_type_for(RawContent), None)
        ExampleCMSBase.create_content_type(RawContent, regions=('main2',))
        ExampleCMSBase.create_content_type(RichTextContent)
        self.assertEqual(ExampleCMSBase.content_type_for(Empty), None)
        # rawcontent was restricted to 'main2', so the first region lacks it.
        region_types = dict(ExampleCMSBase.template.regions[0].content_types)
        self.assertTrue('rawcontent' not in region_types.keys())

    def test_04_mediafilecontent_creation(self):
        """MediaFileContent requires extra configuration to be registered."""
        with self.assertRaises(ImproperlyConfigured):
            ExampleCMSBase.create_content_type(MediaFileContent)

    def test_05_non_abstract_content_type(self):
        """Only abstract models may become content types."""
        class TestContentType(models.Model):
            pass
        with self.assertRaises(ImproperlyConfigured):
            ExampleCMSBase.create_content_type(TestContentType)

    def test_07_default_render_method(self):
        """render() dispatches to render_<region>; unknown regions raise."""
        class SomethingElse(models.Model):
            class Meta():
                abstract = True
            def render_region(self):
                return 'hello'
        content_type = ExampleCMSBase.create_content_type(SomethingElse)
        instance = content_type()
        with self.assertRaises(NotImplementedError):
            instance.render()
        instance.region = 'region'
        self.assertEqual(instance.render(), 'hello')

    def test_08_creating_two_content_types_in_same_application(self):
        """Each base gets its own concrete table for the same content type."""
        ExampleCMSBase.create_content_type(RawContent)
        first = ExampleCMSBase.content_type_for(RawContent)
        self.assertEqual(first._meta.db_table, 'testapp_examplecmsbase_rawcontent')
        ExampleCMSBase2.create_content_type(RawContent, class_name='RawContent2')
        second = ExampleCMSBase2.content_type_for(RawContent)
        self.assertEqual(second._meta.db_table, 'testapp_examplecmsbase2_rawcontent2')

    def test_10_content_type_subclasses(self):
        """A subclass registers as a distinct content type from its parent."""
        ExampleCMSBase.create_content_type(SubRawContent)
        ExampleCMSBase.create_content_type(RawContent)
        parent_ct = ExampleCMSBase.content_type_for(RawContent)
        child_ct = ExampleCMSBase.content_type_for(SubRawContent)
        self.assertNotEqual(parent_ct, child_ct)
def make_las_campanas_atmospheric_layers(input_grid, r0=0.16, L0=25, wavelength=5.5e-07):
    """Create a seven-layer atmosphere model for the Las Campanas site.

    Parameters
    ----------
    input_grid : Grid
        Grid on which the phase screens are defined.
    r0 : float
        Fried parameter in meters.
    L0 : float
        Outer scale in meters, shared by all layers.
    wavelength : float
        Wavelength in meters at which ``r0`` is specified.

    Returns
    -------
    list
        One ``InfiniteAtmosphericLayer`` per altitude.
    """
    heights = np.array([250, 500, 1000, 2000, 4000, 8000, 16000])
    velocities = np.array([10, 10, 20, 20, 25, 30, 25])
    # Fixed: the total Cn^2 was previously computed with a hard-coded
    # wavelength of 5e-07 m, silently ignoring the ``wavelength`` argument.
    integrated_cn_squared = Cn_squared_from_fried_parameter(r0, wavelength=wavelength)
    # Fractional Cn^2 weights per layer (sum to 1) scaled to the total.
    Cn_squared = (np.array([0.42, 0.03, 0.06, 0.16, 0.11, 0.1, 0.12]) * integrated_cn_squared)
    layers = []
    for (h, v, cn) in zip(heights, velocities, Cn_squared):
        layers.append(InfiniteAtmosphericLayer(input_grid, cn, L0, v, h, 2))
    return layers
def run_config(_, config, output, config_env):
    """Handle the ``config`` CLI subcommand.

    Dispatches on ``config.action`` to show, set, delete or reset entries in
    *config_env*, persisting any mutation and echoing the resulting config.
    Raises ``KeyError`` for an unknown action (unchanged behavior).
    """
    del output  # unused by every action

    def do_show_config():
        print(config_env)

    def do_set_endpoint():
        config_env['endpoint'] = config.hostport
        DefaultConfigParser.persist(config_env)
        do_show_config()

    def do_set_model():
        config_env['model'] = config.name
        DefaultConfigParser.persist(config_env)
        do_show_config()

    def do_set_output():
        config_env['format'] = config.name
        DefaultConfigParser.persist(config_env)
        do_show_config()

    def do_delete_config():
        del config_env[config.key]
        DefaultConfigParser.persist(config_env)
        do_show_config()

    def do_reset_config():
        # Fixed: the original deleted keys while iterating the dict, which
        # raises "RuntimeError: dictionary changed size during iteration".
        config_env.clear()
        DefaultConfigParser.persist(config_env)
        do_show_config()

    actions = {'show': do_show_config, 'model': do_set_model, 'endpoint': do_set_endpoint, 'format': do_set_output, 'reset': do_reset_config, 'delete': do_delete_config}
    actions[config.action]()
def CreateBmmRCROperator(manifest):
    """Register batched-GEMM f16 operators (row-major A, column-major B,
    row-major C) into *manifest*.

    One operation is generated per (gemm specialization, tile description)
    pair. Returns the list of created operations; each is also appended to
    *manifest*.
    """
    operation_kind = library.GemmKind.BatchGemm
    a_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.RowMajor)
    b_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.ColumnMajor)
    c_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.RowMajor)
    element_op = library.TensorOperation.PassThrough
    tile_descriptions = [gemm.TileDesc(256, 256, 128, 4, 8, 0, 32, 32, 4, 2), gemm.TileDesc(256, 128, 256, 4, 8, 0, 32, 32, 2, 4), gemm.TileDesc(128, 128, 128, 4, 8, 0, 32, 32, 4, 2), gemm.TileDesc(256, 128, 128, 4, 8, 0, 32, 32, 2, 2), gemm.TileDesc(128, 128, 64, 4, 8, 0, 32, 32, 2, 2), gemm.TileDesc(128, 64, 128, 4, 8, 0, 32, 32, 2, 2), gemm.TileDesc(64, 64, 64, 4, 8, 0, 32, 32, 2, 2), gemm.TileDesc(256, 128, 64, 4, 8, 0, 32, 32, 2, 1), gemm.TileDesc(256, 64, 128, 4, 8, 0, 32, 32, 1, 2), gemm.TileDesc(128, 128, 32, 4, 8, 0, 32, 32, 2, 1), gemm.TileDesc(128, 32, 128, 4, 8, 0, 32, 32, 1, 2), gemm.TileDesc(64, 64, 32, 4, 8, 0, 32, 32, 2, 1), gemm.TileDesc(64, 32, 64, 4, 8, 0, 32, 32, 1, 2)]
    block_descriptions = []
    for t in tile_descriptions:
        # The thread-block transfer shape is determined by the block size.
        block_transfer = (- 1)
        if (t.block_size == 256):
            block_transfer = [4, 64, 1]
        if (t.block_size == 128):
            block_transfer = [4, 32, 1]
        if (t.block_size == 64):
            block_transfer = [4, 16, 1]
        # Fixed: the message used to be AND-ed into the assert condition,
        # so it was never displayed on failure; use the proper
        # ``assert cond, msg`` form.
        assert block_transfer != (- 1), ('Cannot determine block_transfer_size with block_size ' + str(t.block_size))
        block_descriptions.append(gemm.BlockTransferDesc(block_transfer, [1, 0, 2], [1, 0, 2], 2, 8, 8, 1, True))
    gemm_specialization = [gemm.GemmSpecialization.GemmDefault, gemm.GemmSpecialization.MNKPadding]
    operations = []
    for gemm_spec in gemm_specialization:
        for (tile_desc, block_desc) in zip(tile_descriptions, block_descriptions):
            new_operation = gemm.GemmOperation(operation_kind=operation_kind, extra_kind=element_op, xdl_op_type=gemm.XdlOpType.DeviceBatchedGemmXdl, A=a_element_desc, B=b_element_desc, C=c_element_desc, a_elem_op=element_op, b_elem_op=element_op, epilogue_functor=element_op, gemm_specialization=gemm_spec, tile_desc=tile_desc, a_block_transfer=block_desc, b_block_transfer=block_desc)
            manifest.append(new_operation)
            operations.append(new_operation)
    return operations
class agents(AppCommand):
    """CLI command: list the agents registered on the app as a table."""

    title = 'Agents'
    headers = ['name', 'topic', 'help']
    sortkey = attrgetter('name')
    options = [option('--local/--no-local', help='Include agents using a local channel')]

    async def run(self, local: bool) -> None:
        """Render the agent table to the console."""
        rows = [self.agent_to_row(agent) for agent in self.agents(local=local)]
        self.say(self.tabulate(rows, headers=self.headers, title=self.title))

    def agents(self, *, local: bool=False) -> Sequence[AgentT]:
        """Return agents sorted by name; local-channel agents only if *local*."""
        key = cast(Callable[([Type[AgentT]], Any)], self.sortkey)
        selected = []
        for agent in sorted(self.app.agents.values(), key=key):
            if self._maybe_topic(agent) or local:
                selected.append(agent)
        return selected

    def agent_to_row(self, agent: AgentT) -> Sequence[str]:
        """Convert a single agent into a table row."""
        return [self._name(agent), self._topic(agent), self._help(agent)]

    def _name(self, agent: AgentT) -> str:
        return '' + self.abbreviate_fqdn(agent.name)

    def _maybe_topic(self, agent: AgentT) -> Optional[str]:
        # Local channels have no topic name and raise NotImplementedError.
        try:
            return agent.channel.get_topic_name()
        except NotImplementedError:
            return None

    def _topic(self, agent: AgentT) -> str:
        return self._maybe_topic(agent) or '<LOCAL>'

    def _help(self, agent: AgentT) -> str:
        return agent.help or '<N/A>'
def create_wilderness(name='default', mapprovider=None, preserve_items=False):
    """Create the wilderness script *name* unless it already exists.

    Falls back to a default ``WildernessMapProvider`` when *mapprovider* is
    not given (or falsy).
    """
    if WildernessScript.objects.filter(db_key=name).exists():
        # A script with this key is already installed; nothing to do.
        return
    provider = mapprovider or WildernessMapProvider()
    script = create_script(WildernessScript, key=name)
    script.db.mapprovider = provider
    if preserve_items:
        script.preserve_items = True
def process_transaction(env: vm.Environment, tx: Transaction) -> Tuple[Uint, Tuple[Log, ...], Optional[Exception]]:
    """Validate and execute a transaction against the environment's state.

    Charges the sender up front at the effective gas price, runs the message
    call, applies the gas refund, credits the coinbase with the priority fee,
    and performs empty-account cleanup.

    Args:
        env: execution environment (state, origin, gas/base-fee prices, coinbase).
        tx: the transaction to execute.

    Returns:
        (total_gas_used, logs, error) from the executed message call.

    Raises:
        InvalidBlock: when any up-front validity check fails.
    """
    ensure(validate_transaction(tx), InvalidBlock)
    sender = env.origin
    sender_account = get_account(env.state, sender)
    # Worst-case gas cost the sender must be able to cover; fee-market
    # transactions use their declared max fee per gas.
    if isinstance(tx, FeeMarketTransaction):
        gas_fee = (tx.gas * tx.max_fee_per_gas)
    else:
        gas_fee = (tx.gas * tx.gas_price)
    ensure((sender_account.nonce == tx.nonce), InvalidBlock)
    ensure((sender_account.balance >= (gas_fee + tx.value)), InvalidBlock)
    # Sender must have no code, i.e. be an externally owned account.
    ensure((sender_account.code == bytearray()), InvalidBlock)
    # What is actually charged up front uses the effective gas price.
    effective_gas_fee = (tx.gas * env.gas_price)
    gas = (tx.gas - calculate_intrinsic_cost(tx))
    increment_nonce(env.state, sender)
    sender_balance_after_gas_fee = (sender_account.balance - effective_gas_fee)
    set_account_balance(env.state, sender, sender_balance_after_gas_fee)
    # Pre-warm addresses/storage keys declared in the access list
    # (access-list and fee-market transaction types carry one).
    preaccessed_addresses = set()
    preaccessed_storage_keys = set()
    if isinstance(tx, (AccessListTransaction, FeeMarketTransaction)):
        for (address, keys) in tx.access_list:
            preaccessed_addresses.add(address)
            for key in keys:
                preaccessed_storage_keys.add((address, key))
    message = prepare_message(sender, tx.to, tx.value, tx.data, gas, env, preaccessed_addresses=frozenset(preaccessed_addresses), preaccessed_storage_keys=frozenset(preaccessed_storage_keys))
    output = process_message_call(message, env)
    gas_used = (tx.gas - output.gas_left)
    # Refund is capped at one fifth of the gas used (cf. EIP-3529).
    gas_refund = min((gas_used // 5), output.refund_counter)
    gas_refund_amount = ((output.gas_left + gas_refund) * env.gas_price)
    # Only the priority-fee portion is credited to the coinbase; the
    # base-fee portion is not credited anywhere here (burned).
    priority_fee_per_gas = (env.gas_price - env.base_fee_per_gas)
    transaction_fee = (((tx.gas - output.gas_left) - gas_refund) * priority_fee_per_gas)
    total_gas_used = (gas_used - gas_refund)
    # Re-read the sender balance: the message call may have changed it.
    sender_balance_after_refund = (get_account(env.state, sender).balance + gas_refund_amount)
    set_account_balance(env.state, sender, sender_balance_after_refund)
    coinbase_balance_after_mining_fee = (get_account(env.state, env.coinbase).balance + transaction_fee)
    if (coinbase_balance_after_mining_fee != 0):
        set_account_balance(env.state, env.coinbase, coinbase_balance_after_mining_fee)
    elif account_exists_and_is_empty(env.state, env.coinbase):
        # Do not keep an empty coinbase account alive (empty-account cleanup).
        destroy_account(env.state, env.coinbase)
    for address in output.accounts_to_delete:
        destroy_account(env.state, address)
    # Touched-but-empty accounts are removed as well.
    for address in output.touched_accounts:
        if account_exists_and_is_empty(env.state, address):
            destroy_account(env.state, address)
    return (total_gas_used, output.logs, output.error)
class RobotWebHandler(BaseHTTPRequestHandler):
    """Serve static files from the current working directory over HTTP GET.

    Only files whose extension appears in ``mimetype`` are served; any other
    request falls through and ``do_GET`` returns False.
    """

    # Maps file extension -> Content-Type header value.
    # BUG FIX: 'image/jpg' is not a registered media type; 'image/jpeg' is.
    mimetype = {'css': 'text/css', 'gif': 'image/gif', 'html': 'text/html', 'ico': 'image/x-icon', 'jpg': 'image/jpeg', 'js': 'application/javascript', 'png': 'image/png'}

    def do_GET(self):
        """Answer a GET request; return True when the path was handled."""
        if self.path == '/':
            self.path = '/index.html'
        if '.' in self.path:
            extension = self.path.split('.')[-1]
            mt = self.mimetype.get(extension)
            if mt:
                # SECURITY FIX: resolve the requested path and refuse any
                # request that escapes the serving directory (the original
                # joined self.path verbatim, allowing '/../..' traversal).
                root = os.path.abspath(os.curdir)
                filename = os.path.abspath(os.curdir + os.sep + self.path)
                if not (filename == root or filename.startswith(root + os.sep)):
                    log.error('403: %s outside serving directory' % self.path)
                    self.send_error(403, 'Forbidden: %s' % self.path)
                    return True
                if os.path.exists(filename):
                    self.send_response(200)
                    self.send_header('Content-type', mt)
                    self.end_headers()
                    if extension in ('gif', 'ico', 'jpg', 'png'):
                        # Binary assets are streamed as-is.
                        with open(filename, mode='rb') as fh:
                            self.wfile.write(fh.read())
                    else:
                        # Text assets are read as str and encoded for the socket.
                        with open(filename, mode='r') as fh:
                            self.wfile.write(fh.read().encode())
                else:
                    log.error('404: %s not found' % self.path)
                    self.send_error(404, 'File Not Found: %s' % self.path)
                return True
        return False

    def log_message(self, format, *args):
        """Silence the default per-request stderr logging."""
        pass
def get_playback_intros(item_id):
    """Fetch the intro items to play before *item_id*.

    Args:
        item_id: Emby item identifier to fetch intros for.

    Returns:
        list of intro item dicts, or None when the server request failed.
    """
    log.debug('get_playback_intros')
    data_manager = DataManager()
    url = ('{server}/emby/Users/{userid}/Items/%s/Intros' % item_id)
    intro_items = data_manager.get_content(url)
    if intro_items is None:
        log.debug('get_playback_intros failed!')
        return
    # IDIOM: the original appended items one by one into a fresh list,
    # which is just a copy; build the list directly.
    return list(intro_items['Items'])
class OptionSeriesCylinderSonificationTracksMappingHighpass(Options):
    """Accessor for the highpass-filter mapping options of a sonification track.

    NOTE(review): these zero-argument accessors look like they originally
    carried ``@property`` decorators that were lost in extraction — confirm
    against the generator that produced this file.
    """

    def frequency(self) -> 'OptionSeriesCylinderSonificationTracksMappingHighpassFrequency':
        """Sub-configuration for the highpass cutoff frequency."""
        return self._config_sub_data('frequency', OptionSeriesCylinderSonificationTracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionSeriesCylinderSonificationTracksMappingHighpassResonance':
        """Sub-configuration for the highpass resonance."""
        return self._config_sub_data('resonance', OptionSeriesCylinderSonificationTracksMappingHighpassResonance)
def get_ros_home():
    """Return the ROS home directory, picking the right API for the distro.

    Older distros (electric, diamondback, cturtle) expose it via roslib;
    newer ones via rospkg. Any failure falls back to the roslib API.
    """
    try:
        import rospkg.distro
        codename = rospkg.distro.current_distro_codename()
        if codename not in ('electric', 'diamondback', 'cturtle'):
            from rospkg import get_ros_home
            return get_ros_home()
        import roslib.rosenv
        return roslib.rosenv.get_ros_home()
    except Exception:
        # rospkg unavailable or detection failed: use the legacy API.
        from roslib import rosenv
        return rosenv.get_ros_home()
class SparkToCSVStrategy(AbstractToCSVStrategy):
    """Download strategy that materializes a Spark SQL query to local CSVs via S3."""

    def __init__(self, logger: logging.Logger, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._logger = logger

    def download_to_csv(self, source_sql, destination_path, destination_file_name, working_dir_path, covid_profile_download_zip_path):
        """Run *source_sql* in Spark, write CSV parts to S3, merge, download and zip.

        Returns:
            (final_csv_data_file_locations, record_count): local CSV paths and
            the number of data rows written.
        """
        from pyspark.sql import SparkSession
        from usaspending_api.common.etl.spark import hadoop_copy_merge, write_csv_file
        from usaspending_api.common.helpers.spark_helpers import configure_spark_session, get_active_spark_session

        self.spark = None
        # BUG FIX: initialize before the try block; previously this was only
        # assigned inside the try, so a failure during session setup made the
        # finally clause raise AttributeError and mask the real exception.
        self.spark_created_by_command = False
        destination_path_dir = str(destination_path).replace(f'/{destination_file_name}', '')
        s3_bucket_name = settings.BULK_DOWNLOAD_S3_BUCKET_NAME
        s3_bucket_path = f's3a://{s3_bucket_name}'
        s3_bucket_sub_path = 'temp_covid_download'
        s3_destination_path = f'{s3_bucket_path}/{s3_bucket_sub_path}/{destination_file_name}'
        try:
            extra_conf = {
                'spark.sql.extensions': 'io.delta.sql.DeltaSparkSessionExtension',
                'spark.sql.catalog.spark_catalog': 'org.apache.spark.sql.delta.catalog.DeltaCatalog',
                'spark.sql.legacy.parquet.datetimeRebaseModeInWrite': 'LEGACY',
                'spark.sql.legacy.parquet.int96RebaseModeInWrite': 'LEGACY',
                'spark.sql.jsonGenerator.ignoreNullFields': 'false',
            }
            self.spark = get_active_spark_session()
            if not self.spark:
                # No session running: create one and remember to stop it.
                self.spark_created_by_command = True
                self.spark = configure_spark_session(**extra_conf, spark_context=self.spark)
            df = self.spark.sql(source_sql)
            record_count = write_csv_file(self.spark, df, parts_dir=s3_destination_path, logger=self._logger)
            header = ','.join([_.name for _ in df.schema.fields])
            self._logger.info('Concatenating partitioned output files ...')
            merged_file_paths = hadoop_copy_merge(spark=self.spark, parts_dir=s3_destination_path, header=header, max_rows_per_merged_file=EXCEL_ROW_LIMIT, logger=self._logger)
            final_csv_data_file_locations = self._move_data_csv_s3_to_local(s3_bucket_name, merged_file_paths, s3_bucket_path, s3_bucket_sub_path, destination_path_dir)
        except Exception:
            self._logger.exception('Exception encountered. See logs')
            raise
        finally:
            # Always clean up the temporary S3 artifacts and any session we own.
            delete_s3_object(s3_bucket_name, s3_destination_path)
            if self.spark_created_by_command:
                self.spark.stop()
        append_files_to_zip_file(final_csv_data_file_locations, covid_profile_download_zip_path)
        self._logger.info(f'Generated the following data csv files {final_csv_data_file_locations}')
        return (final_csv_data_file_locations, record_count)

    def _move_data_csv_s3_to_local(self, bucket_name, s3_file_paths, s3_bucket_path, s3_bucket_sub_path, destination_path_dir) -> List[str]:
        """Download each merged CSV from S3 into *destination_path_dir*.

        Returns:
            list of local file paths in the same order as *s3_file_paths*.
        """
        start_time = time.time()
        self._logger.info('Moving data files from S3 to local machine...')
        local_csv_file_paths = []
        for file_name in s3_file_paths:
            # Strip the bucket URL, then the temp sub-path, to get the bare name.
            s3_key = file_name.replace(f'{s3_bucket_path}/', '')
            file_name_only = s3_key.replace(f'{s3_bucket_sub_path}/', '')
            final_path = f'{destination_path_dir}/{file_name_only}'
            download_s3_object(bucket_name, s3_key, final_path)
            local_csv_file_paths.append(final_path)
        self._logger.info(f'Copied data files from S3 to local machine in {(time.time() - start_time):3f}s')
        return local_csv_file_paths
class OptionPlotoptionsOrganizationSonificationContexttracksMappingTremolo(Options):
    """Accessor for tremolo-effect mapping options of a sonification context track.

    NOTE(review): these zero-argument accessors look like they originally
    carried ``@property`` decorators that were lost in extraction — confirm
    against the generator that produced this file.
    """

    def depth(self) -> 'OptionPlotoptionsOrganizationSonificationContexttracksMappingTremoloDepth':
        """Sub-configuration for the tremolo depth."""
        return self._config_sub_data('depth', OptionPlotoptionsOrganizationSonificationContexttracksMappingTremoloDepth)

    def speed(self) -> 'OptionPlotoptionsOrganizationSonificationContexttracksMappingTremoloSpeed':
        """Sub-configuration for the tremolo speed."""
        return self._config_sub_data('speed', OptionPlotoptionsOrganizationSonificationContexttracksMappingTremoloSpeed)
class OptionSeriesStreamgraphDatalabels(Options):
    """Data-label options for streamgraph series (generated accessor class).

    Each option appears as a getter (no extra argument, returns the configured
    or default value) immediately followed by a setter of the same name.

    NOTE(review): duplicate method names strongly suggest the original carried
    ``@property`` / ``@<name>.setter`` decorators that were lost in extraction
    — confirm against the generator before relying on attribute-style access.
    """

    def align(self):
        return self._config_get('center')

    def align(self, text: str):
        self._config(text, js_type=False)

    def allowOverlap(self):
        return self._config_get(False)

    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)

    def animation(self) -> 'OptionSeriesStreamgraphDatalabelsAnimation':
        # Sub-configuration object rather than a scalar option.
        return self._config_sub_data('animation', OptionSeriesStreamgraphDatalabelsAnimation)

    def backgroundColor(self):
        return self._config_get(None)

    def backgroundColor(self, text: str):
        self._config(text, js_type=False)

    def borderColor(self):
        return self._config_get(None)

    def borderColor(self, text: str):
        self._config(text, js_type=False)

    def borderRadius(self):
        return self._config_get(0)

    def borderRadius(self, num: float):
        self._config(num, js_type=False)

    def borderWidth(self):
        return self._config_get(0)

    def borderWidth(self, num: float):
        self._config(num, js_type=False)

    def className(self):
        return self._config_get(None)

    def className(self, text: str):
        self._config(text, js_type=False)

    def color(self):
        return self._config_get(None)

    def color(self, text: str):
        self._config(text, js_type=False)

    def crop(self):
        return self._config_get(True)

    def crop(self, flag: bool):
        self._config(flag, js_type=False)

    def defer(self):
        return self._config_get(True)

    def defer(self, flag: bool):
        self._config(flag, js_type=False)

    def enabled(self):
        return self._config_get(False)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def filter(self) -> 'OptionSeriesStreamgraphDatalabelsFilter':
        # Sub-configuration object rather than a scalar option.
        return self._config_sub_data('filter', OptionSeriesStreamgraphDatalabelsFilter)

    def format(self):
        return self._config_get('point.value')

    def format(self, text: str):
        self._config(text, js_type=False)

    def formatter(self):
        return self._config_get(None)

    def formatter(self, value: Any):
        self._config(value, js_type=False)

    def inside(self):
        return self._config_get(None)

    def inside(self, flag: bool):
        self._config(flag, js_type=False)

    def nullFormat(self):
        return self._config_get(None)

    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)

    def nullFormatter(self):
        return self._config_get(None)

    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)

    def overflow(self):
        return self._config_get('justify')

    def overflow(self, text: str):
        self._config(text, js_type=False)

    def padding(self):
        return self._config_get(5)

    def padding(self, num: float):
        self._config(num, js_type=False)

    def position(self):
        return self._config_get('center')

    def position(self, text: str):
        self._config(text, js_type=False)

    def rotation(self):
        return self._config_get(0)

    def rotation(self, num: float):
        self._config(num, js_type=False)

    def shadow(self):
        return self._config_get(False)

    def shadow(self, flag: bool):
        self._config(flag, js_type=False)

    def shape(self):
        return self._config_get('square')

    def shape(self, text: str):
        self._config(text, js_type=False)

    def style(self):
        return self._config_get(None)

    def style(self, value: Any):
        self._config(value, js_type=False)

    def textPath(self) -> 'OptionSeriesStreamgraphDatalabelsTextpath':
        # Sub-configuration object rather than a scalar option.
        return self._config_sub_data('textPath', OptionSeriesStreamgraphDatalabelsTextpath)

    def useHTML(self):
        return self._config_get(False)

    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)

    def verticalAlign(self):
        return self._config_get('bottom')

    def verticalAlign(self, text: str):
        self._config(text, js_type=False)

    def x(self):
        return self._config_get(0)

    def x(self, num: float):
        self._config(num, js_type=False)

    def y(self):
        return self._config_get(0)

    def y(self, num: float):
        self._config(num, js_type=False)

    def zIndex(self):
        return self._config_get(6)

    def zIndex(self, num: float):
        self._config(num, js_type=False)
class NodeArrayAssignment(Node):
    """AST node that assigns a list of literals into an array variable.

    Emits pointer-movement/write code: walk left to the variable, write each
    literal, then walk back to the original pointer position.
    """

    def __init__(self, ids_map_list, token_id, literal_tokens_list):
        Node.__init__(self, ids_map_list)
        self.token_id = token_id
        self.literal_tokens_list = literal_tokens_list

    def get_code(self, current_pointer, *args, **kwargs):
        """Generate the code for this assignment at *current_pointer*."""
        dimensions = get_variable_dimensions_from_token(self.ids_map_list, self.token_id)
        literals = unpack_literal_tokens_to_array_dimensions(self.token_id, dimensions, self.literal_tokens_list)
        offset = get_offset_to_variable(self.ids_map_list, self.token_id, current_pointer)
        # Move left to the array start, emit each literal's code, then move
        # back right past the cells already advanced by the literal writes.
        pieces = ['<' * offset]
        pieces.extend(get_literal_token_code(literal) for literal in literals)
        pieces.append('>' * (offset - len(literals)))
        pieces.append('>')
        return ''.join(pieces)
class GithubClient():
    """Thin wrapper bundling anonymous and token-authenticated PyGithub clients."""

    def __init__(self, token: Optional[str] = None):
        self.assert_github()
        self.client: Github = Github(token)
        self.unauthenticated_client = Github()
        self.__token = token
        # Lazily built by authenticated_client().
        self.__authenticated_client = None

    # NOTE(review): the `cls` parameter suggests this was a @classmethod whose
    # decorator was lost — confirm; it still works when called on an instance.
    def assert_github(cls):
        """Fail fast with a helpful message when PyGithub is not installed."""
        if not Github:
            raise ModuleNotFoundError('Missing PyGithub - try running `pip3 install .[dev]`')

    def authenticated_client(self) -> Github:
        """Return (building on first use) a client authenticated with the token.

        Raises:
            ValueError: when no token has been provided.
        """
        if not self.__token:
            raise ValueError('Token not defined! Re-instantiate with a token or use add_token method')
        if not self.__authenticated_client:
            self.__authenticated_client = Github(self.__token)
        return self.__authenticated_client

    def add_token(self, token):
        """Set or replace the token used for authenticated requests."""
        self.__token = token
        # BUG FIX: drop any cached client built from the previous token so
        # authenticated_client() rebuilds with the new credentials; before,
        # a stale client could keep being returned.
        self.__authenticated_client = None
def react_and_map(reactants, rxn):
    """Run *rxn* on *reactants*, tracking per-atom provenance.

    Returns:
        (products_list, index_map) where index_map maps, per product set and
        per product, each atom to the index of the reactant it came from
        ('reactant_idx') and its atom index within that reactant ('atom_idx');
        None marks atoms with no recorded origin.
    """
    # Tag every reactant atom with the index of the molecule it came from.
    for mol_idx, mol in enumerate(reactants):
        for atom in mol.GetAtoms():
            atom.SetIntProp('reactant_idx', mol_idx)

    # Map atom-map numbers in the reaction templates to their template index,
    # so template-created atoms can be traced back to a reactant.
    mapno_to_template = {}
    for template_idx in range(rxn.GetNumReactantTemplates()):
        template = rxn.GetReactantTemplate(template_idx)
        for atom in template.GetAtoms():
            mapno = atom.GetAtomMapNum()
            if mapno:
                mapno_to_template[mapno] = template_idx

    products_list = rxn.RunReactants(reactants)
    index_map = {'reactant_idx': [], 'atom_idx': []}
    for products in products_list:
        per_set_reac = []
        per_set_atom = []
        for product in products:
            reac_idxmap = []
            atom_idxmap = []
            for atom in product.GetAtoms():
                if atom.HasProp('reactant_idx'):
                    origin = atom.GetIntProp('reactant_idx')
                elif atom.HasProp('old_mapno'):
                    # Atom came from a mapped template atom.
                    origin = mapno_to_template[atom.GetIntProp('old_mapno')]
                else:
                    origin = None
                reac_idxmap.append(origin)
                atom_idxmap.append(atom.GetIntProp('react_atom_idx') if atom.HasProp('react_atom_idx') else None)
            per_set_reac.append(reac_idxmap)
            per_set_atom.append(atom_idxmap)
        index_map['reactant_idx'].append(per_set_reac)
        index_map['atom_idx'].append(per_set_atom)
    return (products_list, index_map)
class Steppers():
    """Factory for stepper UI components in various shapes."""

    def __init__(self, ui):
        self.page = ui.page

    def stepper(self, records: List[dict] = None, width: Union[(tuple, int)] = ('auto', ''), height: Union[(tuple, int)] = (70, 'px'), color: str = None, options: dict = None, profile: Union[(dict, bool)] = False) -> html.HtmlStepper.Stepper:
        """Build the base stepper component with responsive styling."""
        width = Arguments.size(width, unit='%')
        height = Arguments.size(height, unit='px')
        merged_options = {'line': True}
        if options is not None:
            merged_options.update(options)
        component = html.HtmlStepper.Stepper(self.page, records, width, height, color, merged_options, profile)
        component.style.add_classes.div.stepper()
        if merged_options.get('media', True):
            # Collapse the stepper vertically on small screens.
            component.style.css_class.media({'.cssdivstepper li': {'float': None, 'width': '100%'}, '.cssdivstepper li line': {'stroke-width': 0}, '.cssdivstepper li [name=label]': {'width': '100%!IMPORTANT'}}, 'only', 'screen', {'and': [{'max-width': '600px'}]})
        html.Html.set_component_skin(component)
        return component

    def _shaped(self, shape, records, width, height, color, options, profile):
        # Shared implementation for the fixed-shape convenience variants.
        merged_options = {'shape': shape}
        if options is not None:
            merged_options.update(options)
        component = self.stepper(records, width, height, color, merged_options, profile)
        html.Html.set_component_skin(component)
        return component

    def arrow(self, records: List[dict] = None, width: Union[(tuple, int)] = ('auto', ''), height: Union[(tuple, int)] = (70, 'px'), color: str = None, options: dict = None, profile: Union[(dict, bool)] = None) -> html.HtmlStepper.Stepper:
        """Stepper drawn with arrow-shaped steps."""
        return self._shaped('arrow', records, width, height, color, options, profile)

    def rectangle(self, records: List[dict] = None, width: Union[(tuple, int)] = ('auto', ''), height: Union[(tuple, int)] = (70, 'px'), color: str = None, options: dict = None, profile: Union[(dict, bool)] = None) -> html.HtmlStepper.Stepper:
        """Stepper drawn with rectangular steps."""
        return self._shaped('rectangle', records, width, height, color, options, profile)

    def triangle(self, records: List[dict] = None, width: Union[(tuple, int)] = ('auto', ''), height: Union[(tuple, int)] = (70, 'px'), color: str = None, options: dict = None, profile: Union[(dict, bool)] = None) -> html.HtmlStepper.Stepper:
        """Stepper drawn with triangular steps."""
        return self._shaped('triangle', records, width, height, color, options, profile)

    def vertical(self, records: List[dict] = None, shape: str = 'circle', width: Union[(tuple, int)] = ('auto', ''), height: Union[(tuple, int)] = (70, 'px'), color: str = None, options: dict = None, profile: Union[(dict, bool)] = None) -> html.HtmlStepper.Stepper:
        """Vertical stepper without the connecting line."""
        width = Arguments.size(width, unit='%')
        height = Arguments.size(height, unit='px')
        merged_options = {'line': False, 'shape': shape}
        if options is not None:
            merged_options.update(options)
        component = html.HtmlStepper.Stepper(self.page, records, width, height, color, merged_options, profile)
        html.Html.set_component_skin(component)
        return component
def mock_audit_system_is_disabled_when_audit_logs_are_full_pass(self, cmd):
    """Fake auditd config lookups, returning a subprocess-result-like namespace.

    Recognized grep patterns yield passing auditd settings; anything else
    yields empty output.

    Args:
        cmd: the (string) command whose output is being mocked.

    Returns:
        SimpleNamespace with returncode, stderr and stdout attributes.
    """
    # BUG FIX: stdout was unbound (NameError) for commands matching none of
    # the expected patterns; default to empty output instead.
    stdout = []
    if '^space_left_action' in cmd:
        stdout = ['space_left_action = email']
    elif '^action_mail_acct' in cmd:
        stdout = ['action_mail_acct = root']
    elif '^admin_space_left_action' in cmd:
        stdout = ['admin_space_left_action = halt']
    stderr = ['']
    returncode = 0
    return SimpleNamespace(returncode=returncode, stderr=stderr, stdout=stdout)
def Reserve(i):
    """Prospective reserve: discounted expected claims minus premiums.

    Args:
        i: flat annual interest rate (numeric), or the string 'rfr' to
           discount with the Euro risk-free rate curve.

    Returns:
        Discounted net reserve, rounded to 2 decimals.

    Raises:
        ValueError: when *i* is neither numeric nor 'rfr'.

    NOTE(review): relies on module globals (reserve, x, Premium, px_vector,
    Cd, qx_vector, rfr, np) — confirm they are defined before calling.
    """
    discount_factor = []
    for y in range((reserve.w - x) + 1):
        if i == 'rfr':
            discount_factor.append(1 / ((1 + rfr['Euro'][y]) ** (y + 1)))
        elif isinstance(i, (int, float)):
            # BUG FIX: the original tested isinstance(i, float) only, so an
            # integer rate silently produced an empty/short discount vector.
            discount_factor.append(1 / ((1 + i) ** (y + 1)))
        else:
            # Previously unsupported inputs fell through silently; fail loudly.
            raise ValueError(f'unsupported interest specification: {i!r}')
    APV_Premium = np.dot(Premium, px_vector)
    APV_Claims = np.dot(Cd, qx_vector)
    return np.dot(discount_factor, np.subtract(APV_Claims, APV_Premium)).round(2)
def remove_guarded_do_while(ast: AbstractSyntaxTree):
    """Simplify guarded do-while loops into plain while loops.

    A do-while whose sole guard is an if-node with the identical condition and
    no false branch always runs its first iteration under the same condition a
    while loop would check, so the guard is dropped and the loop converted.
    """
    for loop_node, guard_node in _get_potential_guarded_do_while_loops(ast):
        if guard_node.false_branch:
            continue
        if not loop_node.condition.is_equal_to(guard_node.condition):
            continue
        ast.replace_condition_node_by_single_branch(guard_node)
        ast.substitute_loop_node(loop_node, WhileLoopNode(loop_node.condition, loop_node.reaching_condition))
def test_capture_headers_config_is_dynamic_for_errors(client, django_elasticapm_client):
    """capture_headers can be toggled at runtime and applies to error events."""
    for event_index, capture in enumerate((True, False)):
        django_elasticapm_client.config.update(version='1', capture_headers=capture)
        with pytest.raises(MyException):
            client.post(reverse('elasticapm-raise-exc'))
        error = django_elasticapm_client.events[ERROR][event_index]
        if capture:
            assert error['context']['request']['headers']
        else:
            assert 'headers' not in error['context']['request']
class BeaconCallback(IntEnum):
    """Beacon callback type identifiers.

    Numeric codes tagging the kind of data a beacon sends back (console
    output, screenshots, pipe/file events, errors, ...).

    NOTE(review): values appear to mirror an external wire protocol
    (presumably Cobalt Strike's beacon protocol — confirm); do not renumber.
    """
    CALLBACK_OUTPUT = 0
    CALLBACK_KEYSTROKES = 1
    CALLBACK_FILE = 2
    CALLBACK_SCREENSHOT = 3
    CALLBACK_CLOSE = 4
    CALLBACK_READ = 5
    CALLBACK_CONNECT = 6
    CALLBACK_PING = 7
    CALLBACK_FILE_WRITE = 8
    CALLBACK_FILE_CLOSE = 9
    CALLBACK_PIPE_OPEN = 10
    CALLBACK_PIPE_CLOSE = 11
    CALLBACK_PIPE_READ = 12
    CALLBACK_POST_ERROR = 13
    CALLBACK_PIPE_PING = 14
    CALLBACK_TOKEN_STOLEN = 15
    CALLBACK_TOKEN_GETUID = 16
    CALLBACK_PROCESS_LIST = 17
    CALLBACK_POST_REPLAY_ERROR = 18
    CALLBACK_PWD = 19
    CALLBACK_JOBS = 20
    CALLBACK_HASHDUMP = 21
    CALLBACK_PENDING = 22
    CALLBACK_ACCEPT = 23
    CALLBACK_NETVIEW = 24
    CALLBACK_PORTSCAN = 25
    CALLBACK_DEAD = 26
    CALLBACK_SSH_STATUS = 27
    CALLBACK_CHUNK_ALLOCATE = 28
    CALLBACK_CHUNK_SEND = 29
    CALLBACK_OUTPUT_OEM = 30
    CALLBACK_ERROR = 31
    CALLBACK_OUTPUT_UTF8 = 32