code
stringlengths
281
23.7M
def check_no_unexpected_results(mypy_lines: Iterator[str]):
    """Compare mypy output against the expected `FAILING` whitelist.

    Parses mypy's line output (via `mypy_to_pandas`), compares the set of
    failing files with the module-level `FAILING` list, prints a report and
    exits with status 1 whenever maintainer action is needed (new failures,
    or files that now pass and should be removed from `FAILING`).
    """
    df = mypy_to_pandas(mypy_lines)
    # All tracked source files, normalised to repo-relative forward-slash paths.
    all_files = {
        str(fp).replace(str(DP_ROOT), '').strip(os.sep).replace(os.sep, '/')
        for fp in DP_ROOT.glob('pytensor/**/*.py')
    }
    failing = set(df.reset_index().file.str.replace(os.sep, '/', regex=False))
    # Sanity check: mypy must not report files outside the tracked set.
    if (not failing.issubset(all_files)):
        raise Exception(('Mypy should have ignored these files:\n' + '\n'.join(sorted(map(str, (failing - all_files))))))
    passing = (all_files - failing)
    expected_failing = set(FAILING)
    unexpected_failing = (failing - expected_failing)
    unexpected_passing = passing.intersection(expected_failing)
    if (not unexpected_failing):
        print(f'{len(passing)}/{len(all_files)} files pass as expected.')
    else:
        # New failures: report and fail the check.
        print('')
        print(f'{len(unexpected_failing)} files unexpectedly failed.')
        print('\n'.join(sorted(map(str, unexpected_failing))))
        print(f'These files did not fail before, so please check the above output for errors in {unexpected_failing} and fix them.')
        print('You can run `python scripts/run_mypy.py --verbose` to reproduce this test locally.')
        sys.exit(1)
    if unexpected_passing:
        # Files that newly pass: also exit 1 on purpose, to force updating the
        # FAILING list (keeping the whitelist tight).
        print('')
        print(f'{len(unexpected_passing)} files unexpectedly passed the type checks:')
        print('\n'.join(sorted(map(str, unexpected_passing))))
        print('This is good news! Go to scripts/run_mypy.py and remove them from the `FAILING` list.')
        if all_files.issubset(passing):
            print('WOW! All files are passing the mypy type checks!')
            print('scripts\\run_mypy.py may no longer be needed.')
        print('')
        sys.exit(1)
    return
def test_self_update_can_update_from_recommended_installation(tester: CommandTester, repo: TestRepository, installed: TestRepository) -> None:
    """`poetry self update` upgrades Poetry to the next minor plus its deps."""
    new_version = Version.parse(__version__).next_minor().text
    # Currently installed Poetry depends on the old cleo.
    old_poetry = Package('poetry', __version__)
    old_poetry.add_dependency(Factory.create_dependency('cleo', '^0.8.2'))
    # The repository offers the next minor, which requires the new cleo.
    new_poetry = Package('poetry', new_version)
    new_poetry.add_dependency(Factory.create_dependency('cleo', '^1.0.0'))
    installed.add_package(old_poetry)
    installed.add_package(Package('cleo', '0.8.2'))
    repo.add_package(new_poetry)
    repo.add_package(Package('cleo', '1.0.0'))
    tester.execute()
    # NOTE(review): the newlines/blank lines of this expected output were lost
    # when the source was flattened to one line; the layout below is a
    # reconstruction — verify against the command's real output.
    expected_output = f'''Updating Poetry version ...

Using version ^{new_version} for poetry

Updating dependencies
Resolving dependencies...

Package operations: 0 installs, 2 updates, 0 removals

  - Updating cleo (0.8.2 -> 1.0.0)
  - Updating poetry ({__version__} -> {new_version})

Writing lock file
'''
    assert (tester.io.fetch_output() == expected_output)
class TestCompletionMetaInfo():
    """Tests for history.CompletionMetaInfo key/value storage."""

    # NOTE(review): this is used as a pytest fixture by the tests below; the
    # @pytest.fixture decorator appears to have been lost in extraction —
    # confirm against the original file.
    def metainfo(self, database):
        return history.CompletionMetaInfo(database)

    def test_contains_keyerror(self, metainfo):
        # Membership checks on unknown keys raise rather than return False.
        with pytest.raises(KeyError):
            ('does_not_exist' in metainfo)

    def test_getitem_keyerror(self, metainfo):
        with pytest.raises(KeyError):
            metainfo['does_not_exist']

    def test_setitem_keyerror(self, metainfo):
        # Only pre-declared keys may be written.
        with pytest.raises(KeyError):
            metainfo['does_not_exist'] = 42

    def test_contains(self, metainfo):
        assert ('excluded_patterns' in metainfo)

    def test_modify(self, metainfo):
        assert (not metainfo['excluded_patterns'])
        # NOTE(review): the original literal was redacted in this copy of the
        # source (it read `value = '`); placeholder reconstructed — restore
        # the real value from upstream before relying on this test.
        value = 'https://example.com/'
        metainfo['excluded_patterns'] = value
        assert (metainfo['excluded_patterns'] == value)

    def test_recovery_no_key(self, caplog, database, stubs):
        # Deleting the key makes reads fail; a fresh WebHistory must rebuild it.
        web_history = history.WebHistory(database, stubs.FakeHistoryProgress())
        web_history.metainfo.delete('key', 'force_rebuild')
        with pytest.raises(sql.BugError, match='No result for single-result query'):
            web_history.metainfo['force_rebuild']
        with caplog.at_level(logging.WARNING):
            web_history2 = history.WebHistory(database, stubs.FakeHistoryProgress())
        assert (not web_history2.metainfo['force_rebuild'])

    def test_recovery_no_table(self, caplog, database, stubs):
        # Dropping the whole table must likewise be recovered on re-init.
        web_history = history.WebHistory(database, stubs.FakeHistoryProgress())
        web_history.metainfo.database.query('DROP TABLE CompletionMetaInfo').run()
        with pytest.raises(sql.BugError, match='no such table: CompletionMetaInfo'):
            web_history.metainfo['force_rebuild']
        with caplog.at_level(logging.WARNING):
            web_history2 = history.WebHistory(database, stubs.FakeHistoryProgress())
        assert (not web_history2.metainfo['force_rebuild'])
class Effect5359(BaseEffect):
    """Passive hull bonus: boosts large hybrid turret damage based on the
    'shipBonusGBC2' attribute, scaled by the Gallente Battlecruiser skill."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        bonus = ship.getModifiedItemAttr('shipBonusGBC2')
        fit.modules.filteredItemBoost(
            lambda module: module.item.requiresSkill('Large Hybrid Turret'),
            'damageMultiplier',
            bonus,
            skill='Gallente Battlecruiser',
            **kwargs)
def setUpModule():
    """Build the module-level diamond unit cell, k-points and GDF object."""
    global cell, kpts, gdf
    lattice = '\n 0.000000 1.783500 1.783500\n 1.783500 0.000000 1.783500\n 1.783500 1.783500 0.000000\n '
    atoms = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375'
    cell = gto.Cell()
    # Silence output to /dev/null; tight precision for reproducible integrals.
    cell.build(a=lattice, atom=atoms, verbose=7, output='/dev/null',
               pseudo='gth-pade', basis='gth-szv', precision=1e-08)
    kpts = cell.make_kpts([3, 1, 1], scaled_center=[0, 0, 0])
    gdf = df.GDF(cell, kpts)
class LatentLayersSparsityLoss(_Loss):
    """Regularizer encouraging sparse, shared usage of latent layers.

    Combines an entropy-style "share" term over per-layer utilization and a
    squared penalty pulling the expected number of active layers toward
    ``args.target_layers``. Both terms are annealed in after
    ``args.soft_update + args.anneal_updates`` training updates.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args

    def is_valid(self, update_num):
        """Active only once past warm-up and when a layer budget is configured."""
        if self.args.target_layers <= 0:
            return False
        return update_num > self.args.soft_update + self.args.anneal_updates

    def forward(self, layer_samples_list, update_num, sample_size):
        batch_loss = 0
        share_loss = 0
        global_sparsity_loss = 0
        samples = torch.stack(layer_samples_list, dim=0)
        warmup = self.args.soft_update + self.args.anneal_updates
        active = ((self.args.target_layers > 0 or self.args.share_weight > 0)
                  and update_num > warmup)
        if active:
            # Linear anneal of the loss weight over one more anneal window.
            if update_num < warmup:
                weight_anneal = 0
            elif update_num < 2 * self.args.anneal_updates + self.args.soft_update:
                weight_anneal = ((update_num - warmup)
                                 * self.args.share_weight / self.args.anneal_updates)
            else:
                weight_anneal = 1
            # Fraction of samples in which each layer was selected.
            utilization = torch.sum(samples, dim=0) / samples.size()[0]
            if self.args.share_weight > 0:
                # Entropy of utilization (skipping zero entries).
                share_loss = sum(-1.0 * v * math.log(v) for v in utilization if v > 0)
                batch_loss += (weight_anneal * self.args.share_weight
                               * sample_size * share_loss)
            if self.args.target_layers > 0:
                expected_layers = sum(utilization)
                global_sparsity_loss = (expected_layers - self.args.target_layers) ** 2
                # NOTE(review): this term is also scaled by share_weight (not a
                # dedicated sparsity weight) — preserved from the original.
                batch_loss += (weight_anneal * self.args.share_weight
                               * sample_size * global_sparsity_loss)
        return batch_loss
class struct_s_pxe_sw_undi(ctypes.Structure):
    """ctypes mapping of the PXE software-UNDI ('!PXE') structure.

    NOTE(review): field comments below are inferred from PXE naming
    conventions — confirm against the PXE specification before relying on
    them.
    """
    # Tight packing: the on-disk/in-memory layout has no alignment padding.
    _pack_ = True
    _functions_ = []
    _fields_ = [
        ('Signature', ctypes.c_uint32),        # structure signature
        ('Len', ctypes.c_ubyte),               # structure length in bytes
        ('Fudge', ctypes.c_ubyte),             # checksum fudge byte
        ('Rev', ctypes.c_ubyte),               # structure revision
        ('IFcnt', ctypes.c_ubyte),             # interface count
        ('MajorVer', ctypes.c_ubyte),
        ('MinorVer', ctypes.c_ubyte),
        ('IFcntExt', ctypes.c_ubyte),          # extended interface count
        ('reserved1', ctypes.c_ubyte),
        ('Implementation', ctypes.c_uint32),   # implementation flags
        ('EntryPoint', ctypes.c_uint64),
        ('reserved2', (ctypes.c_ubyte * 3)),
        ('BusCnt', ctypes.c_ubyte),
        ('BusType', (ctypes.c_uint32 * 1)),
    ]
def test_multithreading(autoimport: AutoImport, project: Project, pkg1: Folder, mod1: File):
    """A disk-backed AutoImport cache must be usable from another thread."""
    package_init = pkg1.get_child('__init__.py')
    package_init.write(dedent(' def foo():\n pass\n '))
    mod1.write(dedent(' foo\n '))
    # Recreate with memory=False so the cache lives on disk, not in-process.
    autoimport = AutoImport(project, memory=False)
    autoimport.generate_cache([package_init])
    worker = ThreadPoolExecutor(1)
    search_results = worker.submit(autoimport.search, 'foo', True).result()
    assert search_results == [('from pkg1 import foo', 'foo')]
def parse_method(method):
    """Split a method spec such as ``name#en,de-fr`` into its components.

    Returns ``(actual_method, multilingual, train_langs, eval_lang,
    train_en_prob)``. Without a ``#`` the default language is used for both
    training and evaluation.
    """
    assert type(method) is str, type(method)
    multilingual = False
    eval_lang = main_lang
    train_en_prob = None
    if '#' not in method:
        # Plain method name: train/evaluate on the module default language.
        return (method, multilingual, [main_lang], eval_lang, train_en_prob)
    multilingual = True
    actual_method, spec = method.split('#')
    raw_train, eval_lang = spec.split('-')
    resolved = []
    for lang in raw_train.split(','):
        if 'en' in lang and lang != 'en':
            # An 'en…' entry carries a sampling probability suffix; train on
            # plain 'en'. NOTE(review): assumes the number starts at offset 3
            # of the token — confirm the expected spec format.
            train_en_prob = float(lang[3:])
            resolved.append('en')
        else:
            assert lang in lang2code.keys()
            resolved.append(lang)
    assert eval_lang in lang2code.keys()
    return (actual_method, multilingual, resolved, eval_lang, train_en_prob)
def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, pretrained=False, **kwargs):
    """Build an EfficientNet (B0-family) scaled by width/depth multipliers.

    ``channel_multiplier``/``depth_multiplier`` implement compound scaling;
    remaining ``kwargs`` are forwarded to the model constructor.
    """
    # Stage specs: block type, repeats, kernel, stride, expansion, channels, SE ratio.
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'],
        ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
    # Caller may override the norm layer; otherwise BatchNorm with resolved args.
    norm_layer = kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        act_layer=resolve_act_layer(kwargs, 'swish'),
        norm_layer=norm_layer,
        **kwargs,
    )
    return _create_effnet(variant, pretrained, **model_kwargs)
class AgeDB30(data.Dataset):
    """AgeDB-30 verification pairs.

    Each item yields the left/right images plus their horizontal flips
    (4 images), either transformed or as raw tensors.
    """

    def __init__(self, root, file_list, transform=None, loader=img_loader):
        self.root = root
        self.file_list = file_list
        self.transform = transform
        self.loader = loader
        self.nameLs = []
        self.nameRs = []
        self.folds = []
        self.flags = []
        with open(file_list) as f:
            pairs = f.read().splitlines()
        for index, line in enumerate(pairs):
            # Each line: <left path> <right path> <same/different flag>
            fields = line.split(' ')
            self.nameLs.append(fields[0])
            self.nameRs.append(fields[1])
            self.folds.append(index // 600)  # 600 pairs per fold
            self.flags.append(int(fields[2]))

    def __getitem__(self, index):
        left = self.loader(os.path.join(self.root, self.nameLs[index]))
        right = self.loader(os.path.join(self.root, self.nameRs[index]))
        # Originals plus mirrored copies for flip-augmented evaluation.
        imglist = [left, cv2.flip(left, 1), right, cv2.flip(right, 1)]
        if self.transform is None:
            return [torch.from_numpy(img) for img in imglist]
        return [self.transform(img) for img in imglist]

    def __len__(self):
        return len(self.nameLs)
class TableCellStyle():
    """Styling for a single table cell: colors, options, alignment, format."""

    def __init__(self, fg: str='default', bg: str='default', options: (list[str] | None)=None, align: _Align='left', cell_format: (str | None)=None) -> None:
        self._fg = fg
        self._bg = bg
        self._options = options
        # BUG FIX: was hard-coded to 'left', silently ignoring the *align*
        # argument so pad() never honored the requested alignment.
        self._align = align
        self._cell_format = cell_format

    def cell_format(self) -> (str | None):
        """Return the raw cell format string, if any.

        NOTE(review): looks like this was meant to be a @property upstream —
        kept as a plain method to preserve the visible interface.
        """
        return self._cell_format

    def tag(self) -> str:
        """Return the opening formatter tag, e.g. '<fg=red;bg=blue;options=bold>'."""
        # BUG FIX: the fg/bg placeholders were never interpolated (the string
        # '<fg={};bg={}' was emitted literally).
        tag = f'<fg={self._fg};bg={self._bg}'
        if self._options:
            tag += f";options={','.join(self._options)}"
        tag += '>'
        return tag

    def pad(self, string: str, length: int, char: str=' ') -> str:
        """Pad *string* to *length* according to the cell's alignment."""
        # BUG FIX: left/right were swapped — left-aligned text pads on the
        # right (ljust) and right-aligned text pads on the left (rjust).
        if (self._align == 'left'):
            return string.ljust(length, char)
        if (self._align == 'right'):
            return string.rjust(length, char)
        return string.center(length, char)
class LatentEditorWrapper():
    """Bundles InterFaceGAN and GANSpace latent-space edits behind one API."""

    def __init__(self):
        # Paths to precomputed InterFaceGAN direction tensors (loaded to GPU).
        self.interfacegan_directions = {
            'age': f'{interfacegan_age}',
            'smile': f'{interfacegan_smile}',
            'rotation': f'{interfacegan_rotation}',
        }
        self.interfacegan_directions_tensors = {
            name: torch.load(path).cuda()
            for name, path in self.interfacegan_directions.items()
        }
        self.ganspace_pca = torch.load(f'{ffhq_pca}')
        # (PCA component, start layer, end layer, strength)
        self.ganspace_directions = {
            'eye_openness': (54, 7, 8, 5),
            'smile': (46, 4, 5, (- 6)),
            'trimmed_beard': (58, 7, 9, 7),
        }
        self.latent_editor = LatentEditor()

    def get_single_ganspace_edits(self, start_w, factors):
        """Apply every GANSpace direction at every factor; return edited latents."""
        edited = []
        for direction in self.ganspace_directions.values():
            for factor in factors:
                # Swap the strength component for the requested factor.
                tweaked = (*direction[:(- 1)], factor)
                edited.append(self.latent_editor.apply_ganspace(start_w, self.ganspace_pca, [tweaked]))
        return edited

    def get_single_interface_gan_edits(self, start_w, factors):
        """Apply rotation/smile/age edits; returns {direction: {factor: latent}}."""
        edits = {}
        for direction in ['rotation', 'smile', 'age']:
            per_factor = edits.setdefault(direction, {})
            for factor in factors:
                per_factor[factor] = self.latent_editor.apply_interfacegan(
                    start_w,
                    self.interfacegan_directions_tensors[direction],
                    (factor / 2))
        return edits
class Maze(tk.Tk, object):
    """Tkinter grid-world maze environment for tabular/DQN RL demos.

    Relies on module-level MAZE_H, MAZE_W (grid size) and UNIT (cell size
    in pixels).
    """

    def __init__(self):
        super(Maze, self).__init__()
        self.action_space = ['u', 'd', 'l', 'r']  # up, down, left, right
        self.n_actions = len(self.action_space)
        self.n_features = 2  # observation: normalized (dx, dy) to the goal
        self.title('maze')
        self.geometry('{}x{}'.format((MAZE_H * UNIT), (MAZE_W * UNIT)))
        self._build_maze()

    def _build_maze(self):
        """Draw the canvas: grid lines, hell cell, goal oval, agent rect."""
        self.canvas = tk.Canvas(self, bg='white', height=(MAZE_H * UNIT), width=(MAZE_W * UNIT))
        # Vertical then horizontal grid lines every UNIT pixels.
        for c in range(0, (MAZE_W * UNIT), UNIT):
            (x0, y0, x1, y1) = (c, 0, c, (MAZE_H * UNIT))
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, (MAZE_H * UNIT), UNIT):
            (x0, y0, x1, y1) = (0, r, (MAZE_W * UNIT), r)
            self.canvas.create_line(x0, y0, x1, y1)
        origin = np.array([25, 25])  # centre of the top-left cell
        # Black "hell" cell: entering it ends the episode with reward -1.
        hell1_center = (origin + np.array([(UNIT * 2), UNIT]))
        self.hell1 = self.canvas.create_rectangle((hell1_center[0] - 20), (hell1_center[1] - 20), (hell1_center[0] + 20), (hell1_center[1] + 20), fill='black')
        # Red goal oval: entering it ends the episode with reward +1.
        oval_center = (origin + (UNIT * 2))
        self.oval = self.canvas.create_oval((oval_center[0] - 20), (oval_center[1] - 20), (oval_center[0] + 20), (oval_center[1] + 20), fill='red')
        # The agent starts at the origin cell.
        self.rect = self.canvas.create_rectangle((origin[0] - 20), (origin[1] - 20), (origin[0] + 20), (origin[1] + 20))
        self.canvas.pack()

    def reset(self):
        """Move the agent back to the start cell; return the initial observation."""
        self.update()
        time.sleep(0.1)
        self.canvas.delete(self.rect)
        origin = np.array([25, 25])
        self.rect = self.canvas.create_rectangle((origin[0] - 20), (origin[1] - 20), (origin[0] + 20), (origin[1] + 20), fill='yellow')
        # Observation: (agent - goal) pixel offset scaled by the maze height.
        return ((np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT))

    def step(self, action):
        """Apply an action (0=up, 1=down, 2=right, 3=left).

        Returns (next_observation, reward, done). Moves are clamped at the
        maze borders.
        """
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if (action == 0):  # up
            if (s[1] > UNIT):
                base_action[1] -= UNIT
        elif (action == 1):  # down
            if (s[1] < ((MAZE_H - 1) * UNIT)):
                base_action[1] += UNIT
        elif (action == 2):  # right
            if (s[0] < ((MAZE_W - 1) * UNIT)):
                base_action[0] += UNIT
        elif (action == 3):  # left
            if (s[0] > UNIT):
                base_action[0] -= UNIT
        self.canvas.move(self.rect, base_action[0], base_action[1])
        next_coords = self.canvas.coords(self.rect)
        # Terminal checks: goal first, then the hell cell.
        if (next_coords == self.canvas.coords(self.oval)):
            reward = 1
            done = True
        elif (next_coords in [self.canvas.coords(self.hell1)]):
            reward = (- 1)
            done = True
        else:
            reward = 0
            done = False
        s_ = ((np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT))
        return (s_, reward, done)

    def render(self):
        # Process pending Tk events so the window repaints.
        self.update()
class RepeatCopyEnv(algorithmic_env.TapeAlgorithmicEnv):
    """Tape task: output the input sequence, then its reversal, then the
    input once more."""

    MIN_REWARD_SHORTFALL_FOR_PROMOTION = (- 0.1)

    def __init__(self, base=5):
        super(RepeatCopyEnv, self).__init__(base=base, chars=True)
        self.last = 50

    def target_from_input_data(self, input_data):
        """Expected output: input + reversed(input) + input."""
        mirrored = list(reversed(input_data))
        return input_data + mirrored + input_data
def test_deep_copy():
    """deep_copy_with must substitute type variables inside nested generics."""
    single = {T.__name__: int}
    assert deep_copy_with(Optional[T], single) == Optional[int]
    assert deep_copy_with(List_origin[Optional[T]], single) == List_origin[Optional[int]]
    # Two type variables substituted at once, nested two levels deep.
    double = {T.__name__: int, T2.__name__: str}
    nested = Dict_origin[(T2, List_origin[Optional[T]])]
    assert deep_copy_with(nested, double) == Dict_origin[(str, List_origin[Optional[int]])]
class TestForensic():
    """String-extraction (forensic) analysis tests over a fixture APK."""

    def test_all_strings(self, forensic):
        assert (len(forensic.get_all_strings()) == 1005)

    def test_get_url(self, forensic):
        assert (len(forensic.get_url()) == 4)
        # NOTE(review): the four expected URL literals were redacted from this
        # copy of the source (each assert read `assert (' in ...`); the
        # placeholders below must be restored from the original fixture.
        assert ('<redacted-url-1>' in forensic.get_url())
        assert ('<redacted-url-2>' in forensic.get_url())
        assert ('<redacted-url-3>' in forensic.get_url())
        assert ('<redacted-url-4>' in forensic.get_url())

    def test_get_ip(self, forensic):
        assert (len(forensic.get_ip()) == 3)
        assert ('10.0.0.200' in forensic.get_ip())
        assert ('114.80.208.163' in forensic.get_ip())
        assert ('10.0.0.172' in forensic.get_ip())

    def test_get_content(self, forensic):
        # Android content:// URIs referenced by the sample.
        assert (len(forensic.get_content()) == 4)
        assert ('content://sms' in forensic.get_content())
        assert ('content://telephony/carriers' in forensic.get_content())
        assert ('content://sms/sent' in forensic.get_content())
        assert ('content://telephony/carriers/preferapn' in forensic.get_content())

    def test_get_file(self, forensic):
        assert (len(forensic.get_file()) == 0)

    def test_get_base64(self, forensic):
        assert (len(forensic.get_base64()) == 102)

    def test_get_android_api(self, forensic):
        assert (len(forensic.get_android_api()) == 640)
        result = [str(x) for x in forensic.get_android_api()]
        # Spot-check sensitive APIs (location + SMS sending) are detected.
        assert any((('getCellLocation' in meth) for meth in result))
        assert any((('sendTextMessage' in meth) for meth in result))
class SponsorContactModelTests(TestCase):
    """Behaviour of the primary-contact lookup on SponsorContact."""

    def test_get_primary_contact_for_sponsor(self):
        sponsor = baker.make(Sponsor)
        # Five non-primary contacts for this sponsor, plus an unrelated
        # primary contact belonging to some other sponsor.
        baker.make(SponsorContact, sponsor=sponsor, primary=False, _quantity=5)
        baker.make(SponsorContact, primary=True)
        self.assertEqual(5, SponsorContact.objects.filter(sponsor=sponsor).count())
        # No primary contact yet: the manager raises, the property is None.
        with self.assertRaises(SponsorContact.DoesNotExist):
            SponsorContact.objects.get_primary_contact(sponsor)
        self.assertIsNone(sponsor.primary_contact)
        # Once a primary contact exists, both accessors return it.
        expected = baker.make(SponsorContact, primary=True, sponsor=sponsor)
        self.assertEqual(SponsorContact.objects.get_primary_contact(sponsor), expected)
        self.assertEqual(sponsor.primary_contact, expected)
class Scenario(ScenarioGenerator):
    """CCRm-style scenario: ego accelerates toward a slower lead vehicle with
    a parameterised lateral offset (scenariogeneration / esmini).

    NOTE(review): depends on module-level `acceleration_time` and
    `ttc_at_speed` — confirm their definitions elsewhere in the file.
    """

    def __init__(self):
        ScenarioGenerator.__init__(self)
        self.naming = 'numerical'
        self.generate_all_roads = False
        # Parameter sweep: ego speed 30..80 km/h, lateral overlap -50%..+50%.
        self.parameters['ego_speedvalue'] = [x for x in range(30, 85, 5)]
        self.parameters['offset'] = [(- 50), (- 25), 0, 25, 50]

    def road(self, **kwargs):
        """Straight 500 m road with two lanes in each direction."""
        road = xodr.create_road([xodr.Line(500)], id=1, left_lanes=2, right_lanes=2)
        odr = xodr.OpenDrive('myroad')
        odr.add_road(road)
        odr.adjust_roads_and_lanes()
        return odr

    def scenario(self, **kwargs):
        """Build the OpenSCENARIO document for one parameter combination."""
        catalog = xosc.Catalog()
        road = xosc.RoadNetwork(roadfile=self.road_file)
        paramdec = xosc.ParameterDeclarations()
        egoname = 'Ego'
        targetname = 'Target1'
        ego_width = 2
        target_width = 1.8
        # Ego vehicle (white car) definition: bounding box + axles.
        bb = xosc.BoundingBox(ego_width, 5, 1.8, 2.0, 0, 0.9)
        fa = xosc.Axle(0., 0.8, 1.68, 2.98, 0.4)
        ba = xosc.Axle(0., 0.8, 1.68, 0, 0.4)
        white_veh = xosc.Vehicle('car_white', xosc.VehicleCategory.car, bb, fa, ba, 69, 10, 10)
        white_veh.add_property_file('../models/car_white.osgb')
        white_veh.add_property('model_id', '0')
        # Target vehicle (red car) definition.
        bb = xosc.BoundingBox(target_width, 4.5, 1.5, 1.3, 0, 0.8)
        fa = xosc.Axle(0., 0.8, 1.68, 2.98, 0.4)
        ba = xosc.Axle(0., 0.8, 1.68, 0, 0.4)
        red_veh = xosc.Vehicle('car_red', xosc.VehicleCategory.car, bb, fa, ba, 69, 10, 10)
        red_veh.add_property_file('../models/car_red.osgb')
        red_veh.add_property('model_id', '2')
        entities = xosc.Entities()
        entities.add_scenario_object(egoname, white_veh)
        entities.add_scenario_object(targetname, red_veh)
        init = xosc.Init()
        step_time = xosc.TransitionDynamics(xosc.DynamicsShapes.step, xosc.DynamicsDimension.time, 1)
        # Lateral offset expressed as a fraction of the target's width.
        cal_offset = ((kwargs['offset'] / 100) * target_width)
        egospeed = xosc.AbsoluteSpeedAction(0, step_time)
        egostart = xosc.TeleportAction(xosc.LanePosition(25, cal_offset, (- 1), 1))
        # Place the target ahead so the collision geometry holds after the
        # ego's acceleration phase plus the desired time-to-collision.
        startpos = (25 + (((kwargs['ego_speedvalue'] - 20) / 3.6) * (acceleration_time + ttc_at_speed)))
        targetspeed = xosc.AbsoluteSpeedAction(0, step_time)
        targetstart = xosc.TeleportAction(xosc.LanePosition(startpos, 0, (- 1), 1))
        init.add_init_action(egoname, egospeed)
        init.add_init_action(egoname, egostart)
        init.add_init_action(targetname, targetspeed)
        init.add_init_action(targetname, targetstart)
        trigger = xosc.ValueTrigger('starttrigger', 0, xosc.ConditionEdge.rising, xosc.SimulationTimeCondition(1, xosc.Rule.greaterThan))
        # Ego accelerates to its swept speed value...
        eventego = xosc.Event('egospeedchange', xosc.Priority.overwrite)
        eventego.add_trigger(trigger)
        ego_action = xosc.AbsoluteSpeedAction((kwargs['ego_speedvalue'] / 3.6), xosc.TransitionDynamics(xosc.DynamicsShapes.linear, xosc.DynamicsDimension.time, acceleration_time))
        eventego.add_action('newspeed', ego_action)
        # ...while the target settles at a constant 20 km/h.
        event_tar = xosc.Event('targetspeedchange', xosc.Priority.overwrite)
        event_tar.add_trigger(trigger)
        target_action = xosc.AbsoluteSpeedAction((20 / 3.6), xosc.TransitionDynamics(xosc.DynamicsShapes.linear, xosc.DynamicsDimension.time, acceleration_time))
        event_tar.add_action('targetspeed', target_action)
        ego_man = xosc.Maneuver('ego man')
        ego_man.add_event(eventego)
        tar_man = xosc.Maneuver('target man')
        tar_man.add_event(event_tar)
        egomangr = xosc.ManeuverGroup('egomangr')
        egomangr.add_actor(egoname)
        egomangr.add_maneuver(ego_man)
        tarmangr = xosc.ManeuverGroup('tarmangr')
        tarmangr.add_actor(targetname)
        tarmangr.add_maneuver(tar_man)
        act = xosc.Act('ccrm act', xosc.ValueTrigger('starttrigger', 0, xosc.ConditionEdge.rising, xosc.SimulationTimeCondition(0, xosc.Rule.greaterThan)))
        act.add_maneuver_group(egomangr)
        act.add_maneuver_group(tarmangr)
        story = xosc.Story('mystory')
        story.add_act(act)
        # Stop the simulation after twice the TTC plus the acceleration phase.
        sb = xosc.StoryBoard(init, xosc.ValueTrigger('stop_simulation', 0, xosc.ConditionEdge.rising, xosc.SimulationTimeCondition(((ttc_at_speed * 2) + acceleration_time), xosc.Rule.greaterThan), 'stop'))
        sb.add_story(story)
        sce = xosc.Scenario(((('CCRm_v: ' + str(kwargs['ego_speedvalue'])) + ', offset: ') + str(kwargs['offset'])), 'Mandolin', paramdec, entities=entities, storyboard=sb, roadnetwork=road, catalog=catalog)
        return sce
def test_groupby_agg_multi_column():
    """groupby_agg broadcasts the per-group mean back onto every row."""
    # NOTE(review): the six date literals were redacted during extraction
    # (source read `'date': ['', '', ...]`); for the expected averages below
    # to hold they must form three two-row groups — restore the originals.
    df = pd.DataFrame({'date': ['20190101', '20190101', '20190102', '20190102', '20190103', '20190103'], 'user_id': [1, 2, 1, 2, 1, 2], 'values': [1, 2, 3, 4, 5, 6]})
    df_new = df.groupby_agg(by=['date'], new_column_name='values_avg', agg_column_name='values', agg='mean')
    # Mean of (1,2), (3,4), (5,6) repeated for both rows of each group.
    expected_agg = np.array([1.5, 1.5, 3.5, 3.5, 5.5, 5.5])
    np.testing.assert_equal(df_new['values_avg'], expected_agg)
class TestMeasurementErrorMitigation(QiskitAquaTestCase):
    """Integration tests for readout-error mitigation in QuantumInstance."""

    def setUp(self):
        super().setUp()
        # Skip the whole class when qiskit-aer is unavailable.
        try:
            from qiskit import Aer
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return

    def test_measurement_error_mitigation(self):
        """Mitigation should not reduce the top-measurement probability."""
        try:
            from qiskit import Aer
            from qiskit.providers.aer import noise
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        aqua_globals.random_seed = 0
        # Asymmetric readout error on every qubit.
        noise_model = noise.NoiseModel()
        read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
        noise_model.add_all_qubit_readout_error(read_err)
        backend = Aer.get_backend('qasm_simulator')
        quantum_instance = QuantumInstance(backend=backend, seed_simulator=167, seed_transpiler=167, noise_model=noise_model)
        qi_with_mitigation = QuantumInstance(backend=backend, seed_simulator=167, seed_transpiler=167, noise_model=noise_model, measurement_error_mitigation_cls=CompleteMeasFitter)
        oracle = LogicalExpressionOracle('a & b & c')
        grover = Grover(oracle)
        result_wo_mitigation = grover.run(quantum_instance)
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
        prob_top_meas_wo_mitigation = result_wo_mitigation.measurement[result_wo_mitigation.top_measurement]
        result_w_mitigation = grover.run(qi_with_mitigation)
        prob_top_meas_w_mitigation = result_w_mitigation.measurement[result_w_mitigation.top_measurement]
        self.assertGreaterEqual(prob_top_meas_w_mitigation, prob_top_meas_wo_mitigation)

    def test_measurement_error_mitigation_auto_refresh(self):
        """With refresh period 0, the calibration matrix is rebuilt on re-run."""
        try:
            from qiskit import Aer
            from qiskit.providers.aer import noise
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        aqua_globals.random_seed = 0
        noise_model = noise.NoiseModel()
        read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
        noise_model.add_all_qubit_readout_error(read_err)
        backend = Aer.get_backend('qasm_simulator')
        quantum_instance = QuantumInstance(backend=backend, seed_simulator=1679, seed_transpiler=167, noise_model=noise_model, measurement_error_mitigation_cls=CompleteMeasFitter, cals_matrix_refresh_period=0)
        oracle = LogicalExpressionOracle('a & b & c')
        grover = Grover(oracle)
        _ = grover.run(quantum_instance)
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
        (cals_matrix_1, timestamp_1) = quantum_instance.cals_matrix(qubit_index=[0, 1, 2])
        # Wait so the rebuilt calibration matrix gets a later timestamp.
        time.sleep(15)
        aqua_globals.random_seed = 2
        quantum_instance.set_config(seed_simulator=111)
        _ = grover.run(quantum_instance)
        (cals_matrix_2, timestamp_2) = quantum_instance.cals_matrix(qubit_index=[0, 1, 2])
        diff = (cals_matrix_1 - cals_matrix_2)
        total_diff = np.sum(np.abs(diff))
        self.assertGreater(total_diff, 0.0)
        self.assertGreater(timestamp_2, timestamp_1)

    def test_measurement_error_mitigation_with_dedicated_shots(self):
        """Changing the mitigation shot count forces a new calibration matrix."""
        try:
            from qiskit import Aer
            from qiskit.providers.aer import noise
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        aqua_globals.random_seed = 0
        noise_model = noise.NoiseModel()
        read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
        noise_model.add_all_qubit_readout_error(read_err)
        backend = Aer.get_backend('qasm_simulator')
        quantum_instance = QuantumInstance(backend=backend, seed_simulator=1679, seed_transpiler=167, shots=100, noise_model=noise_model, measurement_error_mitigation_cls=CompleteMeasFitter, cals_matrix_refresh_period=0)
        oracle = LogicalExpressionOracle('a & b & c')
        grover = Grover(oracle)
        _ = grover.run(quantum_instance)
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
        (cals_matrix_1, timestamp_1) = quantum_instance.cals_matrix(qubit_index=[0, 1, 2])
        quantum_instance.measurement_error_mitigation_shots = 1000
        _ = grover.run(quantum_instance)
        (cals_matrix_2, timestamp_2) = quantum_instance.cals_matrix(qubit_index=[0, 1, 2])
        diff = (cals_matrix_1 - cals_matrix_2)
        total_diff = np.sum(np.abs(diff))
        self.assertGreater(total_diff, 0.0)
        self.assertGreater(timestamp_2, timestamp_1)

    def test_measurement_error_mitigation_with_diff_qubit_order(self):
        """Different measured-qubit counts in one job must raise AquaError."""
        try:
            from qiskit import Aer
            from qiskit.providers.aer import noise
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        aqua_globals.random_seed = 0
        noise_model = noise.NoiseModel()
        read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
        noise_model.add_all_qubit_readout_error(read_err)
        backend = Aer.get_backend('qasm_simulator')
        quantum_instance = QuantumInstance(backend=backend, seed_simulator=1679, seed_transpiler=167, shots=1000, noise_model=noise_model, measurement_error_mitigation_cls=CompleteMeasFitter, cals_matrix_refresh_period=0)
        # Two Bell-state circuits measuring the same qubits in swapped order.
        qc1 = QuantumCircuit(2, 2)
        qc1.h(0)
        qc1.cx(0, 1)
        qc1.measure(0, 0)
        qc1.measure(1, 1)
        qc2 = QuantumCircuit(2, 2)
        qc2.h(0)
        qc2.cx(0, 1)
        qc2.measure(1, 0)
        qc2.measure(0, 1)
        quantum_instance.execute([qc1, qc2])
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
        # A three-qubit circuit alongside qc1 uses a different qubit set.
        qc3 = QuantumCircuit(3, 3)
        qc3.h(2)
        qc3.cx(1, 2)
        qc3.measure(2, 1)
        qc3.measure(1, 2)
        self.assertRaises(AquaError, quantum_instance.execute, [qc1, qc3])

    def test_measurement_error_mitigation_with_vqe(self):
        """VQE on noisy H2 with mitigation reaches the known ground energy."""
        try:
            from qiskit import Aer
            from qiskit.providers.aer import noise
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        aqua_globals.random_seed = 0
        noise_model = noise.NoiseModel()
        read_err = noise.errors.readout_error.ReadoutError([[0.9, 0.1], [0.25, 0.75]])
        noise_model.add_all_qubit_readout_error(read_err)
        backend = Aer.get_backend('qasm_simulator')
        quantum_instance = QuantumInstance(backend=backend, seed_simulator=167, seed_transpiler=167, noise_model=noise_model, measurement_error_mitigation_cls=CompleteMeasFitter)
        # NOTE(review): the Pauli coefficients were truncated during extraction
        # (the source read `(- 1.)`, `(0. * ...)`); restored from the standard
        # qiskit-aqua H2 test fixture — confirm against upstream.
        h2_hamiltonian = ((((((- 1.052373245772859) * (I ^ I)) + (0.39793742484318045 * (I ^ Z))) - (0.39793742484318045 * (Z ^ I))) - (0.01128010425623538 * (Z ^ Z))) + (0.18093119978423156 * (X ^ X)))
        optimizer = SPSA(maxiter=200)
        var_form = EfficientSU2(2, reps=1)
        vqe = VQE(var_form=var_form, operator=h2_hamiltonian, quantum_instance=quantum_instance, optimizer=optimizer)
        result = vqe.compute_minimum_eigenvalue()
        self.assertGreater(quantum_instance.time_taken, 0.0)
        quantum_instance.reset_execution_results()
        self.assertAlmostEqual(result.eigenvalue.real, (- 1.86), places=2)
class BypassQueue1EntryRTL(Component):
    """One-entry bypass queue: an incoming message cuts through to the send
    interface in the same cycle when the buffer is empty; otherwise it is
    latched into the single entry."""

    def construct(s, EntryType):
        # Interface
        s.recv = RecvIfcRTL(EntryType)
        s.send = SendIfcRTL(EntryType)
        s.count = OutPort()
        # State: occupancy flag and the buffered entry.
        s.full = Wire()
        s.entry = Wire(EntryType)
        # Output mux: buffered entry when full, else bypass recv.msg directly.
        s.bypass_mux = m = Mux(EntryType, 2)
        m.in_[0] //= s.recv.msg
        m.in_[1] //= s.entry
        m.out //= s.send.msg
        m.sel //= s.full
        s.count //= s.full
        # Valid when holding an entry or a message is arriving; ready when empty.
        s.send.val //= (lambda : (s.full | s.recv.val))
        s.recv.rdy //= (lambda : (~ s.full))

        # NOTE(review): the decorator was garbled to `_ff` in this copy of the
        # source; restored to PyMTL3's @update_ff — confirm against upstream.
        @update_ff
        def ff_bypass1():
            if s.reset:
                s.full <<= 0
            else:
                # Become/stay full when send stalls while holding or receiving.
                s.full <<= ((~ s.send.rdy) & (s.full | s.recv.val))
            # Latch the message when it could not be bypassed out this cycle.
            if (((~ s.send.rdy) & (~ s.full)) & s.recv.val):
                s.entry <<= s.recv.msg

    def line_trace(s):
        return f'{s.recv}({s.full}){s.send}'
def train():
    """Train the FGVC part-based model end to end (configured via CLI args).

    Writes per-epoch train accuracy to <exp_dir>/results_train.txt, periodic
    test accuracy to <exp_dir>/results_test.txt, and saves the best model to
    <exp_dir>/model.pth.
    """
    parser = argparse.ArgumentParser('FGVC', add_help=False)
    parser.add_argument('--epochs', type=int, default=300, help='training epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size for training')
    parser.add_argument('--resume', type=str, default='', help='resume from saved model path')
    parser.add_argument('--dataset_name', type=str, default='cub', help='dataset name')
    parser.add_argument('--topn', type=int, default=4, help='parts number')
    parser.add_argument('--backbone', type=str, default='resnet50', help='backbone')
    parser.add_argument('--lr', type=float, default=0.002, help='learning rate')
    (args, _) = parser.parse_known_args()
    epochs = args.epochs
    batch_size = args.batch_size
    # dataset name -> [number of classes, data root]
    data_config = {'air': [100, '../Data/fgvc-aircraft-2013b'], 'car': [196, '../Data/stanford_cars'], 'dog': [120, '../Data/StanfordDogs'], 'cub': [200, '../Data/CUB_200_2011']}
    dataset_name = args.dataset_name
    (classes_num, data_root) = data_config[dataset_name]
    if (dataset_name == 'air'):
        trainset = AIR(root=data_root, is_train=True, data_len=None)
        testset = AIR(root=data_root, is_train=False, data_len=None)
    elif (dataset_name == 'car'):
        trainset = CAR(root=data_root, is_train=True, data_len=None)
        testset = CAR(root=data_root, is_train=False, data_len=None)
    elif (dataset_name == 'dog'):
        trainset = DOG(root=data_root, is_train=True, data_len=None)
        testset = DOG(root=data_root, is_train=False, data_len=None)
    elif (dataset_name == 'cub'):
        trainset = CUB(root=data_root, is_train=True, data_len=None)
        testset = CUB(root=data_root, is_train=False, data_len=None)
    num_workers = (16 if torch.cuda.is_available() else 0)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=False)
    topn = args.topn
    # Experiment directory: <dataset>_<backbone>_<topn>
    exp_dir = ((((dataset_name + '_') + args.backbone) + '_') + str(topn))
    os.makedirs(exp_dir, exist_ok=True)
    if (args.resume != ''):
        net = torch.load(args.resume)
    else:
        net = load_model(backbone=args.backbone, pretrain=True, require_grad=True, classes_num=classes_num, topn=topn)
    if torch.cuda.is_available():
        device = torch.device('cuda')
        net = net.to(device)
        netp = torch.nn.DataParallel(net)
    else:
        device = torch.device('cpu')
        netp = net
    CELoss = nn.CrossEntropyLoss()  # NOTE(review): unused here (smooth_CE is used instead)
    # Backbone trains at lr/10; all other parameters at the full lr.
    deep_paras = [para for (name, para) in net.named_parameters() if ('backbone' not in name)]
    optimizer = optim.SGD([{'params': deep_paras}, {'params': net.backbone.parameters(), 'lr': (args.lr / 10.0)}], lr=args.lr, momentum=0.9, weight_decay=0.0005)
    max_val_acc = 0
    for epoch in range(1, (epochs + 1)):
        print(('\nEpoch: %d' % epoch))
        # Cosine-annealed learning rates, per parameter group.
        optimizer.param_groups[0]['lr'] = cosine_anneal_schedule(epoch, epochs, args.lr)
        optimizer.param_groups[1]['lr'] = cosine_anneal_schedule(epoch, epochs, (args.lr / 10.0))
        net.train()
        num_correct = ([0] * 4)  # correct counts for the four image-level heads
        for (_, (inputs, targets)) in enumerate(trainloader):
            # Skip ragged final batches (part losses assume a full batch).
            if (inputs.shape[0] < batch_size):
                continue
            if torch.cuda.is_available():
                (inputs, targets) = (inputs.to(device), targets.to(device))
            optimizer.zero_grad()
            # y1..y4: image-level logits; yp1..yp4: per-part logits;
            # part_probs: part ranking scores; f*_m/f*: feature pairs for the
            # self-distillation regularizer.
            (y1, y2, y3, y4, yp1, yp2, yp3, yp4, part_probs, f1_m, f1, f2_m, f2, f3_m, f3) = netp(inputs)
            # Image-level losses with increasing label-smoothing confidence.
            loss1 = (smooth_CE(y1, targets, 0.7) * 1)
            loss2 = (smooth_CE(y2, targets, 0.8) * 1)
            loss3 = (smooth_CE(y3, targets, 0.9) * 1)
            loss4 = (smooth_CE(y4, targets, 1) * 1)
            loss_img = (((loss1 + loss2) + loss3) + loss4)
            # Part-level losses: each part inherits the image label.
            targets_parts = targets.unsqueeze(1).repeat(1, topn).view((- 1))
            lossp1 = smooth_CE(yp1, targets_parts, 0.7)
            lossp2 = smooth_CE(yp2, targets_parts, 0.8)
            lossp3 = smooth_CE(yp3, targets_parts, 0.9)
            lossp4 = smooth_CE(yp4, targets_parts, 1)
            # Ranking loss aligns part scores with their classification losses.
            lossp_rank = ranking_loss(part_probs, list_loss(yp4, targets_parts).view(batch_size, topn))
            loss_parts = ((((lossp1 + lossp2) + lossp3) + lossp4) + lossp_rank)
            # Cross-entropy between masked and unmasked feature distributions.
            (p, q) = (F.log_softmax(f1_m, dim=(- 1)), F.softmax(f1, dim=(- 1)))
            loss_reg = (torch.mean((- torch.sum((p * q), dim=(- 1)))) * 0.1)
            (p, q) = (F.log_softmax(f2_m, dim=(- 1)), F.softmax(f2, dim=(- 1)))
            loss_reg += (torch.mean((- torch.sum((p * q), dim=(- 1)))) * 0.1)
            (p, q) = (F.log_softmax(f3_m, dim=(- 1)), F.softmax(f3, dim=(- 1)))
            loss_reg += (torch.mean((- torch.sum((p * q), dim=(- 1)))) * 0.1)
            loss = ((loss_img + loss_parts) + loss_reg)
            # Track training accuracy of every image-level head.
            (_, p1) = torch.max(y1.data, 1)
            (_, p2) = torch.max(y2.data, 1)
            (_, p3) = torch.max(y3.data, 1)
            (_, p4) = torch.max(y4.data, 1)
            num_correct[0] += p1.eq(targets.data).cpu().sum()
            num_correct[1] += p2.eq(targets.data).cpu().sum()
            num_correct[2] += p3.eq(targets.data).cpu().sum()
            num_correct[3] += p4.eq(targets.data).cpu().sum()
            loss.backward()
            optimizer.step()
        total = len(trainset)
        acc1 = ((100.0 * float(num_correct[0])) / total)
        acc2 = ((100.0 * float(num_correct[1])) / total)
        acc3 = ((100.0 * float(num_correct[2])) / total)
        acc4 = ((100.0 * float(num_correct[3])) / total)
        result_str = ('Iteration %d (train) | acc1 = %.5f | acc2 = %.5f | acc3 = %.5f | acc4 = %.5f \n' % (epoch, acc1, acc2, acc3, acc4))
        print(result_str)
        with open((exp_dir + '/results_train.txt'), 'a') as file:
            file.write(result_str)
        # Evaluate every epoch for the first 5, then every 10th epoch.
        if ((epoch < 5) or ((epoch % 10) == 0)):
            (acc1, acc2, acc3, acc4, acc_test) = test(net, testset, batch_size)
            if (acc_test > max_val_acc):
                max_val_acc = acc_test
                # Save from CPU so the checkpoint loads on any device.
                net.cpu()
                torch.save(net.state_dict(), (('./' + exp_dir) + '/model.pth'))
                net.to(device)
            result_str = ('Iteration %d | acc1 = %.5f | acc2 = %.5f | acc3 = %.5f | acc4 = %.5f | acc_test = %.5f \n' % (epoch, acc1, acc2, acc3, acc4, acc_test))
            print(result_str)
            with open((exp_dir + '/results_test.txt'), 'a') as file:
                file.write(result_str)
def create_test_header(earth_model, dataset_id, is_full_disk, is_rapid_scan, good_qual='OK'):
    """Build a fake SEVIRI L1.5 header dictionary for tests.

    Args:
        earth_model: Earth model identifier placed under ``TypeOfEarthModel``.
        dataset_id: Dict with at least a ``'name'`` key; ``'HRV'`` selects the
            HRV reference grid (1 km step), anything else the VIS/IR grid (3 km).
        is_full_disk: Whether the scan covers the full disk.
        is_rapid_scan: Whether the scan is a rapid scan (only consulted when
            ``is_full_disk`` is false).
        good_qual: Value for the ``QQOV`` quality field.

    Returns:
        Nested dict mimicking the native-format file header.
    """
    # Grid selection: HRV channel has 3x the VIS/IR resolution.
    if dataset_id['name'] == 'HRV':
        grid_key = 'ReferenceGridHRV'
        grid_step = 1.0
    else:
        grid_key = 'ReferenceGridVIS_IR'
        grid_step = 3.0

    # Selected-rectangle bounds and image sizes for the three scan geometries.
    if is_full_disk:
        north, east, west, south = 3712, 1, 3712, 1
        n_visir_cols = n_visir_lines = 3712
        n_hrv_cols = n_hrv_lines = 11136
        ssp_lon = 0
    elif is_rapid_scan:
        north, east, west, south = 3712, 1, 3712, 2321
        n_visir_cols = 3712
        n_visir_lines = 1392
        n_hrv_cols = 11136
        n_hrv_lines = 4176
        ssp_lon = 9.5
    else:
        # ROI (region-of-interest) scan: line count derives from the rectangle.
        north, east, west, south = 3574, 78, 2591, 1746
        n_visir_cols = 2516
        n_visir_lines = north - south + 1
        n_hrv_cols = 3 * n_visir_cols
        n_hrv_lines = 3 * n_visir_lines
        ssp_lon = 0

    return {
        '15_MAIN_PRODUCT_HEADER': {'QQOV': {'Name': 'QQOV', 'Value': good_qual}},
        '15_DATA_HEADER': {
            'ImageDescription': {
                grid_key: {'ColumnDirGridStep': grid_step,
                           'LineDirGridStep': grid_step,
                           'GridOrigin': 2},
                'ProjectionDescription': {'LongitudeOfSSP': ssp_lon},
            },
            'GeometricProcessing': {
                'EarthModel': {'TypeOfEarthModel': earth_model,
                               'EquatorialRadius': 6378169.0,
                               'NorthPolarRadius': 6356583.0,
                               'SouthPolarRadius': 6356583.0},
            },
            'SatelliteStatus': {'SatelliteDefinition': {'SatelliteId': 324}},
        },
        '15_SECONDARY_PRODUCT_HEADER': {
            'NorthLineSelectedRectangle': {'Value': north},
            'EastColumnSelectedRectangle': {'Value': east},
            'WestColumnSelectedRectangle': {'Value': west},
            'SouthLineSelectedRectangle': {'Value': south},
            'SelectedBandIDs': {'Value': 'xxxxxxxxxxxx'},
            'NumberColumnsVISIR': {'Value': n_visir_cols},
            'NumberLinesVISIR': {'Value': n_visir_lines},
            'NumberColumnsHRV': {'Value': n_hrv_cols},
            'NumberLinesHRV': {'Value': n_hrv_lines},
        },
    }
def check_environment():
    """Verify that the optional tooling modules are importable.

    Prints a troubleshooting hint and returns False on the first missing
    module; returns True when everything is available.
    """
    import importlib

    requirements = (
        ('websockets', 'failed to import websockets; is src on PYTHONPATH?'),
        ('coverage', 'failed to locate Coverage.py; is it installed?'),
    )
    for module_name, hint in requirements:
        try:
            importlib.import_module(module_name)
        except ImportError:
            print(hint)
            return False
    return True
def bifpn_config(min_level, max_level, weight_method=None):
    """Build the BiFPN fusion-node topology for levels ``min_level..max_level``.

    Returns an OmegaConf config whose ``nodes`` list describes first a
    top-down pass and then a bottom-up pass; each node records its feature
    reduction factor, the ids of the nodes it fuses, and the weighting method.
    """
    cfg = OmegaConf.create()
    method = weight_method or 'fastattn'
    num_levels = max_level - min_level + 1
    # node_ids[level] -> ids of every node produced so far at that level;
    # the initial ids 0..num_levels-1 are the backbone inputs.
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    next_id = itertools.count(num_levels)
    cfg.nodes = []

    # Top-down pass: fuse each level with the latest node of the level above.
    for level in range(max_level - 1, min_level - 1, -1):
        cfg.nodes.append({
            'reduction': 1 << level,
            'inputs_offsets': [node_ids[level][-1], node_ids[level + 1][-1]],
            'weight_method': method,
        })
        node_ids[level].append(next(next_id))

    # Bottom-up pass: fuse all nodes at this level with the latest node below.
    for level in range(min_level + 1, max_level + 1):
        cfg.nodes.append({
            'reduction': 1 << level,
            'inputs_offsets': node_ids[level] + [node_ids[level - 1][-1]],
            'weight_method': method,
        })
        node_ids[level].append(next(next_id))
    return cfg
class SegmentationNet10aTrunk(VGGTrunk):
    """VGG-style convolutional trunk for SegmentationNet10a.

    Builds its layer stack from ``cfg`` via the inherited ``_make_layers``
    and exposes only the feature extractor (no classification head).
    """

    def __init__(self, config, cfg):
        super(SegmentationNet10aTrunk, self).__init__()
        self.batchnorm_track = config.batchnorm_track
        # Even input size required so downsampled feature maps stay aligned.
        assert config.input_sz % 2 == 0
        self.conv_size = 3
        self.pad = 1
        self.cfg = cfg
        # Default to RGB input when the config does not specify channels.
        self.in_channels = getattr(config, 'in_channels', 3)
        self.features = self._make_layers()

    def forward(self, x):
        """Run the convolutional feature stack on ``x``."""
        return self.features(x)
# NOTE(review): the four bare string expressions below are no-ops as written.
# They look like the arguments of stripped-out ``@mock.patch(...)`` decorators
# (the test function takes matching ``*_mock`` parameters in reverse order,
# which is mock.patch's stacking convention) -- confirm against version
# control and restore the decorators.
('PyQt6.QtGui.QAction.triggered')
('beeref.actions.mixin.menu_structure')
('beeref.actions.mixin.actions')
('beeref.actions.mixin.KeyboardSettings.get_shortcuts')
def test_create_recent_files_more_files_than_shortcuts(kb_mock, actions_mock, menu_mock, triggered_mock, qapp):
    """With 15 recent files, only the first 10 get Ctrl+1..Ctrl+0 shortcuts;
    the rest are still listed and enabled but have no shortcut."""
    # Keyboard settings return the supplied default shortcut unchanged.
    kb_mock.side_effect = (lambda group, key, default: default)
    widget = FooWidget()
    widget.settings.get_recent_files.return_value = [os.path.abspath(f'{i}.bee') for i in range(15)]
    # Reduce the menu structure to just the "Open Recent" submenu under test.
    menu_mock.__iter__.return_value = [{'menu': 'Open &Recent', 'items': '_build_recent_files'}]
    widget.build_menu_and_actions()
    triggered_mock.connect.assert_called()
    # All 15 files become actions, regardless of available shortcuts.
    assert (len(widget.actions()) == 15)
    # First entry gets Ctrl+1.
    qaction1 = widget.actions()[0]
    assert (qaction1.text() == '0.bee')
    assert (qaction1.shortcut() == 'Ctrl+1')
    assert (qaction1.isEnabled() is True)
    assert (widget.bee_actions['recent_files_0'] == qaction1)
    # Tenth entry wraps around to Ctrl+0.
    qaction10 = widget.actions()[9]
    assert (qaction10.text() == '9.bee')
    assert (qaction10.shortcut() == 'Ctrl+0')
    assert (qaction10.isEnabled() is True)
    assert (widget.bee_actions['recent_files_9'] == qaction10)
    # Entries beyond the tenth have no shortcut but remain usable.
    qaction15 = widget.actions()[(- 1)]
    assert (qaction15.text() == '14.bee')
    assert (qaction15.shortcut() == '')
    assert (qaction15.isEnabled() is True)
    assert (widget.bee_actions['recent_files_14'] == qaction15)
    # Shortcut settings are only consulted for the 10 shortcut slots.
    assert (kb_mock.call_count == 10)
    kb_mock.assert_has_calls([call('Actions', 'recent_files_0', ['Ctrl+1']), call('Actions', 'recent_files_9', ['Ctrl+0'])], any_order=True)
class TestInit():
    """Construction behaviour of usertypes.NeighborList."""

    def test_empty(self):
        """A NeighborList built without arguments starts out empty."""
        neighbors = usertypes.NeighborList()
        assert neighbors.items == []

    def test_items(self):
        """Items passed to the constructor are stored verbatim."""
        neighbors = usertypes.NeighborList([1, 2, 3])
        assert neighbors.items == [1, 2, 3]

    def test_len(self):
        """len() reflects the number of stored items."""
        assert len(usertypes.NeighborList([1, 2, 3])) == 3

    def test_contains(self):
        """Membership tests check the stored items."""
        neighbors = usertypes.NeighborList([1, 2, 3])
        assert 2 in neighbors
        assert 4 not in neighbors

    def test_invalid_mode(self):
        """An unknown mode keyword raises TypeError."""
        with pytest.raises(TypeError):
            usertypes.NeighborList(mode='blah')
def get_sub_macros(sub: dict[str, str]) -> tuple[str, str]:
    """Render the ``#define`` block and matching ``#undef`` block for a sub-case.

    ``FAIL`` is always defined (quoted via ``lquote_macro``); ``PARAMS`` is
    only emitted when the sub-case provides one.
    """
    defines = [f"#define FAIL {lquote_macro(sub['fail'])}"]
    undefs = ['#undef FAIL']
    if 'params' in sub:
        defines.append(f"#define PARAMS {sub['params']}")
        undefs.append('#undef PARAMS')
    return '\n'.join(defines), '\n'.join(undefs)
class IntelHex(object):
    """Sparse in-memory image of an Intel HEX file.

    Bytes live in ``self._buf`` (dict: address -> byte value); ``padding``
    fills gaps on read-out, and ``start_addr`` holds a CS:IP or EIP start
    record when one was present.
    """

    def __init__(self, source=None):
        """Initialise from a hex file/path, a dict, another IntelHex, or nothing."""
        self.padding = 255
        self.start_addr = None
        self._buf = {}
        self._offset = 0  # running address offset from type-02/04 records
        if source is not None:
            if isinstance(source, StrType) or getattr(source, 'read', None):
                self.loadhex(source)
            elif isinstance(source, dict):
                self.fromdict(source)
            elif isinstance(source, IntelHex):
                self.padding = source.padding
                if source.start_addr:
                    self.start_addr = source.start_addr.copy()
                self._buf = source._buf.copy()
            else:
                raise ValueError('source: bad initializer type')

    def _decode_record(self, s, line=0):
        """Decode one HEX record line; raises a specific error subclass on problems."""
        s = s.rstrip('\r\n')
        if not s:
            return  # blank lines are ignored
        if s[0] == ':':
            try:
                bin = array('B', unhexlify(asbytes(s[1:])))
            except (TypeError, ValueError):
                raise HexRecordError(line=line)
            length = len(bin)
            if length < 5:
                raise HexRecordError(line=line)
        else:
            raise HexRecordError(line=line)
        record_length = bin[0]
        if length != (5 + record_length):
            raise RecordLengthError(line=line)
        addr = bin[1] * 256 + bin[2]
        record_type = bin[3]
        if not (0 <= record_type <= 5):
            raise RecordTypeError(line=line)
        # The record checksum makes all bytes sum to 0 modulo 256.
        crc = sum(bin)
        crc &= 255
        if crc != 0:
            raise RecordChecksumError(line=line)
        if record_type == 0:
            # Data record.
            addr += self._offset
            for i in range_g(4, 4 + record_length):
                if not (self._buf.get(addr, None) is None):
                    raise AddressOverlapError(address=addr, line=line)
                self._buf[addr] = bin[i]
                addr += 1
        elif record_type == 1:
            # End-of-file record.
            if record_length != 0:
                raise EOFRecordError(line=line)
            raise _EndOfFile
        elif record_type == 2:
            # Extended segment address: offset = segment * 16.
            if record_length != 2 or addr != 0:
                raise ExtendedSegmentAddressRecordError(line=line)
            self._offset = (bin[4] * 256 + bin[5]) * 16
        elif record_type == 4:
            # Extended linear address: offset = upper16 << 16.
            if record_length != 2 or addr != 0:
                raise ExtendedLinearAddressRecordError(line=line)
            self._offset = (bin[4] * 256 + bin[5]) * 65536
        elif record_type == 3:
            # Start segment address (CS:IP).
            if record_length != 4 or addr != 0:
                raise StartSegmentAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'CS': bin[4] * 256 + bin[5],
                               'IP': bin[6] * 256 + bin[7]}
        elif record_type == 5:
            # Start linear address (EIP, 32-bit big-endian).
            if record_length != 4 or addr != 0:
                raise StartLinearAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            # FIX: the high-byte weight was truncated in the original
            # ("bin[4] * "); EIP is big-endian 32-bit, so bin[4] carries 2**24.
            self.start_addr = {'EIP': (bin[4] * 16777216 +
                                       bin[5] * 65536 +
                                       bin[6] * 256 +
                                       bin[7])}

    def loadhex(self, fobj):
        """Load data from a HEX file object or file name."""
        if getattr(fobj, 'read', None) is None:
            fobj = open(fobj, 'r')
            fclose = fobj.close
        else:
            fclose = None
        self._offset = 0
        line = 0
        try:
            decode = self._decode_record
            try:
                for s in fobj:
                    line += 1
                    decode(s, line)
            except _EndOfFile:
                pass
        finally:
            if fclose:
                fclose()

    def loadbin(self, fobj, offset=0):
        """Load raw binary data from a file object or file name at *offset*."""
        fread = getattr(fobj, 'read', None)
        if fread is None:
            f = open(fobj, 'rb')
            fread = f.read
            fclose = f.close
        else:
            fclose = None
        try:
            self.frombytes(array('B', asbytes(fread())), offset=offset)
        finally:
            if fclose:
                fclose()

    def loadfile(self, fobj, format):
        """Load from *fobj* in the given format ('hex' or 'bin')."""
        if format == 'hex':
            self.loadhex(fobj)
        elif format == 'bin':
            self.loadbin(fobj)
        else:
            raise ValueError('format should be either "hex" or "bin"; got %r instead' % format)

    # Backward-compatible alias.
    fromfile = loadfile

    def fromdict(self, dikt):
        """Load data from a dict {addr: byte}; key 'start_addr' is special-cased."""
        s = dikt.copy()
        start_addr = s.get('start_addr')
        if start_addr is not None:
            del s['start_addr']
        for k in dict_keys_g(s):
            if (type(k) not in IntTypes) or (k < 0):
                raise ValueError('Source dictionary should have only int keys')
        self._buf.update(s)
        if start_addr is not None:
            self.start_addr = start_addr

    def frombytes(self, bytes, offset=0):
        """Load a sequence of byte values starting at *offset*."""
        for b in bytes:
            self._buf[offset] = b
            offset += 1

    def _get_start_end(self, start=None, end=None, size=None):
        """Resolve (start, end) for read-out, honouring an optional *size*."""
        if (start, end) == (None, None) and self._buf == {}:
            raise EmptyIntelHexError
        if size is not None:
            if None not in (start, end):
                raise ValueError("tobinarray: you can't use start,end and size arguments in the same time")
            if (start, end) == (None, None):
                start = self.minaddr()
            if start is not None:
                end = start + size - 1
            else:
                start = end - size + 1
                if start < 0:
                    raise ValueError('tobinarray: invalid size (%d) for given end address (%d)' % (size, end))
        else:
            if start is None:
                start = self.minaddr()
            if end is None:
                end = self.maxaddr()
            if start > end:
                start, end = end, start
        return start, end

    def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
        """Return data as array('B'); gaps are filled with ``self.padding``."""
        if not isinstance(pad, _DeprecatedParam):
            print("IntelHex.tobinarray: 'pad' parameter is deprecated.")
            if pad is not None:
                print('Please, use IntelHex.padding attribute instead.')
            else:
                print("Please, don't pass it explicitly.")
                print('Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)')
        else:
            pad = None
        return self._tobinarray_really(start, end, pad, size)

    def _tobinarray_really(self, start, end, pad, size):
        """Implementation of tobinarray without the deprecation shim."""
        if pad is None:
            pad = self.padding
        bin = array('B')
        if self._buf == {} and None in (start, end):
            return bin
        if size is not None and size <= 0:
            raise ValueError('tobinarray: wrong value for size')
        start, end = self._get_start_end(start, end, size)
        for i in range_g(start, end + 1):
            bin.append(self._buf.get(i, pad))
        return bin

    def tobinstr(self, start=None, end=None, pad=_DEPRECATED, size=None):
        """Return data as a bytes string."""
        if not isinstance(pad, _DeprecatedParam):
            print("IntelHex.tobinstr: 'pad' parameter is deprecated.")
            if pad is not None:
                print('Please, use IntelHex.padding attribute instead.')
            else:
                print("Please, don't pass it explicitly.")
                print('Use syntax like this: ih.tobinstr(start=xxx, end=yyy, size=zzz)')
        else:
            pad = None
        return self._tobinstr_really(start, end, pad, size)

    def _tobinstr_really(self, start, end, pad, size):
        return array_tobytes(self._tobinarray_really(start, end, pad, size))

    def tobinfile(self, fobj, start=None, end=None, pad=_DEPRECATED, size=None):
        """Write data as raw binary to a file object or file name."""
        if not isinstance(pad, _DeprecatedParam):
            print("IntelHex.tobinfile: 'pad' parameter is deprecated.")
            if pad is not None:
                print('Please, use IntelHex.padding attribute instead.')
            else:
                print("Please, don't pass it explicitly.")
                print('Use syntax like this: ih.tobinfile(start=xxx, end=yyy, size=zzz)')
        else:
            pad = None
        if getattr(fobj, 'write', None) is None:
            fobj = open(fobj, 'wb')
            close_fd = True
        else:
            close_fd = False
        fobj.write(self._tobinstr_really(start, end, pad, size))
        if close_fd:
            fobj.close()

    def todict(self):
        """Return data as a plain dict {addr: byte} (+ 'start_addr' if set)."""
        r = {}
        r.update(self._buf)
        if self.start_addr:
            r['start_addr'] = self.start_addr
        return r

    def addresses(self):
        """Return a sorted list of all occupied addresses."""
        aa = dict_keys(self._buf)
        aa.sort()
        return aa

    def minaddr(self):
        """Return the lowest occupied address, or None when empty."""
        aa = dict_keys(self._buf)
        if aa == []:
            return None
        else:
            return min(aa)

    def maxaddr(self):
        """Return the highest occupied address, or None when empty."""
        aa = dict_keys(self._buf)
        if aa == []:
            return None
        else:
            return max(aa)

    def __getitem__(self, addr):
        """Read one byte (padding for gaps) or slice out a new IntelHex."""
        t = type(addr)
        if t in IntTypes:
            if addr < 0:
                raise TypeError('Address should be >= 0.')
            addresses = dict_keys(self._buf)
            if (not addresses) or (addr > max(addresses)):
                raise IndexError
            return self._buf.get(addr, self.padding)
        elif t == slice:
            addresses = dict_keys(self._buf)
            ih = IntelHex()
            if addresses:
                addresses.sort()
                start = addr.start if addr.start is not None else addresses[0]
                stop = addr.stop if addr.stop is not None else addresses[-1] + 1
                step = addr.step or 1
                for i in range_g(start, stop, step):
                    x = self._buf.get(i)
                    if x is not None:
                        ih[i] = x
            return ih
        else:
            raise TypeError('Address has unsupported type: %s' % t)

    def __setitem__(self, addr, byte):
        """Write one byte, or a sequence of bytes over a slice."""
        t = type(addr)
        if t in IntTypes:
            if addr < 0:
                raise TypeError('Address should be >= 0.')
            self._buf[addr] = byte
        elif t == slice:
            if not isinstance(byte, (list, tuple)):
                raise ValueError('Slice operation expects sequence of bytes')
            start = addr.start
            stop = addr.stop
            step = addr.step or 1
            if None not in (start, stop):
                ra = range_l(start, stop, step)
                if len(ra) != len(byte):
                    raise ValueError('Length of bytes sequence does not match address range')
            elif (start, stop) == (None, None):
                raise TypeError('Unsupported address range')
            elif start is None:
                start = stop - len(byte)
            elif stop is None:
                stop = start + len(byte)
            if start < 0:
                raise TypeError('start address cannot be negative')
            if stop < 0:
                raise TypeError('stop address cannot be negative')
            j = 0
            for i in range_g(start, stop, step):
                self._buf[i] = byte[j]
                j += 1
        else:
            raise TypeError('Address has unsupported type: %s' % t)

    def __delitem__(self, addr):
        """Delete one byte or all existing bytes within a slice."""
        t = type(addr)
        if t in IntTypes:
            if addr < 0:
                raise TypeError('Address should be >= 0.')
            del self._buf[addr]
        elif t == slice:
            addresses = dict_keys(self._buf)
            if addresses:
                addresses.sort()
                start = addr.start or addresses[0]
                stop = addr.stop or (addresses[-1] + 1)
                step = addr.step or 1
                for i in range_g(start, stop, step):
                    x = self._buf.get(i)
                    if x is not None:
                        del self._buf[i]
        else:
            raise TypeError('Address has unsupported type: %s' % t)

    def __len__(self):
        """Number of occupied addresses."""
        return len(dict_keys(self._buf))

    @staticmethod
    def _get_eol_textfile(eolstyle, platform):
        """Return the line ending to write for the given style/platform combo."""
        if eolstyle == 'native':
            return '\n'
        elif eolstyle == 'CRLF':
            # On win32 text mode already expands '\n' to CRLF.
            if platform != 'win32':
                return '\r\n'
            else:
                return '\n'
        else:
            raise ValueError('wrong eolstyle %s' % repr(eolstyle))

    def write_hex_file(self, f, write_start_addr=True, eolstyle='native', byte_count=16):
        """Write the image as Intel HEX records to a file object or file name."""
        if byte_count > 255 or byte_count < 1:
            raise ValueError('wrong byte_count value: %s' % byte_count)
        fwrite = getattr(f, 'write', None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = open(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close
        eol = IntelHex._get_eol_textfile(eolstyle, sys.platform)
        # Translation table to upper-case the hexlify output in one pass.
        if sys.version_info[0] >= 3:
            table = bytes(range_l(256)).upper()
        else:
            table = ''.join(chr(i).upper() for i in range_g(256))
        # Start-address record (type 03 for CS:IP, type 05 for EIP).
        if self.start_addr and write_start_addr:
            keys = dict_keys(self.start_addr)
            keys.sort()
            bin = array('B', asbytes('\x00' * 9))
            if keys == ['CS', 'IP']:
                bin[0] = 4
                bin[1] = 0
                bin[2] = 0
                bin[3] = 3
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 255
                bin[5] = cs & 255
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 255
                bin[7] = ip & 255
                bin[8] = (-sum(bin)) & 255
                fwrite(':' + asstr(hexlify(array_tobytes(bin)).translate(table)) + eol)
            elif keys == ['EIP']:
                bin[0] = 4
                bin[1] = 0
                bin[2] = 0
                bin[3] = 5
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 255
                bin[5] = (eip >> 16) & 255
                bin[6] = (eip >> 8) & 255
                bin[7] = eip & 255
                bin[8] = (-sum(bin)) & 255
                fwrite(':' + asstr(hexlify(array_tobytes(bin)).translate(table)) + eol)
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)
        # Data records, with type-04 extended linear address records as needed.
        addresses = dict_keys(self._buf)
        addresses.sort()
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]
            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0
            cur_addr = minaddr
            cur_ix = 0
            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', asbytes('\x00' * 7))
                    bin[0] = 2
                    bin[1] = 0
                    bin[2] = 0
                    bin[3] = 4
                    high_ofs = int(cur_addr >> 16)
                    b = divmod(high_ofs, 256)
                    bin[4] = b[0]
                    bin[5] = b[1]
                    bin[6] = (-sum(bin)) & 255
                    fwrite(':' + asstr(hexlify(array_tobytes(bin)).translate(table)) + eol)
                while True:
                    # Longest run we can emit without crossing a 64K boundary.
                    low_addr = cur_addr & 65535
                    chain_len = min(byte_count - 1, 65535 - low_addr, maxaddr - cur_addr)
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(addresses, stop_addr, cur_ix,
                                          min(cur_ix + chain_len + 1, addr_len))
                        chain_len = ix - cur_ix
                    else:
                        chain_len = 1
                    bin = array('B', asbytes('\x00' * (5 + chain_len)))
                    b = divmod(low_addr, 256)
                    bin[1] = b[0]
                    bin[2] = b[1]
                    bin[3] = 0
                    try:
                        for i in range_g(chain_len):
                            bin[4 + i] = self._buf[cur_addr + i]
                    except KeyError:
                        # Hole inside the run: truncate the record before it.
                        chain_len = i
                        bin = bin[:5 + i]
                    bin[0] = chain_len
                    bin[4 + chain_len] = (-sum(bin)) & 255
                    fwrite(':' + asstr(hexlify(array_tobytes(bin)).translate(table)) + eol)
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr >> 16)
                    if high_addr > high_ofs:
                        break
        # FIX: the EOF record must be ':00000001FF' per the Intel HEX spec;
        # the original wrote the malformed ':FF'.
        fwrite(':00000001FF' + eol)
        if fclose:
            fclose()

    def tofile(self, fobj, format, byte_count=16):
        """Write in the given format ('hex' or 'bin') to *fobj*."""
        if format == 'hex':
            self.write_hex_file(fobj, byte_count=byte_count)
        elif format == 'bin':
            self.tobinfile(fobj)
        else:
            raise ValueError('format should be either "hex" or "bin"; got %r instead' % format)

    def gets(self, addr, length):
        """Return *length* contiguous bytes at *addr*; error on any gap."""
        a = array('B', asbytes('\x00' * length))
        try:
            for i in range_g(length):
                a[i] = self._buf[addr + i]
        except KeyError:
            raise NotEnoughDataError(address=addr, length=length)
        return array_tobytes(a)

    def puts(self, addr, s):
        """Write the byte string *s* starting at *addr*."""
        a = array('B', asbytes(s))
        for i in range_g(len(a)):
            self._buf[addr + i] = a[i]

    def getsz(self, addr):
        """Return the zero-terminated string starting at *addr* (without the NUL)."""
        i = 0
        try:
            while True:
                if self._buf[addr + i] == 0:
                    break
                i += 1
        except KeyError:
            raise NotEnoughDataError(msg='Bad access at 0x%X: not enough data to read zero-terminated string' % addr)
        return self.gets(addr, i)

    def putsz(self, addr, s):
        """Write *s* at *addr* followed by a terminating zero byte."""
        self.puts(addr, s)
        self._buf[addr + len(s)] = 0

    def find(self, sub, start=None, end=None):
        """Return the address of the first occurrence of *sub*, or -1."""
        sub = bytes(sub)
        for start, end in self[slice(start, end)].segments():
            b = self.gets(start, end - start)
            i = b.find(sub)
            if i != -1:
                return start + i
        return -1

    def dump(self, tofile=None, width=16, withpadding=False):
        """Write a human-readable hex dump (hexdump-style) to *tofile*."""
        if not isinstance(width, int) or width < 1:
            raise ValueError('width must be a positive integer.')
        width = int(width)
        if tofile is None:
            tofile = sys.stdout
        # Start-address summary line.
        if self.start_addr is not None:
            cs = self.start_addr.get('CS')
            ip = self.start_addr.get('IP')
            eip = self.start_addr.get('EIP')
            if eip is not None and cs is None and ip is None:
                tofile.write('EIP = 0x%08X\n' % eip)
            elif eip is None and cs is not None and ip is not None:
                tofile.write('CS = 0x%04X, IP = 0x%04X\n' % (cs, ip))
            else:
                tofile.write('start_addr = %r\n' % self.start_addr)
        addresses = dict_keys(self._buf)
        if addresses:
            addresses.sort()
            minaddr = addresses[0]
            maxaddr = addresses[-1]
            startaddr = (minaddr // width) * width
            endaddr = ((maxaddr // width) + 1) * width
            maxdigits = max(len(hex(endaddr)) - 2, 4)
            templa = '%%0%dX' % maxdigits
            rangewidth = range_l(width)
            if withpadding:
                pad = self.padding
            else:
                pad = None
            for i in range_g(startaddr, endaddr, width):
                tofile.write(templa % i)
                tofile.write(' ')
                s = []
                for j in rangewidth:
                    x = self._buf.get(i + j, pad)
                    if x is not None:
                        tofile.write(' %02X' % x)
                        if 32 <= x < 127:
                            s.append(chr(x))
                        else:
                            s.append('.')
                    else:
                        # Missing byte (no padding requested).
                        tofile.write(' --')
                        s.append(' ')
                tofile.write(' |' + ''.join(s) + '|\n')

    def merge(self, other, overlap='error'):
        """Merge *other* into self; *overlap* is 'error', 'ignore' or 'replace'."""
        if not isinstance(other, IntelHex):
            raise TypeError('other should be IntelHex object')
        if other is self:
            raise ValueError("Can't merge itself")
        if overlap not in ('error', 'ignore', 'replace'):
            raise ValueError("overlap argument should be either 'error', 'ignore' or 'replace'")
        this_buf = self._buf
        other_buf = other._buf
        for i in other_buf:
            if i in this_buf:
                if overlap == 'error':
                    raise AddressOverlapError('Data overlapped at address 0x%X' % i)
                elif overlap == 'ignore':
                    continue
            this_buf[i] = other_buf[i]
        if self.start_addr != other.start_addr:
            if self.start_addr is None:
                self.start_addr = other.start_addr
            elif other.start_addr is None:
                pass
            elif overlap == 'error':
                raise AddressOverlapError('Starting addresses are different')
            elif overlap == 'replace':
                self.start_addr = other.start_addr

    def segments(self, min_gap=1):
        """Return [(start, stop)) half-open runs of data separated by gaps > min_gap."""
        addresses = self.addresses()
        if not addresses:
            return []
        elif len(addresses) == 1:
            return [(addresses[0], addresses[0] + 1)]
        adjacent_differences = [(b - a) for a, b in zip(addresses[:-1], addresses[1:])]
        breaks = [i for i, x in enumerate(adjacent_differences) if x > min_gap]
        endings = [addresses[b] for b in breaks]
        endings.append(addresses[-1])
        beginnings = [addresses[b + 1] for b in breaks]
        beginnings.insert(0, addresses[0])
        return [(a, b + 1) for a, b in zip(beginnings, endings)]

    def get_memory_size(self):
        """Approximate RAM footprint of this object in bytes."""
        n = sys.getsizeof(self)
        n += sys.getsizeof(self.padding)
        n += total_size(self.start_addr)
        n += total_size(self._buf)
        n += sys.getsizeof(self._offset)
        return n
class NamedParamProposal(CompletionProposal):
    """Completion proposal for a keyword argument of a function.

    Displayed as ``<name>=``; keeps the bare argument name and the owning
    function so the default value can be looked up on demand.
    """

    def __init__(self, name, function):
        self.argname = name
        super().__init__('%s=' % name, 'parameter_keyword')
        self._function = function

    def get_default(self):
        """Return the default-value expression for this argument, or None."""
        definfo = functionutils.DefinitionInfo.read(self._function)
        for arg, default in definfo.args_with_defaults:
            if arg == self.argname:
                return default
        return None
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    FIX(review): the ``@dataclass`` decorator was missing even though the class
    uses ``field(...)`` defaults and ``__post_init__`` -- without it the
    ``field()`` sentinel objects would leak through as class attributes and
    the validation in ``__post_init__`` would never run. ``dataclass`` is
    expected to be imported at module level alongside ``field``.
    """

    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
    block_size: Optional[int] = field(default=None, metadata={'help': 'Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. Default to the model max input length for single sentence inputs (take into account special tokens).'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    keep_linebreaks: bool = field(default=True, metadata={'help': 'Whether to keep line breaks when using TXT files or not.'})

    def __post_init__(self):
        """Validate that a data source was supplied and file extensions are supported."""
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            if self.train_file is not None:
                extension = self.train_file.split('.')[-1]
                assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
            if self.validation_file is not None:
                extension = self.validation_file.split('.')[-1]
                assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
def minimum(left, right):
    """Build a symbolic min(left, right), honouring pybamm's smoothing settings.

    Broadcast simplifications are applied first; the operator form then
    depends on ``pybamm.settings.min_max_mode`` ('exact', 'smooth', or the
    softminus fallback), except that constant operands always use the exact
    Minimum so they can be folded.
    """
    left, right = _simplify_elementwise_binary_broadcasts(left, right)

    # Pushing minimum inside a concatenation may simplify further; recurse.
    concatenated = _simplified_binary_broadcast_concatenation(left, right, minimum)
    if concatenated is not None:
        return concatenated

    mode = pybamm.settings.min_max_mode
    k = pybamm.settings.min_max_smoothing
    if mode == 'exact' or (left.is_constant() and right.is_constant()):
        result = Minimum(left, right)
    elif mode == 'smooth':
        result = pybamm.smooth_min(left, right, k)
    else:
        result = pybamm.softminus(left, right, k)
    return pybamm.simplify_if_constant(result)
class MyUnit(AutoUnit[Batch]):
    """AutoUnit for a classification model trained with NLL loss.

    Uses Adadelta with a per-epoch StepLR decay and logs loss/accuracy to
    TensorBoard every ``log_every_n_steps`` steps.
    """

    def __init__(self, *, tb_logger: TensorBoardLogger, train_accuracy: MulticlassAccuracy, log_every_n_steps: int, lr: float, gamma: float, module: torch.nn.Module, device: torch.device, strategy: str, precision: Optional[str], gradient_accumulation_steps: int, detect_anomaly: bool, clip_grad_norm: float) -> None:
        super().__init__(
            module=module,
            device=device,
            strategy=strategy,
            precision=precision,
            gradient_accumulation_steps=gradient_accumulation_steps,
            detect_anomaly=detect_anomaly,
            clip_grad_norm=clip_grad_norm,
        )
        self.tb_logger = tb_logger
        self.lr = lr
        self.gamma = gamma
        self.train_accuracy = train_accuracy
        self.log_every_n_steps = log_every_n_steps

    def configure_optimizers_and_lr_scheduler(self, module: torch.nn.Module) -> Tuple[(torch.optim.Optimizer, Optional[TLRScheduler])]:
        """Adadelta optimizer plus a StepLR that decays by ``gamma`` each epoch."""
        optimizer = Adadelta(module.parameters(), lr=self.lr)
        scheduler = StepLR(optimizer, step_size=1, gamma=self.gamma)
        return optimizer, scheduler

    def compute_loss(self, state: State, data: Batch) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Forward pass + NLL loss; returns (loss, squeezed outputs)."""
        inputs, targets = data
        outputs = torch.squeeze(self.module(inputs))
        loss = torch.nn.functional.nll_loss(outputs, targets)
        return loss, outputs

    def on_train_step_end(self, state: State, data: Batch, step: int, results: TrainStepResults) -> None:
        """Accumulate train accuracy and periodically log loss/accuracy."""
        _, targets = data
        self.train_accuracy.update(results.outputs, targets)
        if step % self.log_every_n_steps == 0:
            self.tb_logger.log('accuracy', self.train_accuracy.compute(), step)
            self.tb_logger.log('loss', results.loss, step)

    def on_train_epoch_end(self, state: State) -> None:
        """Reset the accuracy metric so each epoch is reported independently."""
        super().on_train_epoch_end(state)
        self.train_accuracy.reset()

    def on_eval_step_end(self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: torch.Tensor) -> None:
        """Periodically log the evaluation loss."""
        if step % self.log_every_n_steps == 0:
            self.tb_logger.log('evaluation loss', loss, step)
class TestHVUDataset(BaseTestDataset):

    def test_hvu_dataset(self):
        """HVU annotations parse correctly for frame, video and eval variants."""
        # All fixture clips share the same multi-category tag labels.
        expected_label = dict(
            concept=[250, 131, 42, 51, 57, 155, 122],
            object=[1570, 508],
            event=[16],
            action=[180],
            scene=[206])

        # Frame-based annotations.
        frame_dataset = HVUDataset(
            ann_file=self.hvu_frame_ann_file,
            pipeline=self.frame_pipeline,
            tag_categories=self.hvu_categories,
            tag_category_nums=self.hvu_category_nums,
            filename_tmpl=self.filename_tmpl,
            data_prefix=self.data_prefix,
            start_index=1)
        frame_dir = osp.join(self.data_prefix, 'imgs')
        expected_frame_info = dict(
            frame_dir=frame_dir,
            total_frames=5,
            label=expected_label,
            categories=self.hvu_categories,
            category_nums=self.hvu_category_nums,
            filename_tmpl=self.filename_tmpl,
            start_index=1,
            modality='RGB')
        assert frame_dataset.video_infos == [expected_frame_info] * 2

        # Video-based annotations.
        video_dataset = HVUDataset(
            ann_file=self.hvu_video_ann_file,
            pipeline=self.video_pipeline,
            tag_categories=self.hvu_categories,
            tag_category_nums=self.hvu_category_nums,
            data_prefix=self.data_prefix)
        filename = osp.join(self.data_prefix, 'tmp.mp4')
        expected_video_info = dict(
            filename=filename,
            label=expected_label,
            categories=self.hvu_categories,
            category_nums=self.hvu_category_nums)
        assert video_dataset.video_infos == [expected_video_info] * 2

        # Per-category mean-AP evaluation.
        eval_dataset = HVUDataset(
            ann_file=self.hvu_video_eval_ann_file,
            pipeline=self.video_pipeline,
            tag_categories=self.hvu_categories_for_eval,
            tag_category_nums=self.hvu_category_nums_for_eval,
            data_prefix=self.data_prefix)
        results = [
            np.array([-1.0, 0.0, 1.0, 0.0, 1.0, -0.0, 0.0637848, 0.0, -1.0]),
            np.array([0.0, 1.0, 0.0, 0.0, -2.0, 1.0, 1.0, 0.0, -0.0]),
        ]
        mAP = eval_dataset.evaluate(results)
        assert_array_almost_equal(mAP['action_mAP'], 1.0)
        assert_array_almost_equal(mAP['scene_mAP'], 0.5)
        assert_array_almost_equal(mAP['object_mAP'], 0.75)
def _load_dump_repr_configs(flags, model_parser):
    """Resolve all configs and options needed to dump learned representations.

    Requires a representation set name, a valid ``which_repr`` choice, and at
    least one output write-spec; an id map is only loaded for splits that
    will actually be dumped.
    """
    assert flags.repr_set_name
    assert flags.which_repr in ['model_mu1', 'mu1', 'mu2']
    assert flags.train_repr_wspec or flags.dev_repr_wspec or flags.test_repr_wspec

    exp_dir, set_name, model_conf, train_conf, dataset_conf = _load_configs(
        flags, model_parser, is_train=False)
    # Dumps always operate on the non-overlapping random-access dataset view.
    dataset_conf = get_nonoverlap_ra_dataset_conf(dataset_conf)
    dump_repr_opts = {'which_repr': flags.which_repr}

    train_repr_wspec = flags.train_repr_wspec
    dev_repr_wspec = flags.dev_repr_wspec
    test_repr_wspec = flags.test_repr_wspec
    train_repr_id_map = _load_id_map(flags.train_repr_id_map) if train_repr_wspec else None
    dev_repr_id_map = _load_id_map(flags.dev_repr_id_map) if dev_repr_wspec else None
    test_repr_id_map = _load_id_map(flags.test_repr_id_map) if test_repr_wspec else None

    return (exp_dir, set_name, model_conf, train_conf, dataset_conf,
            flags.repr_set_name, train_repr_id_map, dev_repr_id_map,
            test_repr_id_map, dump_repr_opts, train_repr_wspec,
            dev_repr_wspec, test_repr_wspec)
def mark_only_lora_as_trainable(model: nn.Module, bias: str='none') -> None:
    """Freeze every non-LoRA parameter of *model* in place.

    Args:
        model: Module whose parameters are updated.
        bias: 'none' (freeze all biases too), 'all' (keep every bias
            trainable), or 'lora_only' (keep only biases of LoraLayer
            modules trainable).

    Raises:
        NotImplementedError: For any other ``bias`` value.
    """
    # Freeze everything that is not a LoRA parameter (name-based convention).
    for name, param in model.named_parameters():
        if 'lora_' not in name:
            param.requires_grad = False

    if bias == 'none':
        return
    if bias == 'all':
        for name, param in model.named_parameters():
            if 'bias' in name:
                param.requires_grad = True
    elif bias == 'lora_only':
        for submodule in model.modules():
            if isinstance(submodule, LoraLayer) and getattr(submodule, 'bias', None) is not None:
                submodule.bias.requires_grad = True
    else:
        raise NotImplementedError
# NOTE(review): the bare call below is a no-op expression as written. It looks
# like a mangled registration decorator -- in fairseq this would normally be
# ``@register_model_architecture('linformer_roberta', 'linformer_roberta')``
# applied to ``base_architecture``. Confirm against version control; without
# the decorator this architecture is never registered (and the bare name
# ``_model_architecture`` may not even resolve at import time).
_model_architecture('linformer_roberta', 'linformer_roberta')
def base_architecture(args):
    """Fill in Linformer-specific defaults on ``args`` (keeping any values the
    user already set), then apply the RoBERTa base architecture defaults."""
    args.compressed = getattr(args, 'compressed', 4)
    args.shared_kv_compressed = getattr(args, 'shared_kv_compressed', 0)
    args.shared_layer_kv_compressed = getattr(args, 'shared_layer_kv_compressed', 0)
    args.freeze_compress = getattr(args, 'freeze_compress', 0)
    # Delegate all remaining defaults to the base RoBERTa architecture.
    roberta_base_architecture(args)
def test_use_before_definition():
    """Referencing a binding before its definition raises SchemeException."""
    programs = [
        '\n    #lang pycket\n    x\n    (define x 1)\n    ',
        '\n    #lang pycket\n    x\n    (define x 1)\n    (set! x 2)\n    ',
    ]
    for program in programs:
        with pytest.raises(SchemeException):
            run_mod(program)
class CategoricalGibbsMetropolis(ArrayStep):
    """Metropolis-within-Gibbs step method for categorical/binary variables.

    Each flat dimension of the supplied value variables is updated one at a
    time, with either a uniform proposal over the other categories or a
    proposal proportional to the conditional posterior probabilities.
    """

    name = 'categorical_gibbs_metropolis'

    stats_dtypes_shapes = {'tune': (bool, [])}

    def __init__(self, vars, proposal='uniform', order='random', model=None):
        model = pm.modelcontext(model)
        vars = get_value_vars_from_user_vars(vars, model)
        initial_point = model.initial_point()

        # Build a list of (flat_dim_index, n_categories) over all variables.
        dimcats = []
        for v in vars:
            v_init_val = initial_point[v.name]
            rv_var = model.values_to_rvs[v]
            distr = getattr(rv_var.owner, 'op', None)
            if isinstance(distr, CategoricalRV):
                # The category count is the trailing axis of the probability
                # input; compile a small function to evaluate it.
                k_graph = rv_var.owner.inputs[3].shape[-1]
                (k_graph,) = model.replace_rvs_by_values((k_graph,))
                k = model.compile_fn(k_graph, inputs=model.value_vars, on_unused_input='ignore')(initial_point)
            elif isinstance(distr, BernoulliRV):
                k = 2
            else:
                # FIX: the concatenated message was missing a space
                # ("...binaryfor CategoricalGibbsMetropolis").
                raise ValueError('All variables must be categorical or binary '
                                 'for CategoricalGibbsMetropolis')
            start = len(dimcats)
            dimcats += [(dim, k) for dim in range(start, start + v_init_val.size)]

        if order == 'random':
            self.shuffle_dims = True
            self.dimcats = dimcats
        else:
            if sorted(order) != list(range(len(dimcats))):
                raise ValueError("Argument 'order' has to be a permutation")
            self.shuffle_dims = False
            self.dimcats = [dimcats[j] for j in order]

        if proposal == 'uniform':
            self.astep = self.astep_unif
        elif proposal == 'proportional':
            self.astep = self.astep_prop
        else:
            raise ValueError("Argument 'proposal' should either be 'uniform' or 'proportional'")

        self.tune = True

        super().__init__(vars, [model.compile_logp()])

    def reset_tuning(self):
        # This sampler keeps no tuning state.
        return

    def astep_unif(self, apoint: RaveledVars, *args) -> Tuple[RaveledVars, StatsType]:
        """One full sweep with a uniform proposal over the other categories."""
        logp = args[0]
        point_map_info = apoint.point_map_info
        q0 = apoint.data

        dimcats = self.dimcats
        if self.shuffle_dims:
            nr.shuffle(dimcats)

        q = RaveledVars(np.copy(q0), point_map_info)
        logp_curr = logp(q)

        for dim, k in dimcats:
            # Propose any category except the current one.
            curr_val, q.data[dim] = q.data[dim], sample_except(k, q.data[dim])
            logp_prop = logp(q)
            q.data[dim], accepted = metrop_select(logp_prop - logp_curr, q.data[dim], curr_val)
            if accepted:
                logp_curr = logp_prop

        stats = {'tune': self.tune}
        return q, [stats]

    def astep_prop(self, apoint: RaveledVars, *args) -> Tuple[RaveledVars, StatsType]:
        """One full sweep with proposals proportional to conditional probabilities."""
        logp = args[0]
        point_map_info = apoint.point_map_info
        q0 = apoint.data

        dimcats = self.dimcats
        if self.shuffle_dims:
            nr.shuffle(dimcats)

        q = RaveledVars(np.copy(q0), point_map_info)
        logp_curr = logp(q)

        for dim, k in dimcats:
            logp_curr = self.metropolis_proportional(q, logp, logp_curr, dim, k)

        return q, []

    def astep(self, apoint: RaveledVars, *args) -> Tuple[RaveledVars, StatsType]:
        # Replaced in __init__ by astep_unif or astep_prop.
        raise NotImplementedError()

    def metropolis_proportional(self, q, logp, logp_curr, dim, k):
        """Update one dimension with a proportional proposal; returns new logp."""
        given_cat = int(q.data[dim])
        log_probs = np.zeros(k)
        log_probs[given_cat] = logp_curr
        candidates = list(range(k))
        # Evaluate logp for every alternative category of this dimension.
        for candidate_cat in candidates:
            if candidate_cat != given_cat:
                q.data[dim] = candidate_cat
                log_probs[candidate_cat] = logp(q)
        probs = scipy.special.softmax(log_probs, axis=0)
        prob_curr, probs[given_cat] = probs[given_cat], 0.0
        probs /= 1.0 - prob_curr
        proposed_cat = nr.choice(candidates, p=probs)
        accept_ratio = (1.0 - prob_curr) / (1.0 - probs[proposed_cat])
        if not np.isfinite(accept_ratio) or nr.uniform() >= accept_ratio:
            # Reject: restore the original category.
            q.data[dim] = given_cat
            return logp_curr
        q.data[dim] = proposed_cat
        return log_probs[proposed_cat]

    # FIX(review): restored @staticmethod — `competence(var)` takes no self
    # and follows the step-method competence convention.
    @staticmethod
    def competence(var):
        distribution = getattr(var.owner, 'op', None)
        if isinstance(distribution, CategoricalRV):
            try:
                k = var.owner.inputs[3].shape[-1].eval()
                if k > 2:
                    return Competence.IDEAL
            except MissingInputError:
                pass
            return Competence.COMPATIBLE
        if isinstance(distribution, BernoulliRV):
            return Competence.COMPATIBLE
        return Competence.INCOMPATIBLE
def matplotlib_plt(scatters, title, ylabel, output_file, limits=None, show=False, figsize=None):
    """Plot rate-distortion scatters: dashed lines for hybrid codecs, solid otherwise.

    Each entry of `scatters` is a dict with 'name', 'xs', 'ys'. Optionally
    fixes axis `limits`, shows the figure, and/or saves it to `output_file`.
    """
    hybrid_matches = ['x26', 'VTM', 'HM', 'WebP', 'AV1']
    if figsize is None:
        figsize = (9, 6)
    fig, ax = plt.subplots(figsize=figsize)
    for sc in scatters:
        # BUG FIX: linestyle was a sticky loop-carried variable — once a
        # hybrid codec was seen, every later curve was also dashed.
        # Decide the style per curve instead.
        if any(x in sc['name'] for x in hybrid_matches):
            linestyle = '--'
        else:
            linestyle = '-'
        ax.plot(sc['xs'], sc['ys'], marker='.', linestyle=linestyle, linewidth=0.7, label=sc['name'])
    ax.set_xlabel('Bit-rate [kbps]')
    ax.set_ylabel(ylabel)
    ax.grid()
    if limits is not None:
        ax.axis(limits)
    ax.legend(loc='lower right')
    if title:
        ax.title.set_text(title)
    if show:
        plt.show()
    if output_file:
        fig.savefig(output_file, dpi=300)
# FIX(review): the decorator was garbled to a bare `.parametrize(...)`;
# restored the conventional pytest parametrization over `prune_assignments`.
@pytest.mark.parametrize('min_role,role_list,projects', prune_assignments)
def test_prune_projects_output(db, settings, min_role, role_list, projects):
    """`prune_projects` should report exactly the projects lacking `min_role`."""
    stdout, stderr = io.StringIO(), io.StringIO()
    instances = Project.objects.filter(id__in=projects).all()
    call_command('prune_projects', '--min_role', min_role, stdout=stdout, stderr=stderr)
    assert stdout.getvalue() == f'''Found projects without {role_list}: {get_prune_output(instances)}'''
    assert not stderr.getvalue()
class ResNet50vd_samll(nn.Module):
    """Small ResNet-50-vd backbone wrapper.

    NOTE(review): class name typo ('samll') kept — callers reference it.
    """

    def __init__(self, cout=64, idx=0):
        # BUG FIX: super() previously named the unrelated class `ResNet50vd`,
        # which raises NameError / binds the wrong class; use zero-arg super().
        super().__init__()
        self.cout = cout
        self.idx = idx
        # NOTE(review): `layers` is not defined in this scope — presumably a
        # module-level constant; verify it exists where this class lives.
        self.resnet50vd = ResNet(channels=[32, 64, 128, 256], cout=cout, idx=idx, block=Bottleneck, layers=layers, stem_width=32, stem_type='deep', avg_down=True, bool_DeformableConv2d=False)

    def forward(self, x):
        """Run the wrapped backbone on `x`."""
        x = self.resnet50vd(x)
        return x
def _change_state(state: EnvironmentState, new_node: GraphNode, dest_node: Node, add_changers: List[StateChanger]):
    """Place `new_node` into the environment state.

    Adds the node, an ON edge to `dest_node`, and CLOSE edges in both
    directions, then applies any caller-supplied extra changers.
    """
    base_changers = [
        AddNode(new_node),
        AddEdges(NodeInstance(new_node), Relation.ON, NodeInstance(dest_node)),
        AddEdges(NodeInstance(new_node), Relation.CLOSE, NodeInstance(dest_node), add_reverse=True),
    ]
    state.apply_changes(base_changers + list(add_changers))
def unmarshal_webhook_response(request: WebhookRequest, response: Response, spec: SchemaPath, base_url: Optional[str]=None, cls: Optional[WebhookResponseUnmarshallerType]=None, **unmarshaller_kwargs: Any) -> ResponseUnmarshalResult:
    """Unmarshal a webhook response against `spec`, raising on any errors."""
    config = Config(
        server_base_url=base_url,
        webhook_response_unmarshaller_cls=cls or _UNSET,
        **unmarshaller_kwargs,
    )
    api = OpenAPI(spec, config=config)
    result = api.unmarshal_webhook_response(request, response)
    # Surface validation/unmarshalling problems immediately.
    result.raise_for_errors()
    return result
def check_precommit_requirements() -> None:
    """Verify every pre-commit hook is pinned identically in requirements-tests.txt."""
    txt_requirements = get_txt_requirements()
    precommit_requirements = get_precommit_requirements()
    no_txt_entry_msg = 'All pre-commit requirements must also be listed in `requirements-tests.txt` (missing {requirement!r})'
    for requirement, specifier in precommit_requirements.items():
        # Mirror repos are named `<tool>-pre-commit[-mirror]`; compare by tool name.
        if requirement in {'ruff-pre-commit', 'black-pre-commit-mirror'}:
            requirement = requirement.split('-')[0]
        assert requirement in txt_requirements, no_txt_entry_msg.format(requirement=requirement)
        specifier_mismatch = f'Specifier "{specifier}" for {requirement!r} in `.pre-commit-config.yaml` does not match specifier "{txt_requirements[requirement]}" in `requirements-tests.txt`'
        assert specifier == txt_requirements[requirement], specifier_mismatch
class FlatSim(nn.Module):
    """Trilinear similarity between a sequence `x` and a single vector `y`.

    Each position of x is scored against y with a linear layer over
    [x, y, x*y]; masked positions receive -inf.
    """

    def __init__(self, x_size, y_size, opt={}, prefix='seqatt', dropout=None):
        super(FlatSim, self).__init__()
        assert x_size == y_size
        self.opt = opt
        # BUG FIX: `prefix` was never stored, but the dropout branch below
        # reads `self.prefix`, raising AttributeError when dropout is None.
        self.prefix = prefix
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.linear = nn.Linear(x_size * 3, 1)
        if self.weight_norm_on:
            self.linear = weight_norm(self.linear)
        if dropout is None:
            self.dropout = DropoutWrapper(opt.get('{}_dropout_p'.format(self.prefix), 0))
        else:
            self.dropout = dropout

    def forward(self, x, y, x_mask):
        """Score each position of x against y.

        Assumes x is (batch, seq, dim), y is (batch, dim), x_mask is
        (batch, seq) with True at masked positions — TODO confirm with callers.
        Returns (batch, seq) scores with -inf where masked.
        """
        x = self.dropout(x)
        y = self.dropout(y)
        y = y.unsqueeze(1).expand_as(x)
        flat_x = torch.cat([x, y, x * y], 2).contiguous().view(x.size(0) * x.size(1), -1)
        flat_scores = self.linear(flat_x)
        scores = flat_scores.contiguous().view(x.size(0), -1)
        scores.data.masked_fill_(x_mask.data, -float('inf'))
        return scores
def test(env, pg_reinforce, n=50):
    """Run `n` evaluation dialogs; return per-dialog rewards, lengths, successes."""
    reward_list = []
    dialogLen_list = []
    success_list = []
    for _ in range(n):
        # The agent's reward buffer must be drained between dialogs.
        assert not pg_reinforce.reward_buffer
        reward, dialog_len, success = run_one_dialog(env, pg_reinforce)
        assert success is not None
        reward_list.append(reward)
        dialogLen_list.append(dialog_len)
        success_list.append(int(success))
    return (reward_list, dialogLen_list, success_list)
# FIX(review): decorator garbled to a bare `(frozen=True)`; restored the
# dataclass decorator implied by the `dataclasses.field(...)` defaults.
@dataclasses.dataclass(frozen=True)
class DreadConfiguration(BaseConfiguration):
    """Layout configuration for Metroid Dread randomization."""

    teleporters: DreadTeleporterConfiguration
    energy_per_tank: int = dataclasses.field(metadata={'min': 1, 'max': 1000, 'precision': 1})
    immediate_energy_parts: bool
    hanubia_shortcut_no_grapple: bool
    hanubia_easier_path_to_itorash: bool
    x_starts_released: bool
    raven_beak_damage_table_handling: DreadRavenBeakDamageMode
    allow_highly_dangerous_logic: bool
    nerf_power_bombs: bool
    april_fools_hints: bool
    artifacts: DreadArtifactConfig
    # None disables the corresponding environmental damage override.
    constant_heat_damage: int | None = dataclasses.field(metadata={'min': 0, 'max': 1000, 'precision': 1})
    constant_cold_damage: int | None = dataclasses.field(metadata={'min': 0, 'max': 1000, 'precision': 1})
    constant_lava_damage: int | None = dataclasses.field(metadata={'min': 0, 'max': 1000, 'precision': 1})

    # FIX(review): restored @classmethod — the method takes `cls`.
    @classmethod
    def game_enum(cls) -> RandovaniaGame:
        return RandovaniaGame.METROID_DREAD

    def unsupported_features(self) -> list[str]:
        """Extend the base list with any enabled hidden-from-UI tricks."""
        result = super().unsupported_features()
        gd = default_database.game_description_for(self.game)
        for trick in gd.resource_database.trick:
            if trick.hide_from_ui and (self.trick_level.level_for_trick(trick) != LayoutTrickLevel.DISABLED):
                result.append(f'Enabled {trick.long_name}')
        return result
class Migration(migrations.Migration):
    # Auto-generated Django migration: attaches verbose_name metadata to the
    # petition/signature fields. No schema semantics change besides labels
    # (and the explicit on_delete/max_length already implied by the models).

    dependencies = [('petition', '0004_auto__0002')]

    operations = [
        migrations.AlterField(model_name='petition', name='title', field=tinymce.models.HTMLField(verbose_name='Title')),
        migrations.AlterField(model_name='signature', name='confirmed', field=models.BooleanField(default=False, verbose_name='Confirmed')),
        migrations.AlterField(model_name='signature', name='date', field=models.DateTimeField(auto_now_add=True, verbose_name='Date')),
        migrations.AlterField(model_name='signature', name='email', field=models.EmailField(max_length=254, verbose_name='Email address')),
        migrations.AlterField(model_name='signature', name='first_name', field=models.CharField(max_length=50, verbose_name='First name')),
        migrations.AlterField(model_name='signature', name='last_name', field=models.CharField(max_length=50, verbose_name='Last name')),
        migrations.AlterField(model_name='signature', name='petition', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='petition.Petition', verbose_name='Petition')),
        migrations.AlterField(model_name='signature', name='phone', field=models.CharField(blank=True, max_length=20, verbose_name='Phone number')),
        migrations.AlterField(model_name='signature', name='subscribed_to_mailinglist', field=models.BooleanField(default=False, verbose_name='Subscribed to mailing list')),
    ]
class AIFF(FileType):
    """An AIFF audio file carrying ID3v2 tags."""

    _mimes = ['audio/aiff', 'audio/x-aiff']

    # FIX(review): restored @staticmethod — the original `def score(filename,
    # fileobj, header)` takes no self and is the FileType scoring hook.
    @staticmethod
    def score(filename, fileobj, header):
        # FORM magic counts double; each matching extension adds one.
        filename = filename.lower()
        return ((header.startswith(b'FORM') * 2) + endswith(filename, b'.aif') + endswith(filename, b'.aiff') + endswith(filename, b'.aifc'))

    def add_tags(self):
        """Attach an empty ID3 tag; raises `error` if one already exists."""
        if self.tags is None:
            self.tags = _IFFID3()
        else:
            raise error('an ID3 tag already exists')

    # FIX(review): the decorators were garbled to `_error(IOError, error)` and
    # a bare `()`; restored the mutagen convention — confirm `convert_error`
    # and `loadfile` are imported in this module's header.
    @convert_error(IOError, error)
    @loadfile()
    def load(self, filething, **kwargs):
        """Load stream info and tags from `filething`."""
        fileobj = filething.fileobj
        try:
            self.tags = _IFFID3(fileobj, **kwargs)
        except ID3NoHeaderError:
            self.tags = None
        except ID3Error as e:
            raise error(e)
        else:
            self.tags.filename = self.filename
        fileobj.seek(0, 0)
        self.info = AIFFInfo(fileobj)
def compare_wyckoffs(num1, num2, dim=3):
    """Return True if the general Wyckoff position of group `num2` contains
    that of `num1` (rotation parts compared elementwise).

    `dim` selects the symmetry family: 3=space, 2=layer, 1=rod, 0=point group.
    Returns None (after printing) for an invalid `num1` sentinel, False for an
    invalid `num2` sentinel.
    """
    from numpy import allclose

    if num1 == '???':
        print('Error: invalid value for num1 passed to compare_wyckoffs')
        return
    if num2 == '???':
        return False
    if dim == 3:
        from pyxtal.symmetry import get_wyckoffs
        g1 = get_wyckoffs(num1)[0]
        g2 = get_wyckoffs(num2)[0]
    elif dim == 2:
        from pyxtal.symmetry import get_layer
        g1 = get_layer(num1)[0]
        g2 = get_layer(num2)[0]
    elif dim == 1:
        from pyxtal.symmetry import get_rod
        g1 = get_rod(num1)[0]
        g2 = get_rod(num2)[0]
    elif dim == 0:
        from pyxtal.symmetry import get_point
        g1 = get_point(num1)[0]
        g2 = get_point(num2)[0]
    else:
        # BUG FIX: previously fell through with g1/g2 unbound -> NameError.
        raise ValueError('dim must be 0, 1, 2 or 3, got {}'.format(dim))

    if len(g2) > len(g1):
        return True
    for i, op2 in enumerate(g2):
        op1 = g1[i]
        m1 = op1.rotation_matrix
        m2 = op2.rotation_matrix
        if not allclose(m1, m2):
            return False
    return True
class ByteBuffer():
    """FIFO byte queue backed by a deque of fixed-size bytearray chunks.

    Appends fill the last chunk up to `chunk_size` before opening a new one;
    popleft drains from the front, discarding emptied chunks.
    """

    def __init__(self, chunk_size=65536):
        self._chunk_size = chunk_size
        self._deque = collections.deque([bytearray()])
        self._size = 0

    def append(self, data):
        """Append `data` (bytes-like) to the tail of the buffer."""
        offset = 0
        total = len(data)
        while offset < total:
            tail = self._deque[-1]
            room = self._chunk_size - len(tail)
            take = min(room, total - offset)
            tail.extend(data[offset:offset + take])
            # Open a fresh chunk once the tail is full.
            if len(tail) == self._chunk_size:
                self._deque.append(bytearray())
            offset += take
            self._size += take

    def popleft(self, amount):
        """Remove and return the first `amount` bytes; raises ValueError if short."""
        if amount > self._size:
            raise ValueError('Trying to extract {} bytes from ByteBuffer of length {}'.format(amount, self._size))
        out = bytearray()
        remaining = amount
        while remaining > 0:
            head = self._deque[0]
            take = min(remaining, len(head))
            out.extend(head[:take])
            self._deque[0] = head[take:]
            # Drop an emptied chunk, but always keep one chunk around.
            if not self._deque[0] and len(self._deque) > 1:
                self._deque.popleft()
            remaining -= take
            self._size -= take
        return bytes(out)

    def __len__(self):
        return self._size
class Version(Base):
    """REDCap API wrapper exposing the server version."""

    def export_version(self) -> Optional[semantic_version.Version]:
        """Return the server's version, or None if the reply isn't valid semver."""
        payload = self._initialize_payload('version')
        raw_version = self._call_api(payload, return_type='str')
        if not semantic_version.validate(raw_version):
            return None
        return semantic_version.Version(raw_version)
def handle_set_suction(req):
    """ROS service handler: toggle suction over serial ('g' = on, 's' = off)."""
    try:
        turn_on = bool(req.data)
        ser.write(b'g' if turn_on else b's')
        message = 'Turned on' if turn_on else 'Turned off'
    except Exception as exc:
        # Report serial failures to the caller instead of crashing the node.
        return SetBoolResponse(success=False, message=str(exc))
    return SetBoolResponse(success=True, message=message)
def aggregate(epochs, uuid, start_time, train_time, w_compressed):
    """Fold a received local model update into the shared global model.

    Records the reporting trainer's timing under "<uuid>-<epochs>", optionally
    mimics an aggregator under DDoS (randomly dropping the response while
    still bumping the version), then fades the decompressed local weights
    into the global model and refreshes its compressed copy, version and hash.

    NOTE(review): depends heavily on module-level globals guarded by `lock`;
    `g_train_global_model_compressed` and `global_model_hash` are written
    outside the lock — presumably a single aggregator thread, verify.
    """
    global g_start_time
    global g_train_time
    global g_train_global_model
    global g_train_global_model_compressed
    global g_train_global_model_version
    global global_model_hash
    logger.debug('Received a train_ready.')
    lock.acquire()
    # Timing bookkeeping is keyed by trainer uuid + epoch.
    key = ((str(uuid) + '-') + str(epochs))
    g_start_time[key] = start_time
    g_train_time[key] = train_time
    lock.release()
    # DDoS simulation window: active until the configured duration of
    # global-model versions has elapsed (0 = always active).
    if ((args.ddos_duration == 0) or (args.ddos_duration > g_train_global_model_version)):
        logger.debug('Mimic the aggregator under DDoS attacks!')
        if (random.random() < args.ddos_no_response_percent):
            logger.debug('Unfortunately, the aggregator does not response to the local update gradients')
            # Drop this update entirely, but still advance the version.
            lock.acquire()
            g_train_global_model_version += 1
            lock.release()
            return
    logger.debug('Aggregate global model after received a new local model.')
    w_local = utils.util.decompress_tensor(w_compressed)
    if (g_train_global_model is not None):
        # Fade the local update into the existing global model.
        w_glob = FadeFedAvg(g_train_global_model, w_local, 1.0)
        intermediate_acc_record(w_glob)
        # NOTE(review): written before taking the lock — see docstring.
        g_train_global_model_compressed = utils.util.compress_tensor(w_glob)
        lock.acquire()
        g_train_global_model = w_glob
        g_train_global_model_version += 1
        lock.release()
        global_model_hash = utils.util.generate_md5_hash(w_glob)
        logger.debug(('As a committee leader, calculate new global model hash: ' + global_model_hash))
def find_all_batch_norms_to_fold(connected_graph: ConnectedGraph) -> Tuple[List[Tuple[NodeProto, NodeProto]], List[Tuple[NodeProto, NodeProto]]]:
    """Find (conv/linear, BN) pairs eligible for folding.

    Returns two lists: pairs where the BN follows the layer (conv_bn_pairs)
    and pairs where the BN precedes it (bn_conv_pairs). Each BN is folded at
    most once; invalid combinations are logged and skipped.
    """
    bn_info_by_node = _find_conv_bn_pairs(connected_graph)
    model = connected_graph.model
    folded_bns = set()
    ordered_nodes = get_ordered_conv_linears(connected_graph)

    # Pass 1: fold a following (output-side) BN into the layer.
    conv_bn_pairs = []
    for node in ordered_nodes:
        if node not in bn_info_by_node:
            continue
        bn_info = bn_info_by_node[node]
        bn = bn_info.output_bn
        if bn and bn not in folded_bns:
            if is_valid_bn_fold(node.get_module(), model, True):
                conv_bn_pairs.append((node.get_module(), bn.get_module()))
                folded_bns.add(bn)
            else:
                logger.info('...... invalid combination to fold %s', [node.name, bn.name])

    # Pass 2: fold a preceding (input-side) BN into the layer.
    bn_conv_pairs = []
    for node in ordered_nodes:
        if node not in bn_info_by_node:
            continue
        bn_info = bn_info_by_node[node]
        bn = bn_info.input_bn
        if bn and bn not in folded_bns:
            if is_valid_bn_fold(node.get_module(), model, False):
                bn_conv_pairs.append((bn.get_module(), node.get_module()))
                folded_bns.add(bn)
            else:
                logger.info('...... invalid combination to fold %s', [bn.name, node.name])

    return (conv_bn_pairs, bn_conv_pairs)
# FIX(review): decorator garbled to a bare `(Participant)`; restored the
# standard Django admin registration decorator.
@admin.register(Participant)
class ParticipantAdmin(admin.ModelAdmin):
    """Admin for conference participants, with photo preview and user search."""

    form = ParticipantForm
    list_display = ('user_display_name', 'conference')
    list_filter = ('conference',)
    fieldsets = ((None, {'fields': ('conference', 'user', 'photo', 'photo_preview', 'bio', 'website', 'twitter_handle', 'instagram_handle', 'linkedin_url', 'facebook_url', 'mastodon_handle', 'speaker_level', 'previous_talk_video')}),)
    readonly_fields = ('photo_preview', 'user_display_name')
    autocomplete_fields = ('user',)

    def user_display_name(self, obj):
        # Shown in list_display; falls back to None for a missing obj.
        if obj:
            return obj.user.display_name

    def photo_preview(self, obj):
        # Inline <img> preview; `obj.photo` is used directly as the URL.
        if obj:
            return mark_safe(f'<img src="{obj.photo}" width="200" />')
class DataLoaderIter(object):
    """Iterates once over the DataLoader's dataset, in sampler order.

    With num_workers > 0, batches are produced by worker processes and may
    arrive out of order; `reorder_dict` buffers them until their turn.
    """

    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            # BUG FIX: the seed's upper bound was missing
            # (`np.random.randint(0, , dtype='uint32')` is a syntax error).
            # Seed each worker with a random uint32 (high bound 2**32,
            # exclusive), matching the reference PyTorch implementation.
            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop,
                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn,
                          np.random.randint(0, 4294967296, dtype='uint32')))
                for _ in range(self.num_workers)]

            for w in self.workers:
                w.daemon = True  # ensure workers die with the main process
                w.start()

            if self.pin_memory:
                # Route worker output through a pinning thread.
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(target=_pin_memory_loop, args=(in_data, self.data_queue, self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # Prime the pipeline with two batches per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()

    def __len__(self):
        return len(self.batch_sampler)

    def __next__(self):
        if self.num_workers == 0:
            # Synchronous path: load and collate in this process.
            indices = next(self.sample_iter)
            batch = self.collate_fn([self.dataset[i] for i in indices])
            if self.pin_memory:
                batch = pin_memory_batch(batch)
            return batch

        # Serve an already-received out-of-order batch if it's next in line.
        if self.rcvd_idx in self.reorder_dict:
            batch = self.reorder_dict.pop(self.rcvd_idx)
            return self._process_next_batch(batch)

        if self.batches_outstanding == 0:
            self._shutdown_workers()
            raise StopIteration

        while True:
            assert (not self.shutdown) and (self.batches_outstanding > 0)
            idx, batch = self.data_queue.get()
            self.batches_outstanding -= 1
            if idx != self.rcvd_idx:
                # Arrived early; stash until its turn.
                self.reorder_dict[idx] = batch
                continue
            return self._process_next_batch(batch)

    next = __next__  # Python 2 compatibility alias

    def __iter__(self):
        return self

    def _put_indices(self):
        """Send the next batch of indices to a worker, if any remain."""
        assert self.batches_outstanding < 2 * self.num_workers
        indices = next(self.sample_iter, None)
        if indices is None:
            return
        self.index_queue.put((self.send_idx, indices))
        self.batches_outstanding += 1
        self.send_idx += 1

    def _process_next_batch(self, batch):
        """Advance the receive cursor, refill the pipeline, re-raise worker errors."""
        self.rcvd_idx += 1
        self._put_indices()
        if isinstance(batch, ExceptionWrapper):
            raise batch.exc_type(batch.exc_msg)
        return batch

    def __getstate__(self):
        raise NotImplementedError('DataLoaderIterator cannot be pickled')

    def _shutdown_workers(self):
        if not self.shutdown:
            self.shutdown = True
            self.done_event.set()
            # One sentinel per worker tells each loop to exit.
            for _ in self.workers:
                self.index_queue.put(None)

    def __del__(self):
        if self.num_workers > 0:
            self._shutdown_workers()
def test_cmdloop_without_rawinput():
    """With rawinput disabled, cmdloop still prints the intro before exiting."""
    fake_argv = ['prog']
    with mock.patch.object(sys, 'argv', fake_argv):
        app = CreateOutsimApp()
        app.use_rawinput = False
        app.echo = False
        app.intro = 'Hello World, this is an intro ...'
        # Feed 'quit' whenever input() is consulted.
        builtins.input = mock.MagicMock(name='input', return_value='quit')
        expected = app.intro + '\n'
        with pytest.raises(OSError):
            app.cmdloop()
        assert app.stdout.getvalue() == expected
class BaseOptions():
    """Defines command-line options shared by train and test phases.

    Subclasses are expected to set `self.isTrain` and may extend
    `initialize` with phase-specific options.
    """

    def __init__(self):
        # The parser is built lazily in gather_options().
        self.initialized = False

    def initialize(self, parser):
        """Register all shared options on `parser` and return it."""
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--loadSize', type=int, default=286, help='scale images to this size')
        parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--which_model_netD', type=str, default='basic', help='selects model to use for netD')
        parser.add_argument('--which_model_netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        parser.add_argument('--model', type=str, default='motion', help='chooses which model to use. motion, test')
        parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
        parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        # BUG FIX: the default URL string was truncated (`default=' help=...`),
        # leaving a syntax error; restored to the conventional visdom default.
        parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')
        parser.add_argument('--crop', metavar='pixels', type=int, default=None, help='patch size')
        parser.add_argument('--evaluation', action='store_true', default=False, help='Argument to train model (default: False)')
        # NOTE(review): (5 * 3.) / 180 looks like a mangled degrees-to-radians
        # constant (5 * pi / 180) — confirm against project history.
        parser.add_argument('--rotate', metavar='rad', type=float, default=((5 * 3.) / 180), help='random rotate')
        parser.add_argument('--spacing', metavar='mm', type=float, default=None, help='resize image to specific voxel spacing')
        parser.add_argument('--border_ratio', metavar='ratio (fraction)', type=float, default=(5.0 / 180), help='set image border ratio, border will be cropped randomly')
        parser.add_argument('--classes', metavar='0 C1 ... CN', type=int, default=[0, 1], nargs='*', help='classes to clasify')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the parser (base + model + dataset options) and parse argv.

        NOTE(review): relies on `self.isTrain`, which subclasses must define.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # Let the chosen model and dataset add/override their own options.
        opt, _ = parser.parse_known_args()
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Pretty-print options (marking non-defaults) and save them to opt.txt."""
        message = ''
        message += ' Options \n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)
        # Persist alongside the checkpoints for reproducibility.
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse options, apply the suffix, print/save them, set up GPU ids."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # Convert the comma-separated string into a list of ints; -1 = CPU.
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                opt.gpu_ids.append(id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
class Defaults():
    """Gathers default values for frequently used bot-method parameters.

    All values are read-only after construction: every property setter
    raises AttributeError.

    FIX(review): the `@property` / `@<name>.setter` decorators were garbled
    in the source (e.g. `_mode.setter` for `@parse_mode.setter`); restored
    per the obvious read-only property pattern the bodies imply.
    """

    __slots__ = ('_tzinfo', '_disable_web_page_preview', '_block', '_quote', '_disable_notification', '_allow_sending_without_reply', '_parse_mode', '_api_defaults', '_protect_content')

    def __init__(self, parse_mode: Optional[str]=None, disable_notification: Optional[bool]=None, disable_web_page_preview: Optional[bool]=None, quote: Optional[bool]=None, tzinfo: datetime.tzinfo=UTC, block: bool=True, allow_sending_without_reply: Optional[bool]=None, protect_content: Optional[bool]=None):
        self._parse_mode: Optional[str] = parse_mode
        self._disable_notification: Optional[bool] = disable_notification
        self._disable_web_page_preview: Optional[bool] = disable_web_page_preview
        self._allow_sending_without_reply: Optional[bool] = allow_sending_without_reply
        self._quote: Optional[bool] = quote
        self._tzinfo: datetime.tzinfo = tzinfo
        self._block: bool = block
        self._protect_content: Optional[bool] = protect_content

        # Gather the non-None values that are forwarded to API calls.
        self._api_defaults = {}
        for kwarg in ('parse_mode', 'explanation_parse_mode', 'disable_notification', 'disable_web_page_preview', 'allow_sending_without_reply', 'protect_content'):
            value = getattr(self, kwarg)
            if value is not None:
                self._api_defaults[kwarg] = value

    def __hash__(self) -> int:
        return hash((self._parse_mode, self._disable_notification, self._disable_web_page_preview, self._allow_sending_without_reply, self._quote, self._tzinfo, self._block, self._protect_content))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Defaults):
            return all(getattr(self, attr) == getattr(other, attr) for attr in self.__slots__)
        return False

    @property
    def api_defaults(self) -> Dict[str, Any]:
        return self._api_defaults

    @property
    def parse_mode(self) -> Optional[str]:
        return self._parse_mode

    @parse_mode.setter
    def parse_mode(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to parse_mode after initialization.')

    @property
    def explanation_parse_mode(self) -> Optional[str]:
        # Intentionally mirrors parse_mode.
        return self._parse_mode

    @explanation_parse_mode.setter
    def explanation_parse_mode(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to explanation_parse_mode after initialization.')

    @property
    def disable_notification(self) -> Optional[bool]:
        return self._disable_notification

    @disable_notification.setter
    def disable_notification(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to disable_notification after initialization.')

    @property
    def disable_web_page_preview(self) -> Optional[bool]:
        return self._disable_web_page_preview

    @disable_web_page_preview.setter
    def disable_web_page_preview(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to disable_web_page_preview after initialization.')

    @property
    def allow_sending_without_reply(self) -> Optional[bool]:
        return self._allow_sending_without_reply

    @allow_sending_without_reply.setter
    def allow_sending_without_reply(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to allow_sending_without_reply after initialization.')

    @property
    def quote(self) -> Optional[bool]:
        return self._quote

    @quote.setter
    def quote(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to quote after initialization.')

    @property
    def tzinfo(self) -> datetime.tzinfo:
        return self._tzinfo

    @tzinfo.setter
    def tzinfo(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to tzinfo after initialization.')

    @property
    def block(self) -> bool:
        return self._block

    @block.setter
    def block(self, value: object) -> NoReturn:
        raise AttributeError('You can not assign a new value to block after initialization.')

    @property
    def protect_content(self) -> Optional[bool]:
        return self._protect_content

    @protect_content.setter
    def protect_content(self, value: object) -> NoReturn:
        raise AttributeError("You can't assign a new value to protect_content after initialization.")
class HT_CONV(nn.Module):
    """Hough-transform convolution stack: a depthwise (9,1) stem followed by
    two HTCONVBlocks, with Laplacian initialization on the stem only."""

    def __init__(self, inplanes, outplanes):
        super(HT_CONV, self).__init__()
        self.conv1 = nn.Sequential(*make_conv2d_block(inplanes, inplanes, kernel_size=(9, 1), padding=(4, 0), bias=True, groups=inplanes))
        self.block1 = HTCONVBlock(inplanes, inplanes)
        self.block2 = HTCONVBlock(inplanes, outplanes)
        self.relu = nn.ReLU(inplace=True)
        # Laplacian init for the depthwise stem; plain init for the blocks'
        # convs (same call order as before to keep RNG consumption identical).
        init_ht_convs(self.conv1[0], laplacian_init=True)
        for block in (self.block1, self.block2):
            init_ht_convs(block.conv1, laplacian_init=False)
            init_ht_convs(block.conv2, laplacian_init=False)

    def forward(self, x):
        """Stem then both HT blocks."""
        out = self.conv1(x)
        out = self.block1(out)
        return self.block2(out)
class SwitchModel(nn.Module):
    """Baseline switch-point model: PLM encoder + pointer network head."""

    def __init__(self, args: Namespace, device: torch.device):
        super(SwitchModel, self).__init__()
        self.modelid = 'switch_baseline'
        self.args = args
        self.device = device
        self._encoder = PLM(args, device, use_encoder=True, pooler_output=False)
        self._pointer = PointerNetwork(args, device)
        self._lm_dropout = nn.Dropout(args.dropout)

    def forward(self, input: torch.Tensor, attention_mask: torch.Tensor=None, need_mask: bool=False):
        """Encode, apply dropout, and run the pointer network.

        Returns pointer logits, or (logits, masks) when `need_mask` is set.
        """
        hidden = self._lm_dropout(self._encoder(input, attention_mask=attention_mask))
        pointer_out = self._pointer(hidden, attention_mask, need_mask)
        if not need_mask:
            return pointer_out
        logits, masks = pointer_out
        return (logits, masks)
def fcn(split, tops):
    """Build an FCN-32s network (VGG-16 backbone) for NYUDv2 segmentation.

    `split` selects the dataset split and `tops` the input modalities; both
    are forwarded to the Python data layer. Returns the NetParameter proto.
    """
    n = caffe.NetSpec()
    (n.data, n.label) = L.Python(module='nyud_layers', layer='NYUDSegDataLayer', ntop=2, param_str=str(dict(nyud_dir='../data/nyud', split=split, tops=tops, seed=1337)))
    # VGG-16 conv stages; pad=100 on conv1_1 compensates for the later crop.
    (n.conv1_1, n.relu1_1) = conv_relu(n.data, 64, pad=100)
    (n.conv1_2, n.relu1_2) = conv_relu(n.relu1_1, 64)
    n.pool1 = max_pool(n.relu1_2)
    (n.conv2_1, n.relu2_1) = conv_relu(n.pool1, 128)
    (n.conv2_2, n.relu2_2) = conv_relu(n.relu2_1, 128)
    n.pool2 = max_pool(n.relu2_2)
    (n.conv3_1, n.relu3_1) = conv_relu(n.pool2, 256)
    (n.conv3_2, n.relu3_2) = conv_relu(n.relu3_1, 256)
    (n.conv3_3, n.relu3_3) = conv_relu(n.relu3_2, 256)
    n.pool3 = max_pool(n.relu3_3)
    (n.conv4_1, n.relu4_1) = conv_relu(n.pool3, 512)
    (n.conv4_2, n.relu4_2) = conv_relu(n.relu4_1, 512)
    (n.conv4_3, n.relu4_3) = conv_relu(n.relu4_2, 512)
    n.pool4 = max_pool(n.relu4_3)
    (n.conv5_1, n.relu5_1) = conv_relu(n.pool4, 512)
    (n.conv5_2, n.relu5_2) = conv_relu(n.relu5_1, 512)
    (n.conv5_3, n.relu5_3) = conv_relu(n.relu5_2, 512)
    n.pool5 = max_pool(n.relu5_3)
    # Fully-convolutional fc6/fc7 with dropout.
    (n.fc6, n.relu6) = conv_relu(n.pool5, 4096, ks=7, pad=0)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    (n.fc7, n.relu7) = conv_relu(n.drop6, 4096, ks=1, pad=0)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)
    # 40-class score layer, 32x learned-free upsampling, crop back to input.
    n.score_fr = L.Convolution(n.drop7, num_output=40, kernel_size=1, pad=0, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    n.upscore = L.Deconvolution(n.score_fr, convolution_param=dict(num_output=40, kernel_size=64, stride=32, bias_term=False), param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.data)
    # Unnormalized softmax loss; 255 marks ignored (void) pixels.
    n.loss = L.SoftmaxWithLoss(n.score, n.label, loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
class IPortUser(metaclass=ABCMeta):
    """Mixin interface for objects notified about port import/export progress."""

    # Event/process flags: distinct powers of two so they can be OR-combined.
    ID_PULSE = 0b000001        # 1
    ID_UPDATE = 0b000010       # 2
    ID_DONE = 0b000100         # 4
    ID_ERROR = 0b001000        # 8
    PROCESS_IMPORT = 0b010000  # 16
    PROCESS_EXPORT = 0b100000  # 32

    def on_port_processing(self, action, data=None):
        """Called while a port process runs; default implementation is a no-op."""
        pass

    def on_port_process_start(self):
        """Called when a port process begins; default implementation is a no-op."""
        pass
@pytest.mark.parametrize('sampler', [sample_blackjax_nuts, sample_numpyro_nuts])
@pytest.mark.parametrize('random_seed', (None, 123))
@pytest.mark.parametrize(
    'chains',
    [
        pytest.param(1),
        pytest.param(
            2,
            marks=pytest.mark.skipif(len(jax.devices()) < 2, reason='not enough devices'),
        ),
    ],
)
def test_seeding(chains, random_seed, sampler):
    """Fixed seeds reproduce draws exactly; distinct chains must differ.

    NOTE(review): the ``@pytest.mark`` prefixes were missing from the three
    decorators (bare ``.parametrize(...)`` expressions are a syntax error);
    restored here.
    """
    sample_kwargs = dict(tune=100, draws=5, chains=chains, random_seed=random_seed)
    with pm.Model() as m:
        pm.Normal('x', mu=0, sigma=1)
        result1 = sampler(**sample_kwargs)
        result2 = sampler(**sample_kwargs)
    all_equal = np.all(result1.posterior['x'] == result2.posterior['x'])
    if random_seed is None:
        # Unseeded runs should (almost surely) not reproduce exactly.
        assert not all_equal
    else:
        assert all_equal
    if chains > 1:
        # Different chains must produce different draws.
        assert np.all(result1.posterior['x'].sel(chain=0) != result1.posterior['x'].sel(chain=1))
        assert np.all(result2.posterior['x'].sel(chain=0) != result2.posterior['x'].sel(chain=1))
def test_validate_blackbox():
    """validate.blackbox accepts a consistent Blackbox and rejects malformed ones."""
    # A well-formed blackbox passes without raising.
    validate.blackbox(macro.Blackbox(((0, 1),), (1,)))
    # Each of these (partition, outputs) combinations is malformed.
    bad_specs = [
        (((0, 1),), (1, 0)),
        (((0,), (0, 1)), (0, 1)),
        (((0,), (1,)), (0,)),
    ]
    for partition, outputs in bad_specs:
        with pytest.raises(ValueError):
            validate.blackbox(macro.Blackbox(partition, outputs))
def decode_terminated(data: bytes, encoding: str, strict: bool = True) -> Tuple[str, bytes]:
    """Decode a NUL-terminated string from *data*.

    Returns ``(text, rest)`` where *rest* is everything after the terminator.
    If no terminator is found: raise ValueError when *strict*, otherwise
    decode the whole buffer and return ``b''`` as *rest*.
    """
    codec_info = codecs.lookup(encoding)
    encoding = codec_info.name  # normalized codec name
    if encoding in ('utf-8', 'iso8859-1'):
        # For these encodings a NUL byte can only be the terminator, so a
        # plain byte search is safe.
        index = data.find(b'\x00')
        if index == -1:
            # Decode first so undecodable data surfaces as a decode error
            # even in strict mode (matches the incremental path below).
            res = (data.decode(encoding), b'')
            if strict:
                raise ValueError('not null terminated')
            return res
        return (data[:index].decode(encoding), data[index + 1:])
    # General case: feed the incremental decoder byte by byte until a NUL
    # character is produced.
    decoder = codec_info.incrementaldecoder()
    chars: List[str] = []
    for pos, byte in enumerate(iterbytes(data)):
        ch = decoder.decode(byte)
        if ch == u'\x00':
            return (u''.join(chars), data[pos + 1:])
        chars.append(ch)
    chars.append(decoder.decode(b'', True))  # final flush
    if strict:
        raise ValueError('not null terminated')
    return (u''.join(chars), b'')
def test_user_potential():
    """A user-supplied QuadPotential subclass must actually be used by NUTS."""
    model = pymc.Model()
    with model:
        pymc.Normal('a', mu=0, sigma=1)
    energy_calls = []

    class Potential(quadpotential.QuadPotentialDiag):
        # Record every energy evaluation, then defer to the real computation.
        def energy(self, x, velocity=None):
            energy_calls.append(1)
            return super().energy(x, velocity)

    pot = Potential(floatX([1]))
    with model:
        step = pymc.NUTS(potential=pot)
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
            pymc.sample(10, step=step, chains=1)
    # Sampling must have gone through our potential at least once.
    assert energy_calls
class ClassificationLoss(torch.nn.Module):
    # Unified classification loss: selects a criterion from LossType and can
    # add a recursive hierarchy regularizer for hierarchical multi-label tasks.

    def __init__(self, label_size, class_weight=None, loss_type=LossType.SOFTMAX_CROSS_ENTROPY):
        # label_size: number of classes.
        # class_weight: optional per-class weights (only passed to CrossEntropyLoss).
        # loss_type: one of the supported LossType members; otherwise TypeError.
        super(ClassificationLoss, self).__init__()
        self.label_size = label_size
        self.loss_type = loss_type
        if (loss_type == LossType.SOFTMAX_CROSS_ENTROPY):
            self.criterion = torch.nn.CrossEntropyLoss(class_weight)
        elif (loss_type == LossType.SOFTMAX_FOCAL_CROSS_ENTROPY):
            self.criterion = FocalLoss(label_size, ActivationType.SOFTMAX)
        elif (loss_type == LossType.SIGMOID_FOCAL_CROSS_ENTROPY):
            self.criterion = FocalLoss(label_size, ActivationType.SIGMOID)
        elif (loss_type == LossType.BCE_WITH_LOGITS):
            self.criterion = torch.nn.BCEWithLogitsLoss()
        else:
            raise TypeError(('Unsupported loss type: %s. Supported loss type is: %s' % (loss_type, LossType.str())))

    def forward(self, logits, target, use_hierar=False, is_multi=False, *argvs):
        # logits: raw model outputs; target: class indices or multi-hot matrix.
        # use_hierar: add recursive regularization over label-weight rows; then
        # argvs[0:3] must be (penalty, parameter matrix, child-id mapping).
        # is_multi: target is already multi-hot (no one-hot conversion needed).
        device = logits.device
        if use_hierar:
            # Hierarchical mode requires a sigmoid-style loss.
            assert (self.loss_type in [LossType.BCE_WITH_LOGITS, LossType.SIGMOID_FOCAL_CROSS_ENTROPY])
            if (not is_multi):
                # Convert class indices to one-hot rows.
                target = torch.eye(self.label_size)[target].to(device)
            (hierar_penalty, hierar_paras, hierar_relations) = argvs[0:3]
            return (self.criterion(logits, target) + (hierar_penalty * self.cal_recursive_regularize(hierar_paras, hierar_relations, device)))
        else:
            if is_multi:
                assert (self.loss_type in [LossType.BCE_WITH_LOGITS, LossType.SIGMOID_FOCAL_CROSS_ENTROPY])
            elif (self.loss_type not in [LossType.SOFTMAX_CROSS_ENTROPY, LossType.SOFTMAX_FOCAL_CROSS_ENTROPY]):
                # Sigmoid-style losses need one-hot targets even for single-label input.
                target = torch.eye(self.label_size)[target].to(device)
            return self.criterion(logits, target)

    def cal_recursive_regularize(self, paras, hierar_relations, device='cpu'):
        # Recursive regularization: half the squared L2 distance between each
        # parent label's weight row and each of its children's rows.
        # paras: label-weight matrix; hierar_relations: parent index -> child ids.
        recursive_loss = 0.0
        for i in range(len(paras)):
            if (i not in hierar_relations):
                continue
            children_ids = hierar_relations[i]
            if (not children_ids):
                continue
            children_ids_list = torch.tensor(children_ids, dtype=torch.long).to(device)
            children_paras = torch.index_select(paras, 0, children_ids_list)
            parent_para = torch.index_select(paras, 0, torch.tensor(i).to(device))
            # Broadcast the parent row against every child row.
            parent_para = parent_para.repeat(children_ids_list.size()[0], 1)
            diff_paras = (parent_para - children_paras)
            diff_paras = diff_paras.view(diff_paras.size()[0], (- 1))
            recursive_loss += ((1.0 / 2) * (torch.norm(diff_paras, p=2) ** 2))
        return recursive_loss
class Reader(object):
    """A line-based string reader with a movable cursor."""

    def __init__(self, data):
        # Accept either a pre-split list of lines or a single newline-joined string.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # _l is the current (0-based) line index.
        self._l = 0

    def read(self):
        """Return the current line and advance; '' once past the end."""
        if self.eof():
            return ''
        line = self[self._l]
        self._l += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor to the next line with non-whitespace content."""
        for line in self[self._l:]:
            if line.strip():
                break
            self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Return lines from the cursor up to (excluding) the first line that
        satisfies *condition_func*, advancing the cursor past them."""
        start = self._l
        for line in self[start:]:
            if condition_func(line):
                return self[start:self._l]
            self._l += 1
            if self.eof():
                # Consumed everything without hitting the condition.
                return self[start:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        return self.read_to_condition(
            lambda line: line.strip() and len(line.lstrip()) == len(line))

    def peek(self, n=0):
        """Return the line *n* positions ahead without advancing; '' past end."""
        pos = self._l + n
        return self[pos] if pos < len(self._str) else ''

    def is_empty(self):
        return not ''.join(self._str).strip()
def parse_val_archive(root, file=None, wnids=None, folder='val'):
    # Extract the ImageNet validation archive and sort the images into one
    # sub-directory per WordNet id (wnid), mirroring the train layout.
    # root:   dataset root directory containing the archive.
    # file:   archive filename; defaults to the known validation archive name.
    # wnids:  per-image wnid sequence aligned with the *sorted* image list;
    #         defaults to the second entry of the cached meta file.
    # folder: output directory name created under root.
    archive_meta = ARCHIVE_META['val']
    if (file is None):
        file = archive_meta[0]
    md5 = archive_meta[1]
    if (wnids is None):
        wnids = load_meta_file(root)[1]
    # Checksum verification before extraction.
    _verify_archive(root, file, md5)
    val_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), val_root)
    # Sorting makes the image order deterministic so it lines up with wnids.
    images = sorted([os.path.join(val_root, image) for image in os.listdir(val_root)])
    for wnid in set(wnids):
        os.mkdir(os.path.join(val_root, wnid))
    for (wnid, img_file) in zip(wnids, images):
        shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file)))
class Relations():
    """Base class for relation collections with lazily-cached aggregates.

    Subclasses are expected to provide ``_sum_phi()`` and
    ``_num_relations()``; each is evaluated at most once.
    """

    def __init__(self, *args, **kwargs):
        # Caches start empty and are filled on first access.
        self._num_relations_cached = None
        self._sum_phi_cached = None

    def sum_phi(self):
        """Sum of small-phi over all relations (memoized)."""
        if self._sum_phi_cached is None:
            self._sum_phi_cached = self._sum_phi()
        return self._sum_phi_cached

    def num_relations(self):
        """Number of relations (memoized)."""
        if self._num_relations_cached is None:
            self._num_relations_cached = self._num_relations()
        return self._num_relations_cached

    def _repr_columns(self):
        # Columns shown in textual representations of this collection.
        return [
            (f'{fmt.SMALL_PHI}_r', self.sum_phi()),
            ('#(relations)', self.num_relations()),
        ]
def symmetric_gradients(tensor: torch.Tensor, grad: torch.Tensor, intermediate_result: IntermediateResult, channel_axis: int) -> Tuple[(torch.Tensor, torch.Tensor)]:
    # Gradient of a symmetric quantizer w.r.t. its (min, max) encodings.
    # Returns (grad_encoding_min, grad_encoding_max); for a symmetric grid the
    # min-gradient is the negation of the max-gradient (see the return line).
    mask_tensor = intermediate_result.mask_tensor
    delta = intermediate_result.delta
    offset = intermediate_result.offset
    x_quant = intermediate_result.x_quant
    num_steps = intermediate_result.num_steps
    if ((delta.numel() > 1) and (len(tensor.shape) == 1)):
        # Per-channel encodings on a 1-D tensor: element-wise, no reduction.
        grad_encoding_max = (((x_quant + offset) * grad) - ((mask_tensor * (tensor / delta)) * grad))
    else:
        # Per-tensor: reduce over all dims. Per-channel on an N-D tensor:
        # reduce over every dim except channel_axis.
        dim = list(range(len(tensor.shape)))
        if ((delta.numel() > 1) and (len(tensor.shape) > 1)):
            dim.pop(channel_axis)
        grad_encoding_max = (((x_quant + offset) * grad).sum(dim=dim) - ((mask_tensor * (tensor / delta)) * grad).sum(dim=dim))
    # Scale by floor(num_steps / 2) — half the quantization range.
    grad_encoding_max = (grad_encoding_max / torch.div(num_steps, 2, rounding_mode='floor'))
    # Match the stored encoding_max shape.
    grad_encoding_max = grad_encoding_max.view_as(intermediate_result.encoding_max)
    return ((- grad_encoding_max), grad_encoding_max)
class Deterministic(nn.Module):
    """Wrap a module and optionally replay its forward pass under recorded RNG.

    ``record_rng=True`` snapshots the CPU (and, when CUDA has been
    initialized, GPU) RNG state before running; ``set_rng=True`` restores
    that snapshot so the wrapped module reproduces the same randomness.
    """

    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None
        self.cuda_in_fwd = None
        self.gpu_devices = None
        self.gpu_states = None

    def record_rng(self, *args):
        """Snapshot RNG state; GPU states only if CUDA is initialized."""
        self.cpu_state = torch.get_rng_state()
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)

    def forward(self, *args, record_rng=False, set_rng=False, **kwargs):
        if record_rng:
            self.record_rng(*args)
        if not set_rng:
            return self.net(*args, **kwargs)
        # Replay under the previously recorded RNG state, restoring the
        # ambient state afterwards via fork_rng.
        devices = self.gpu_devices if self.cuda_in_fwd else []
        with torch.random.fork_rng(devices=devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
def test_is_super():
    """A more general TensorType is a supertype of a more specific one, never
    the reverse, and types of different rank are unrelated."""
    any_matrix = TensorType(config.floatX, shape=(None, None))
    col_matrix = TensorType(config.floatX, shape=(None, 1))
    assert any_matrix.is_super(any_matrix)  # reflexive
    assert any_matrix.is_super(col_matrix)
    assert not col_matrix.is_super(any_matrix)
    rank3 = TensorType(config.floatX, shape=(None, None, None))
    assert not rank3.is_super(any_matrix)
class GroupViTOnnxConfig(OnnxConfig):
    # ONNX export configuration for GroupViT (joint text + image model).
    # NOTE(review): in upstream transformers, `inputs`, `outputs`,
    # `atol_for_validation` and `default_onnx_opset` carry @property
    # decorators; they appear to have been stripped here — confirm.

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Dynamic-axis names for the text and image inputs.
        return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])

    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # All outputs have a dynamic batch axis only.
        return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})])

    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating ONNX vs. framework outputs.
        return 0.0001

    def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=(- 1), seq_length: int=(- 1), framework: Optional['TensorType']=None) -> Mapping[(str, Any)]:
        # Merge the tokenizer's dummy text inputs with the feature
        # extractor's dummy pixel inputs into one feed dict.
        text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    def default_onnx_opset(self) -> int:
        # Opset version used for export.
        return 14
def lexical_overlap(premise, hypothesis):
    """Return True when every (lowercased) word of *hypothesis* also occurs in
    *premise*, ignoring the standalone punctuation tokens '.', '?' and '!'.

    Empty hypotheses vacuously overlap (returns True), matching the original
    behavior.
    """
    punctuation = {'.', '?', '!'}
    # Set membership turns the original O(len(premise) * len(hypothesis))
    # list scans into O(len(premise) + len(hypothesis)).
    prem_words = {word.lower() for word in premise.split() if word not in punctuation}
    hyp_words = (word.lower() for word in hypothesis.split() if word not in punctuation)
    return all(word in prem_words for word in hyp_words)
def main_worker(gpu, opts):
    # Per-process entry point for distributed (NCCL) training of the
    # GPT-based inpainting model; `gpu` is this process's local device index.
    rank = ((opts.node_rank * opts.gpus) + gpu)
    torch.cuda.set_device(gpu)
    dist.init_process_group(backend='nccl', init_method='env://', world_size=opts.world_size, rank=rank, group_name='mtorch')
    set_seed(42)
    if (rank == 0):
        # Only rank 0 mirrors stdout into a log file.
        sys.stdout = Logger(os.path.join(opts.ckpt_path, 'log.txt'))
    # k-means color palette: centers stored in [-1, 1], rescaled to [0, 255].
    C = np.load('kmeans_centers.npy')
    C = np.rint((127.5 * (C + 1.0)))
    C = torch.from_numpy(C)
    train_dataset = ImageNetDatasetMask(opts.data_path, C, mask_path=opts.mask_path, is_train=True, use_ImageFolder=opts.use_ImageFolder, image_size=opts.image_size, random_stroke=opts.random_stroke)
    test_dataset = ImageNetDatasetMask(opts.validation_path, C, mask_path=opts.mask_path, is_train=False, use_ImageFolder=opts.use_ImageFolder, image_size=opts.image_size)
    # All dropout rates are fixed at 0.0 here — presumably intentional for
    # this training setup; confirm against the training recipe.
    model_config = GPTConfig(train_dataset.vocab_size, train_dataset.block_size, embd_pdrop=0.0, resid_pdrop=0.0, attn_pdrop=0.0, n_layer=opts.n_layer, n_head=opts.n_head, n_embd=opts.n_embd, BERT=opts.BERT, use_gelu2=opts.GELU_2, dynamic_weight=opts.dynamic_weight)
    IGPT_model = GPT(model_config)
    tokens_per_epoch = (len(train_dataset.image_id_list) * train_dataset.block_size)
    train_epochs = opts.train_epoch
    # LR-schedule horizons are divided by world_size since each process
    # consumes only its shard of the tokens.
    train_config = TrainerConfig(max_epochs=train_epochs, batch_size=opts.batch_size, learning_rate=opts.lr, betas=(0.9, 0.95), weight_decay=0, lr_decay=True, warmup_tokens=(tokens_per_epoch / opts.world_size), final_tokens=((train_epochs * tokens_per_epoch) / opts.world_size), ckpt_path=opts.ckpt_path, num_workers=8, GPU_ids=opts.GPU_ids, BERT=opts.BERT, world_size=opts.world_size, AMP=opts.AMP, print_freq=opts.print_freq)
    trainer = Trainer(IGPT_model, train_dataset, test_dataset, train_config, gpu, rank)
    # Resume from a checkpoint if one was given, then run training.
    loaded_ckpt = trainer.load_checkpoint(opts.resume_ckpt)
    trainer.train(loaded_ckpt)
    print('Finish the training ...')
class GhostBatchNorm(BatchNorm):
    # Batch norm that, during training, normalizes `num_splits` "ghost"
    # sub-batches independently and then averages their running statistics.

    def __init__(self, num_features, num_splits=1, **kwargs):
        super().__init__(num_features, **kwargs)
        self.num_splits = num_splits
        # Re-register running stats; their shape is expanded to
        # num_splits * num_features on the fly in forward and collapsed back.
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, input):
        (N, C, H, W) = input.shape
        if (self.training or (not self.track_running_stats)):
            # Tile the stats per split and fold the split dimension into the
            # channel dimension so one batch_norm call normalizes each ghost
            # batch separately.
            self.running_mean = self.running_mean.repeat(self.num_splits)
            self.running_var = self.running_var.repeat(self.num_splits)
            outputs = F.batch_norm(input.view((- 1), (C * self.num_splits), H, W), self.running_mean, self.running_var, self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits), True, self.momentum, self.eps).view(N, C, H, W)
            # Collapse per-split statistics back to per-feature by averaging.
            self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0)
            self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0)
            return outputs
        else:
            # Eval path: standard batch norm using the averaged running stats.
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, self.momentum, self.eps)
class TestSamplePPC:
    """Tests for posterior/prior predictive sampling.

    NOTE(review): several decorators in this class arrived with their
    ``@pytest.mark`` / ``@pytest.fixture`` / ``@staticmethod`` prefixes
    stripped (leaving bare ``.parametrize(...)`` / ``(scope=...)``
    expressions, which are syntax errors); they are restored below.
    """

    def test_normal_scalar(self):
        nchains = 2
        ndraws = 500
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=0.0)
            trace = pm.sample(draws=ndraws, chains=nchains)
        with model:
            # List-of-points input.
            ppc0 = pm.sample_posterior_predictive(10 * [model.initial_point()], return_inferencedata=False)
            assert 'a' in ppc0
            assert len(ppc0['a'][0]) == 10
            # Empty var_names produces an empty result.
            ppc = pm.sample_posterior_predictive(trace, var_names=[], return_inferencedata=False)
            assert len(ppc) == 0
            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False)
            assert ppc['a'].shape == (nchains, ndraws)
            random_state = np.random.RandomState()
            idata_ppc = pm.sample_posterior_predictive(trace, var_names=['a'], random_seed=random_state)
            ppc = idata_ppc.posterior_predictive
            assert 'a' in ppc
            assert ppc['a'].shape == (nchains, ndraws)
            _, pval = stats.kstest((ppc['a'] - trace.posterior['mu']).values.flatten(), stats.norm(loc=0, scale=1).cdf)
            assert pval > 0.001

    def test_normal_scalar_idata(self):
        nchains = 2
        ndraws = 500
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=0.0)
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', 'Tuning samples will be included.*', UserWarning)
                trace = pm.sample(draws=ndraws, chains=nchains, return_inferencedata=False, discard_tuned_samples=False)
        assert not isinstance(trace, InferenceData)
        with model:
            idata = pm.to_inference_data(trace)
            assert isinstance(idata, InferenceData)
            ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False)
            assert ppc['a'].shape == (nchains, ndraws)

    def test_normal_vector(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
            trace = pm.sample(return_inferencedata=False, draws=12, chains=1)
        with model:
            ppc0 = pm.sample_posterior_predictive(10 * [model.initial_point()], return_inferencedata=False)
            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, var_names=[])
            assert len(ppc) == 0
            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False)
            assert ppc['a'].shape == (trace.nchains, len(trace), 2)
            assert ppc0['a'].shape == (1, 10, 2)

    def test_normal_vector_idata(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
            trace = pm.sample(return_inferencedata=False)
        assert not isinstance(trace, InferenceData)
        with model:
            idata = pm.to_inference_data(trace)
            assert isinstance(idata, InferenceData)
            ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False)
            assert ppc['a'].shape == (trace.nchains, len(trace), 2)

    def test_exceptions(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=np.array([0.5, 0.2]))
            idata = pm.sample(idata_kwargs={'log_likelihood': False})
        with model:
            # A plain dict is not an accepted trace type.
            bad_trace = {'mu': stats.norm.rvs(size=1000)}
            with pytest.raises(TypeError, match='type for `trace`'):
                ppc = pm.sample_posterior_predictive(bad_trace)

    def test_sum_normal(self):
        with pm.Model() as model:
            a = pm.Normal('a', sigma=0.2)
            b = pm.Normal('b', mu=a)
            idata = pm.sample(draws=1000, chains=1)
        with model:
            ppc0 = pm.sample_posterior_predictive(10 * [model.initial_point()], return_inferencedata=False)
            assert ppc0 == {}
            ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False, var_names=['b'])
            assert len(ppc) == 1
            assert ppc['b'].shape == (1, 1000)
            scale = np.sqrt(1 + 0.2 ** 2)
            _, pval = stats.kstest(ppc['b'].flatten(), stats.norm(scale=scale).cdf)
            assert pval > 0.001

    def test_model_not_drawable_prior(self, seeded_test):
        data = np.random.poisson(lam=10, size=200)
        model = pm.Model()
        with model:
            mu = pm.HalfFlat('sigma')
            pm.Poisson('foo', mu=mu, observed=data)
            with pytensor.config.change_flags(mode=fast_unstable_sampling_mode):
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
                    idata = pm.sample(tune=10, draws=40, chains=1)
        with model:
            with pytest.raises(NotImplementedError) as excinfo:
                pm.sample_prior_predictive(50)
            assert 'Cannot sample' in str(excinfo.value)
            samples = pm.sample_posterior_predictive(idata, return_inferencedata=False)
            assert samples['foo'].shape == (1, 40, 200)

    def test_model_shared_variable(self):
        rng = np.random.RandomState(9832)
        x = rng.randn(100)
        y = x > 0
        x_shared = pytensor.shared(x)
        y_shared = pytensor.shared(y)
        samples = 100
        with pm.Model() as model:
            coeff = pm.Normal('x', mu=0, sigma=1)
            logistic = pm.Deterministic('p', pm.math.sigmoid(coeff * x_shared))
            obs = pm.Bernoulli('obs', p=logistic, observed=y_shared)
            trace = pm.sample(samples, chains=1, return_inferencedata=False, compute_convergence_checks=False, random_seed=rng)
        x_shared.set_value([-1, 0, 1.0])
        y_shared.set_value([0, 0, 0])
        with model:
            post_pred = pm.sample_posterior_predictive(trace, return_inferencedata=False, var_names=['p', 'obs'])
        expected_p = np.array([[logistic.eval({coeff: val}) for val in trace['x'][:samples]]])
        assert post_pred['obs'].shape == (1, samples, 3)
        npt.assert_allclose(post_pred['p'], expected_p)

    def test_deterministic_of_observed(self):
        rng = np.random.RandomState(8442)
        meas_in_1 = pm.pytensorf.floatX(2 + 4 * rng.randn(10))
        meas_in_2 = pm.pytensorf.floatX(5 + 4 * rng.randn(10))
        nchains = 2
        with pm.Model() as model:
            mu_in_1 = pm.Normal('mu_in_1', 0, 2)
            sigma_in_1 = pm.HalfNormal('sd_in_1', 1)
            mu_in_2 = pm.Normal('mu_in_2', 0, 2)
            sigma_in_2 = pm.HalfNormal('sd__in_2', 1)
            in_1 = pm.Normal('in_1', mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal('in_2', mu_in_2, sigma_in_2, observed=meas_in_2)
            out_diff = in_1 + in_2
            pm.Deterministic('out', out_diff)
            with pytensor.config.change_flags(mode=fast_unstable_sampling_mode):
                trace = pm.sample(tune=100, draws=100, chains=nchains, step=pm.Metropolis(), return_inferencedata=False, compute_convergence_checks=False, random_seed=rng)
            rtol = 1e-05 if pytensor.config.floatX == 'float64' else 0.0001
            ppc = pm.sample_posterior_predictive(return_inferencedata=False, model=model, trace=trace, random_seed=0, var_names=[var.name for var in (model.deterministics + model.basic_RVs)])
            npt.assert_allclose(ppc['in_1'] + ppc['in_2'], ppc['out'], rtol=rtol)

    def test_deterministic_of_observed_modified_interface(self):
        rng = np.random.RandomState(4982)
        meas_in_1 = pm.pytensorf.floatX(2 + 4 * rng.randn(100))
        meas_in_2 = pm.pytensorf.floatX(5 + 4 * rng.randn(100))
        with pm.Model() as model:
            mu_in_1 = pm.Normal('mu_in_1', 0, 1, initval=0)
            sigma_in_1 = pm.HalfNormal('sd_in_1', 1, initval=1)
            mu_in_2 = pm.Normal('mu_in_2', 0, 1, initval=0)
            sigma_in_2 = pm.HalfNormal('sd__in_2', 1, initval=1)
            in_1 = pm.Normal('in_1', mu_in_1, sigma_in_1, observed=meas_in_1)
            in_2 = pm.Normal('in_2', mu_in_2, sigma_in_2, observed=meas_in_2)
            out_diff = in_1 + in_2
            pm.Deterministic('out', out_diff)
            with pytensor.config.change_flags(mode=fast_unstable_sampling_mode):
                trace = pm.sample(tune=100, draws=100, step=pm.Metropolis(), return_inferencedata=False, compute_convergence_checks=False, random_seed=rng)
            varnames = [v for v in trace.varnames if v != 'out']
            ppc_trace = [dict(zip(varnames, row)) for row in zip(*(trace.get_values(v) for v in varnames))]
            ppc = pm.sample_posterior_predictive(return_inferencedata=False, model=model, trace=ppc_trace, var_names=[x.name for x in (model.deterministics + model.basic_RVs)])
            rtol = 1e-05 if pytensor.config.floatX == 'float64' else 0.001
            npt.assert_allclose(ppc['in_1'] + ppc['in_2'], ppc['out'], rtol=rtol)

    def test_variable_type(self):
        with pm.Model() as model:
            mu = pm.HalfNormal('mu', 1)
            a = pm.Normal('a', mu=mu, sigma=2, observed=np.array([1, 2]))
            b = pm.Poisson('b', mu, observed=np.array([1, 2]))
            with pytensor.config.change_flags(mode=fast_unstable_sampling_mode):
                trace = pm.sample(tune=10, draws=10, compute_convergence_checks=False, return_inferencedata=False)
        with model:
            ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False)
            assert ppc['a'].dtype.kind == 'f'
            assert ppc['b'].dtype.kind == 'i'

    def test_potentials_warning(self):
        warning_msg = 'The effect of Potentials on other parameters is ignored during'
        with pm.Model() as m:
            a = pm.Normal('a', 0, 1)
            p = pm.Potential('p', a + 1)
            obs = pm.Normal('obs', a, 1, observed=5)
        trace = az_from_dict({'a': np.random.rand(5)})
        with m:
            with pytest.warns(UserWarning, match=warning_msg):
                pm.sample_posterior_predictive(trace)

    def test_idata_extension(self):
        with pm.Model() as model:
            mu = pm.Normal('mu', 0.0, 1.0)
            a = pm.Normal('a', mu=mu, sigma=1, observed=[0.0, 1.0])
            idata = pm.sample(tune=10, draws=10, compute_convergence_checks=False)
        base_test_dict = {'posterior': ['mu', '~a'], 'sample_stats': ['diverging', 'lp'], 'observed_data': ['a']}
        test_dict = {'~posterior_predictive': [], '~predictions': [], **base_test_dict}
        fails = check_multiple_attrs(test_dict, idata)
        assert not fails
        # Extend with an in-sample posterior predictive group.
        with model:
            pm.sample_posterior_predictive(idata, extend_inferencedata=True)
        test_dict = {'posterior_predictive': ['a'], '~predictions': [], **base_test_dict}
        fails = check_multiple_attrs(test_dict, idata)
        assert not fails
        # Extend with a predictions group as well.
        with model:
            pm.sample_posterior_predictive(idata, extend_inferencedata=True, predictions=True)
        test_dict = {'posterior_predictive': ['a'], 'predictions': ['a'], **base_test_dict}
        fails = check_multiple_attrs(test_dict, idata)
        assert not fails

    @pytest.mark.parametrize('multitrace', [False, True])
    def test_deterministics_out_of_idata(self, multitrace):
        draws = 10
        chains = 2
        coords = {'draw': range(draws), 'chain': range(chains)}
        ds = xr.Dataset(
            {'a': xr.DataArray([[0] * draws] * chains, coords=coords, dims=['chain', 'draw'])},
            coords=coords,
        )
        with pm.Model() as m:
            a = pm.Normal('a')
            if multitrace:
                straces = []
                for chain in ds.chain:
                    strace = pm.backends.NDArray(model=m, vars=[a])
                    strace.setup(len(ds.draw), int(chain))
                    strace.values = {'a': ds.a.sel(chain=chain).data}
                    strace.draw_idx = len(ds.draw)
                    straces.append(strace)
                trace = MultiTrace(straces)
            else:
                trace = ds
            d = pm.Deterministic('d', a - 4)
            pm.Normal('c', d, sigma=0.01)
            ppc = pm.sample_posterior_predictive(trace, var_names='c', return_inferencedata=True)
        assert np.all(np.abs(ppc.posterior_predictive.c + 4) <= 0.1)

    def test_logging_sampled_basic_rvs_prior(self, caplog):
        with pm.Model() as m:
            x = pm.Normal('x')
            y = pm.Deterministic('y', x + 1)
            z = pm.Normal('z', y, observed=0)
        with m:
            pm.sample_prior_predictive(samples=1)
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [x, z]')]
        caplog.clear()
        with m:
            pm.sample_prior_predictive(samples=1, var_names=['x'])
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [x]')]
        caplog.clear()

    def test_logging_sampled_basic_rvs_posterior(self, caplog):
        with pm.Model() as m:
            x = pm.Normal('x')
            x_det = pm.Deterministic('x_det', x + 1)
            y = pm.Normal('y', x_det)
            z = pm.Normal('z', y, observed=0)
        idata = az_from_dict(posterior={'x': np.zeros(5), 'x_det': np.ones(5), 'y': np.ones(5)})
        with m:
            pm.sample_posterior_predictive(idata)
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [z]')]
        caplog.clear()
        with m:
            pm.sample_posterior_predictive(idata, var_names=['y', 'z'])
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [y, z]')]
        caplog.clear()
        with m:
            pm.sample_posterior_predictive(idata, var_names=['x', 'z'])
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [x, y, z]')]
        caplog.clear()
        idata = az_from_dict(posterior={'x': np.zeros(5)})
        with m:
            pm.sample_posterior_predictive(idata)
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [y, z]')]
        caplog.clear()
        idata = az_from_dict(posterior={'x': np.zeros(5), 'y': np.ones(5)})
        with m:
            pm.sample_posterior_predictive(idata)
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [z]')]
        caplog.clear()
        idata = az_from_dict(posterior={'y': np.ones(5)})
        with m:
            pm.sample_posterior_predictive(idata)
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [x, y, z]')]
        caplog.clear()

    def test_logging_sampled_basic_rvs_posterior_deterministic(self, caplog):
        with pm.Model() as m:
            x = pm.Normal('x')
            x_det = pm.Deterministic('x_det', x + 1)
            y = pm.Normal('y', x_det)
            z = pm.Normal('z', y, observed=0)
        idata = az_from_dict(posterior={'x': np.zeros(5), 'x_det': np.ones(5), 'y': np.ones(5)})
        with m:
            pm.sample_posterior_predictive(idata, var_names=['x_det', 'z'])
        assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [y, z]')]
        caplog.clear()

    @staticmethod
    def make_mock_model():
        # NOTE(review): restored @staticmethod — the method takes no `self`
        # and is invoked via self.make_mock_model() in the fixtures below.
        rng = np.random.default_rng(seed=42)
        data = rng.normal(loc=1, scale=0.2, size=(10, 3))
        with pm.Model() as model:
            model.add_coord('name', ['A', 'B', 'C'], mutable=True)
            model.add_coord('obs', list(range(10, 20)), mutable=True)
            offsets = pm.MutableData('offsets', rng.normal(0, 1, size=(10,)))
            a = pm.Normal('a', mu=0, sigma=1, dims=['name'])
            b = pm.Normal('b', mu=offsets, sigma=1)
            mu = pm.Deterministic('mu', a + b[..., None], dims=['obs', 'name'])
            sigma = pm.HalfNormal('sigma', sigma=1, dims=['name'])
            data = pm.MutableData('y_obs', data, dims=['obs', 'name'])
            pm.Normal('y', mu=mu, sigma=sigma, observed=data, dims=['obs', 'name'])
        return model

    @pytest.fixture(scope='class')
    def mock_multitrace(self):
        with self.make_mock_model():
            trace = pm.sample(draws=10, tune=10, chains=2, progressbar=False, compute_convergence_checks=False, return_inferencedata=False, random_seed=42)
        return trace

    @pytest.fixture(scope='class', params=['MultiTrace', 'InferenceData', 'Dataset'])
    def mock_sample_results(self, request, mock_multitrace):
        kind = request.param
        trace = mock_multitrace
        model = self.make_mock_model()
        if kind == 'MultiTrace':
            return kind, trace, model
        idata = pm.to_inference_data(trace, save_warmup=False, model=model, log_likelihood=False)
        if kind == 'Dataset':
            return kind, idata.posterior, model
        return kind, idata, model

    def test_logging_sampled_basic_rvs_posterior_mutable(self, mock_sample_results, caplog):
        kind, samples, model = mock_sample_results
        with model:
            pm.sample_posterior_predictive(samples)
        if kind == 'MultiTrace':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        elif kind == 'InferenceData':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [y]')]
            caplog.clear()
        elif kind == 'Dataset':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [b, y]')]
            caplog.clear()
        original_offsets = model['offsets'].get_value()
        with model:
            pm.set_data({'offsets': original_offsets + 1})
            pm.sample_posterior_predictive(samples)
        if kind == 'MultiTrace':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        elif kind == 'InferenceData':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [b, y]')]
            caplog.clear()
        elif kind == 'Dataset':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [b, y]')]
            caplog.clear()
        with model:
            model.set_dim('name', new_length=4, coord_values=['D', 'E', 'F', 'G'])
            pm.set_data({'offsets': original_offsets, 'y_obs': np.zeros((10, 4))})
            pm.sample_posterior_predictive(samples)
        if kind == 'MultiTrace':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        elif kind == 'InferenceData':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, sigma, y]')]
            caplog.clear()
        elif kind == 'Dataset':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        with model:
            model.set_dim('name', new_length=3, coord_values=['A', 'B', 'D'])
            pm.set_data({'offsets': original_offsets + 1, 'y_obs': np.zeros((10, 3))})
            pm.sample_posterior_predictive(samples)
        if kind == 'MultiTrace':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        elif kind == 'InferenceData':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
        elif kind == 'Dataset':
            assert caplog.record_tuples == [('pymc.sampling.forward', logging.INFO, 'Sampling: [a, b, sigma, y]')]
            caplog.clear()
class LFM(nn.Module):
    """Frequency-domain residual block: attenuate frequencies around the
    spectrum centre with a Gaussian mask, mix the real/imaginary parts with
    two 1x1 convolutions, and add the filtered signal back to the input."""

    def __init__(self, num_channels):
        super(LFM, self).__init__()
        channels = 2 * num_channels  # real and imaginary parts stacked
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0)

    def make_gaussian(self, y_idx, x_idx, height, width, sigma=7):
        """Return a (1, 1, height, width) CUDA Gaussian bump centred at (y_idx, x_idx)."""
        yv, xv = torch.meshgrid([torch.arange(0, height), torch.arange(0, width)])
        yv = yv.unsqueeze(0).float().cuda()
        xv = xv.unsqueeze(0).float().cuda()
        dist_sq = (yv - y_idx) ** 2 + (xv - x_idx) ** 2
        bump = torch.exp(-dist_sq / (2 * sigma ** 2))
        return bump.unsqueeze(0)

    def forward(self, x):
        b, c, h, w = x.shape
        x = x.float()
        spectrum = torch.fft.fft2(x)
        # Suppress components near the centre of the (unshifted) spectrum.
        centre_mask = self.make_gaussian(h // 2, w // 2, h, w)
        spectrum = spectrum * (1 - centre_mask)
        stacked = torch.cat([spectrum.real, spectrum.imag], dim=1)
        mixed = self.conv2(F.relu(self.conv1(stacked))).float()
        real_part, imag_part = torch.chunk(mixed, 2, dim=1)
        filtered = torch.fft.ifft2(torch.complex(real_part, imag_part), s=(h, w)).float()
        return x + filtered
class GripperControllerServer(GripperController):
    # ROS front-end for GripperController: exposes open / desired-state
    # services and subscribes to a continuous desired-position topic.

    def __init__(self, robot_name, create_node=True, upper_limit=0.037, lower_limit=0.01, des_pos_max=1, des_pos_min=0):
        # Limits and desired-position range are forwarded to the base class.
        super(GripperControllerServer, self).__init__(robot_name, create_node, upper_limit, lower_limit, des_pos_max, des_pos_min)
        # Service: command the gripper to open.
        rospy.Service('open_gripper', OpenGripper, self.open_gripper_service)
        # Service: report the current desired/target position.
        rospy.Service('get_gripper_desired_state', GetGripperDesiredState, self.get_gripper_desired_state_service)
        # Topic: continuous desired-position stream (Float64).
        rospy.Subscriber('/gripper_despos', Float64, self._gripper_despos_callback)

    def _gripper_despos_callback(self, msg):
        # Forward each commanded position to the base controller.
        self.set_continuous_position(msg.data)

    def open_gripper_service(self, req):
        # Service handler: open the gripper; response carries no data.
        self.open()
        return OpenGripperResponse()

    def get_gripper_desired_state_service(self, req):
        # Service handler: reply with the current gripper target position.
        return GetGripperDesiredStateResponse(self.get_gripper_target_position())
class OptionRendererMixin():
    """Mixin that serialises an option dict to XML, rendering each URI at
    most once per export (already-seen URIs are tracked in ``self.uris``)."""

    def render_option(self, xml, option):
        # Skip options whose URI has already been rendered.
        if option['uri'] in self.uris:
            return
        self.uris.add(option['uri'])
        xml.startElement('option', {'dc:uri': option['uri']})
        self.render_text_element(xml, 'uri_prefix', {}, option['uri_prefix'])
        self.render_text_element(xml, 'uri_path', {}, option['uri_path'])
        self.render_text_element(xml, 'dc:comment', {}, option['comment'])
        # One <text>/<help>/<view_text> triple per configured language.
        for (code, _label, _field) in get_languages():
            for element in ('text', 'help', 'view_text'):
                self.render_text_element(xml, element, {'lang': code}, option['%s_%s' % (element, code)])
        self.render_text_element(xml, 'additional_input', {}, option['additional_input'])
        xml.endElement('option')
def _looks_like_special_alias(node: Call) -> bool:
    """Return True if `node` is the typing-module construction of the special
    `tuple` / `collections.abc.Callable` aliases.

    Before Python 3.9 both are built via `_VariadicGenericAlias(...)`; from
    3.9 on they use `_TupleType(tuple, ...)` and
    `_CallableType(collections.abc.Callable, ...)` respectively.
    """
    if not isinstance(node.func, Name):
        return False

    def first_arg_is_tuple():
        arg = node.args[0]
        return isinstance(arg, Name) and arg.name == 'tuple'

    def first_arg_is_callable():
        arg = node.args[0]
        return isinstance(arg, Attribute) and arg.as_string() == 'collections.abc.Callable'

    if not PY39_PLUS:
        return node.func.name == '_VariadicGenericAlias' and (first_arg_is_tuple() or first_arg_is_callable())
    if node.func.name == '_TupleType':
        return first_arg_is_tuple()
    if node.func.name == '_CallableType':
        return first_arg_is_callable()
    return False
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Speech2Text2 tokenizer.

    Loads a JSON vocabulary for token<->id lookup plus an optional BPE
    merges file. Without a merges file the tokenizer can only be used for
    decoding; encoding (`_tokenize`) requires the merges.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', pad_token='<pad>', eos_token='</s>', unk_token='<unk>', do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs)
        self.do_lower_case = do_lower_case
        # token -> id mapping (encoder) and its inverse (decoder).
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        if (merges_file is None):
            # Decode-only mode: no BPE ranks and no BPE cache.
            logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                # Drop the trailing empty line produced by the final newline.
                merges = merges_handle.read().split('\n')[:(- 1)]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            # Earlier merges have lower rank (= higher priority).
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    # NOTE(review): upstream declares this as a @property; the decorator is
    # absent here — confirm against the original file.
    def vocab_size(self) -> int:
        """Number of tokens in the base vocabulary."""
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token.

        Returns a space-separated string of subword units; results are
        memoised in ``self.cache``.
        """
        # Mark the final symbol as word-ending before merging.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + BPE_TOKEN_MERGES),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Merge the best-ranked adjacent pair; stop when none is known.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case a lone newline carrying the merge marker.
        if (word == ('\n ' + BPE_TOKEN_MERGES)):
            word = ('\n' + BPE_TOKEN_MERGES)
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')
        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split text on whitespace and BPE-encode each piece.

        Raises:
            ValueError: if the tokenizer was built without a merges file.
        """
        if (self.bpe_ranks is None):
            raise ValueError('This tokenizer was instantiated without a `merges.txt` file, so that it can only be used for decoding, not for encoding.Make sure to provide `merges.txt` file at instantiation to enable encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join BPE pieces back into text by removing the in-word separator."""
        string = ' '.join(tokens)
        # Strip the BPE vocabulary separator to restore surface text.
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab (and merges, if present) into save_directory.

        Returns the written file paths; returns None (after logging) when
        save_directory is not a directory, and only the vocab path when the
        tokenizer has no merges.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merges_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        if (self.bpe_ranks is None):
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            # Write merges in rank order; warn if ranks are not consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merges_file)
def PR_curve(label_path, pred_path, num_total):
    """Compute precision/recall points over the top-ranked predictions.

    Args:
        label_path: pickle file mapping entity pair -> list of gold relations.
        pred_path: pickle file mapping entity pair -> (probability, relation).
        num_total: total number of gold positive facts (recall denominator).

    Returns:
        (Precision, Recall): parallel lists, sampled every 10 predictions
        while walking the predictions in decreasing confidence order.
    """
    # Renamed handles: the originals shadowed the builtin `input`.
    with open(label_path, 'rb') as label_file:
        label_entitypair = pickle.load(label_file)
    with open(pred_path, 'rb') as pred_file:
        pred_entitypair = pickle.load(pred_file)

    # Flatten predictions to (probability, entity_pair, relation) triples.
    list_pred = [(value[0], key, value[1]) for (key, value) in pred_entitypair.items()]
    # Rank by confidence and keep only the top predictions.
    list_pred.sort(key=(lambda item: item[0]), reverse=True)
    list_pred = list_pred[:2001]

    true_positive = 0
    Precision = []
    Recall = []
    for (i, (_prob, entity_pair, relation)) in enumerate(list_pred):
        # Pairs absent from the labels simply count as false positives
        # (the original raised KeyError on them).
        label_relations = label_entitypair.get(entity_pair, [])
        if (relation in label_relations):
            true_positive += 1
        if (((i % 10) == 0) and (i != 0)):
            # i is 0-based, so i + 1 predictions have been considered so far.
            # (The original divided by i, which could yield precision > 1.)
            Precision.append((true_positive / (i + 1)))
            Recall.append((true_positive / num_total))
    return (Precision, Recall)
def testPositionalArgs(run_cli):
    """Unknown arguments must be rejected with an 'unrecognized arguments'
    error for both the login and modify subcommands."""
    cases = [
        ('bugzilla login --xbadarg foo', 'unrecognized arguments: --xbadarg'),
        ('bugzilla modify 123456 --foobar --status NEW', 'unrecognized arguments: --foobar'),
    ]
    for command, expected_fragment in cases:
        output = run_cli(command, None, expectfail=True)
        assert expected_fragment in output
def test_minibatch_unit_variance_mlpg_gradcheck():
    """Batched unit-variance MLPG must match the single-sequence result and
    produce identical gradients for every batch element.

    The "expanded" tensors broadcast the same data across the batch
    dimension, so both the forward outputs and the per-element gradients
    should coincide with the unbatched computation.
    """
    static_dim = 2
    T = 5
    for windows in _get_windows_set():
        batch_size = 5
        torch.manual_seed(1234)
        # Random per-frame means, plus a batch view sharing the same storage.
        means = torch.rand(T, (static_dim * len(windows)))
        means_expanded = means.expand(batch_size, means.shape[0], means.shape[1])
        reshaped_means = torch.from_numpy(G.reshape_means(means.numpy(), static_dim))
        reshaped_means_expanded = reshaped_means.expand(batch_size, reshaped_means.shape[0], reshaped_means.shape[1])
        # Reference target from the numpy implementation with unit variances.
        y = G.mlpg(means.numpy(), np.ones((static_dim * len(windows))), windows)
        y = torch.from_numpy(y)
        y_expanded = y.expand(batch_size, y.size(0), y.size(1))
        means.requires_grad = True
        means_expanded.requires_grad = True
        reshaped_means.requires_grad = True
        reshaped_means_expanded.requires_grad = True
        # With unit variances MLPG reduces to a fixed linear map R.
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat1 = AF.unit_variance_mlpg(R, reshaped_means)
        y_hat2 = AF.unit_variance_mlpg(R, reshaped_means_expanded)
        # Every batch element of the batched output equals the single output.
        for i in range(batch_size):
            assert np.allclose(y_hat1.data.numpy(), y_hat2[i].data.numpy())
        nn.MSELoss()(y_hat1, y).backward()
        nn.MSELoss()(y_hat2, y_expanded).backward()
        # Gradients agree per batch element too.
        for i in range(batch_size):
            grad1 = reshaped_means.grad.data.numpy()
            grad2 = reshaped_means_expanded.grad[i].data.numpy()
            assert np.allclose(grad1, grad2, atol=1e-06)
        # Same checks for the non-reshaped means path.
        y_hat3 = AF.unit_variance_mlpg(R, means)
        y_hat4 = AF.unit_variance_mlpg(R, means_expanded)
        for i in range(batch_size):
            assert np.allclose(y_hat1.data.numpy(), y_hat3.data.numpy())
            assert np.allclose(y_hat3.data.numpy(), y_hat4[i].data.numpy())
        nn.MSELoss()(y_hat3, y).backward()
        nn.MSELoss()(y_hat4, y_expanded).backward()
        for i in range(batch_size):
            grad1 = means.grad.data.numpy()
            grad2 = means_expanded.grad[i].data.numpy()
            assert np.allclose(grad1, grad2, atol=1e-06)
def points_in_boxes_cpu(points, boxes):
    """Assign points to 3D boxes using the CPU kernel of the extension.

    Args:
        points: (N, 3) numpy array or torch tensor of xyz coordinates.
        boxes: (M, 7) numpy array or torch tensor — presumably
            (x, y, z, dx, dy, dz, heading); confirm against the C++ kernel.

    Returns:
        (M, N) int result (numpy array iff the inputs were numpy); nonzero
        where point n lies inside box m.
    """
    assert points.shape[1] == 3
    assert boxes.shape[1] == 7
    # Normalise both inputs to torch; remember whether to convert back.
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)
    indicator = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    # The extension fills `indicator` in place.
    roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), indicator)
    return indicator.numpy() if is_numpy else indicator
def _get_block_fn(stage_args):
    """Pop 'block_type' from stage_args and resolve it to a block class.

    Mutates stage_args (removes the 'block_type' key) and returns
    (block_class, remaining_stage_args).
    """
    block_type = stage_args.pop('block_type')
    assert (block_type in ('dark', 'edge', 'bottle'))
    block_classes = {'dark': DarkBlock, 'edge': EdgeBlock, 'bottle': BottleneckBlock}
    return (block_classes[block_type], stage_args)
def test_to_dict_no_proj4():
    """CRS parameters lacking a PROJ4 equivalent should still round-trip to
    a dict (with a warning); equal a/b spheres collapse to a single R."""
    crs = CRS(
        {
            'a': 6371229.0,
            'b': 6371229.0,
            'lon_0': -10.0,
            'o_lat_p': 30.0,
            'o_lon_p': 0.0,
            'o_proj': 'longlat',
            'proj': 'ob_tran',
        }
    )
    expected = {
        'R': 6371229,
        'lon_0': -10,
        'no_defs': None,
        'o_lat_p': 30,
        'o_lon_p': 0,
        'o_proj': 'longlat',
        'proj': 'ob_tran',
        'type': 'crs',
    }
    with pytest.warns(UserWarning):
        assert crs.to_dict() == expected
# NOTE(review): the bare "(eq=False, hash=False, repr=False)" below and the
# bare "_ki_protection" markers before several methods look like decorators
# whose "@" (and here the decorator name, presumably "attr.s") were lost in
# transit — confirm against the original source before relying on this file.
(eq=False, hash=False, repr=False)
class _LockImpl(AsyncContextManagerMixin):
    """Non-reentrant async mutex.

    At most one task owns the lock at a time; blocked acquirers queue in a
    ParkingLot and ownership is handed directly to the next waiter on
    release.
    """

    # Queue of tasks blocked in acquire().
    _lot: ParkingLot = attr.ib(factory=ParkingLot, init=False)
    # Task currently holding the lock, or None when unlocked.
    _owner: (Task | None) = attr.ib(default=None, init=False)

    def __repr__(self) -> str:
        if self.locked():
            s1 = 'locked'
            s2 = f' with {len(self._lot)} waiters'
        else:
            s1 = 'unlocked'
            s2 = ''
        return f'<{s1} {self.__class__.__name__} object at {id(self):#x}{s2}>'

    def locked(self) -> bool:
        """True while some task owns the lock."""
        return (self._owner is not None)

    # presumably a stripped KeyboardInterrupt-protection decorator
    _ki_protection
    def acquire_nowait(self) -> None:
        """Take the lock without blocking.

        Raises:
            RuntimeError: if the calling task already owns the lock.
            trio.WouldBlock: if another task owns it (or waiters are queued).
        """
        task = trio.lowlevel.current_task()
        if (self._owner is task):
            raise RuntimeError('attempt to re-acquire an already held Lock')
        elif ((self._owner is None) and (not self._lot)):
            self._owner = task
        else:
            raise trio.WouldBlock

    _ki_protection
    async def acquire(self) -> None:
        """Take the lock, parking in the lot until it is handed to us."""
        (await trio.lowlevel.checkpoint_if_cancelled())
        try:
            self.acquire_nowait()
        except trio.WouldBlock:
            # release() transfers ownership to us before unparking.
            (await self._lot.park())
        else:
            # Preserve checkpoint semantics on the fast path.
            (await trio.lowlevel.cancel_shielded_checkpoint())

    _ki_protection
    def release(self) -> None:
        """Release the lock, waking (and directly transferring ownership to)
        the next queued waiter if any.

        Raises:
            RuntimeError: if the calling task does not own the lock.
        """
        task = trio.lowlevel.current_task()
        if (task is not self._owner):
            raise RuntimeError("can't release a Lock you don't own")
        if self._lot:
            (self._owner,) = self._lot.unpark(count=1)
        else:
            self._owner = None

    def statistics(self) -> LockStatistics:
        """Snapshot of lock state for debugging/introspection."""
        return LockStatistics(locked=self.locked(), owner=self._owner, tasks_waiting=len(self._lot))
class MobileDevice():
    """Tracks a Bluetooth mobile device's pairing/connection state and its
    notification GATT characteristics; once everything is available it
    subscribes to notifications and attaches a DeviceCommunicator.
    """

    def __init__(self, path: str, server: ObserverAPI):
        self.server = server
        # D-Bus object path identifying the device.
        self.path = path
        # Set once subscribed; None while not ready.
        self.communicator: Optional[DeviceCommunicator] = None
        self.paired = False
        self.connected = False
        self.name: Optional[str] = None
        # Notification-service characteristics, discovered one by one.
        self.notification_source: Optional[BluezGattCharacteristicAPI] = None
        self.control_point: Optional[BluezGattCharacteristicAPI] = None
        self.data_source: Optional[BluezGattCharacteristicAPI] = None

    def set_notification_source(self, path: ObjPath) -> None:
        """Record the notification-source characteristic and retry setup."""
        self.unsubscribe()
        self.notification_source = BluezGattCharacteristicAPI.connect(path)
        self.try_subscribe()

    def set_control_point(self, path: ObjPath) -> None:
        """Record the control-point characteristic and retry setup."""
        self.unsubscribe()
        self.control_point = BluezGattCharacteristicAPI.connect(path)
        self.try_subscribe()

    def set_data_source(self, path: ObjPath) -> None:
        """Record the data-source characteristic and retry setup."""
        self.unsubscribe()
        self.data_source = BluezGattCharacteristicAPI.connect(path)
        self.try_subscribe()

    def set_paired(self, paired: bool) -> None:
        # Any state change invalidates the current subscription.
        self.unsubscribe()
        self.paired = paired
        self.try_subscribe()

    def set_connected(self, connected: bool) -> None:
        self.unsubscribe()
        self.connected = connected
        self.try_subscribe()

    def set_name(self, name: str) -> None:
        # Name arriving does not invalidate anything; just retry setup.
        self.name = name
        self.try_subscribe()

    def unsubscribe(self) -> None:
        """Drop the communicator so try_subscribe() can re-attach later."""
        self.communicator = None

    def try_subscribe(self) -> None:
        """Subscribe to notifications once all preconditions hold.

        Requires: paired, connected, a known name, all three characteristics
        discovered, and no communicator attached yet. Retries in the
        background (up to 120 attempts, 1s apart) because the phone may not
        accept the subscription immediately.
        """
        log.debug(f'{self.path}: {self.paired} {self.connected} {(not self.communicator)}')
        if (not (self.paired and self.connected and self.name and self.notification_source and self.control_point and self.data_source and (not self.communicator))):
            return
        log.info('Asking for notifications...')
        TaskRestarter(120, 1, self.try_asking, (lambda : log.info('Asking for notifications: success.')), (lambda : log.error('Failed to subscribe to notifications.'))).try_running_bg()

    def try_asking(self) -> bool:
        """One subscription attempt; returns True on success.

        Starts notifications on the data source before the notification
        source — presumably the required ordering; confirm against the
        protocol spec.
        """
        assert (self.notification_source and self.control_point and self.data_source)
        try:
            self.data_source.StartNotify()
            self.notification_source.StartNotify()
        except Exception as e:
            log.warn(f'Failed to start subscribe to notifications (is phone paired?): {e}')
            if (get_dbus_error_name(e) is not None):
                log.warn(f'Original error: {get_dbus_error_name(e)}')
            return False
        comm = DeviceCommunicator(self)
        comm.attach()
        self.communicator = comm
        return True

    def handle_action(self, notification_id: int, is_positive: bool) -> None:
        """Forward a user action on a notification, if we are attached."""
        if (self.communicator is not None):
            self.communicator.ask_for_action(notification_id, is_positive)
class AddDebugSignalPass(BasePass):
    """Pass that surfaces internal signals as top-level debug output pins.

    For every requested ``top.<path>`` signal it punches a chain of OutPorts
    (named ``debug_<i>``) through each enclosing component up to the top,
    connecting every level to the one below.
    """

    # Metadata slot whose value type is a set of debug pins.
    debug_pins = MetadataKey(set)

    def __call__(self, top, signal_names):
        # Translate user-facing "top.x.y" names into the internal "sx.y" form.
        internal_names = []
        for signal_name in signal_names:
            assert signal_name.startswith('top.')
            assert ('[' not in signal_name), "Currently don't support any array of components"
            internal_names.append(f's{signal_name[3:]}')
        # Sort by repr so pin numbering is deterministic across runs.
        targets = sorted(top.get_all_object_filter(lambda obj: repr(obj) in internal_names), key=repr)
        for pin_index, target in enumerate(targets):
            pin_name = f'debug_{pin_index}'
            # Walk outward from the signal's host, adding a forwarding port
            # at every level and wiring it to the port (or signal) below.
            inner = target
            host = target.get_host_component()
            while host is not None:
                outer = OutPort(inner.get_type())
                top.add_value_port(host, pin_name, outer)
                top.add_connection(outer, inner)
                inner = outer
                host = host.get_parent_object()
def read_data(in_f):
    """Yield (text, show_id, scene_index, talk_index) tuples from a JSON dump.

    Entries carrying a 'meta' key are skipped, as are lines that start with
    '[' (presumably stage directions — confirm against the dataset docs).
    """
    with io.open(in_f, 'r', encoding='utf-8') as json_data:
        shows = json.load(json_data)
        for show in shows:
            show_id = show['id']
            for scene_index, scene in enumerate(show['scenes']):
                for talk_index, talk in enumerate(scene):
                    if 'meta' in talk:
                        continue
                    line = talk['text']
                    if line.startswith('['):
                        continue
                    yield (line, show_id, scene_index, talk_index)
class SocketWrapper(AsyncExitStack):
    """Pairs an anyio socket stream with a GRPCConnection.

    Configures TCP keepalive/nodelay on the raw socket and, on context
    entry, starts a background writer task that drains the connection's
    pending bytes to the stream until the wrapper is closed.
    """

    def __init__(self, grpc_connection: GRPCConnection, stream: anyio.abc.SocketStream):
        super().__init__()
        self._set_socket_options(stream)
        self._stream = stream
        self._grpc_connection = grpc_connection
        # Single-shot event used to wake the writer; re-armed after each wait.
        self._flush_event = anyio.Event()
        self._running = True

    async def __aenter__(self):
        (await super().__aenter__())
        task_group = (await self.enter_async_context(anyio.create_task_group()))
        task_group.start_soon(self._writer_thread)

        async def callback():
            # Wake the writer so it can observe _running == False and exit.
            self._running = False
            self._flush_event.set()
        self.push_async_callback(callback)
        return self

    @staticmethod
    def _set_socket_options(stream: anyio.abc.SocketStream) -> None:
        """Enable TCP keepalive (300s idle, 30s interval, 5 probes where
        supported) and disable Nagle on the stream's raw socket.

        BUG FIX: this helper took only ``stream`` but was invoked as
        ``self._set_socket_options(stream)``, which passes ``self`` as the
        first positional argument and raised TypeError. Declared as a
        ``staticmethod`` so the bound call works as intended.
        """
        sock = stream.extra(anyio.abc.SocketAttribute.raw_socket)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 300)
        elif is_darwin():
            # macOS spells TCP_KEEPIDLE as TCP_KEEPALIVE (value 16).
            TCP_KEEPALIVE = 16
            sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, 300)
        if (not is_windows()):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 30)
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    async def _writer_thread(self):
        """Background task: drain data_to_send() to the socket, park on the
        flush event when idle, and exit once _running is cleared and all
        pending data has been sent."""
        while True:
            data = self._grpc_connection.data_to_send()
            if data:
                (await self._stream.send(data))
            elif self._running:
                (await self._flush_event.wait())
                # Events are single-shot; re-arm for the next flush request.
                self._flush_event = anyio.Event()
            else:
                return

    async def flush(self):
        """Ask the writer task to push any buffered outgoing data."""
        self._flush_event.set()

    async def recv(self, buffer_size: int):
        """Receive up to buffer_size bytes from the underlying stream."""
        return (await self._stream.receive(buffer_size))
# NOTE(review): the bare call below looks like a decorator that lost its "@"
# (and part of its name) in transit — upstream versioneer uses
# "@register_vcs_handler('git', 'pieces_from_vcs')". Confirm before running.
_vcs_handler('git', 'pieces_from_vcs')
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Collect version metadata ("pieces") for `root` from git.

    Uses `git describe` / `rev-parse` to fill a dict with: long/short commit
    ids, closest tag, distance from it, branch name, dirty flag, commit date,
    and an 'error' entry when the describe output cannot be parsed.

    Raises:
        NotThisMethod: when git is unavailable or `root` is not a git tree.
    """
    GITS = ['git']
    if (sys.platform == 'win32'):
        # On Windows git may only be reachable via its .cmd/.exe wrappers.
        GITS = ['git.cmd', 'git.exe']
    env = os.environ.copy()
    # GIT_DIR would redirect git away from `root`; drop it.
    env.pop('GIT_DIR', None)
    runner = functools.partial(runner, env=env)
    (_, rc) = runner(GITS, ['rev-parse', '--git-dir'], cwd=root, hide_stderr=True)
    if (rc != 0):
        if verbose:
            print(('Directory %s not under git control' % root))
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # Describe relative to tags matching the prefix followed by a digit.
    (describe_out, rc) = runner(GITS, ['describe', '--tags', '--dirty', '--always', '--long', '--match', f'{tag_prefix}[[:digit:]]*'], cwd=root)
    if (describe_out is None):
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    (full_out, rc) = runner(GITS, ['rev-parse', 'HEAD'], cwd=root)
    if (full_out is None):
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces['long'] = full_out
    pieces['short'] = full_out[:7]
    pieces['error'] = None
    (branch_name, rc) = runner(GITS, ['rev-parse', '--abbrev-ref', 'HEAD'], cwd=root)
    if ((rc != 0) or (branch_name is None)):
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if (branch_name == 'HEAD'):
        # Detached HEAD: pick a branch containing HEAD, preferring master.
        (branches, rc) = runner(GITS, ['branch', '--contains'], cwd=root)
        if ((rc != 0) or (branches is None)):
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split('\n')
        if ('(' in branches[0]):
            # First line may be "(HEAD detached at ...)" — not a branch name.
            branches.pop(0)
        # Strip the two-character "  " / "* " prefix from `git branch` output.
        branches = [branch[2:] for branch in branches]
        if ('master' in branches):
            branch_name = 'master'
        elif (not branches):
            branch_name = None
        else:
            branch_name = branches[0]
    pieces['branch'] = branch_name
    git_describe = describe_out
    # A trailing "-dirty" marks uncommitted changes; strip before parsing.
    dirty = git_describe.endswith('-dirty')
    pieces['dirty'] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex('-dirty')]
    if ('-' in git_describe):
        # Expected format: TAG-DISTANCE-gHEX
        mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)
        if (not mo):
            pieces['error'] = ("unable to parse git-describe output: '%s'" % describe_out)
            return pieces
        full_tag = mo.group(1)
        if (not full_tag.startswith(tag_prefix)):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print((fmt % (full_tag, tag_prefix)))
            pieces['error'] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'"
            return pieces
        pieces['closest-tag'] = full_tag[len(tag_prefix):]
        pieces['distance'] = int(mo.group(2))
        pieces['short'] = mo.group(3)
    else:
        # Bare hash: no tag found; distance becomes the total commit count.
        pieces['closest-tag'] = None
        (out, rc) = runner(GITS, ['rev-list', 'HEAD', '--left-right'], cwd=root)
        pieces['distance'] = len(out.split())
    # Commit timestamp, normalised toward ISO 8601 ("T" separator, no tz gap).
    date = runner(GITS, ['show', '-s', '--format=%ci', 'HEAD'], cwd=root)[0].strip()
    date = date.splitlines()[(- 1)]
    pieces['date'] = date.strip().replace(' ', 'T', 1).replace(' ', '', 1)
    return pieces