code
stringlengths
281
23.7M
class FakeFCI(pyscf.fci.direct_spin1_symm.FCI):
    """FCI solver wrapper that can override the electron count and add a
    constant diagonal ('fake') potential to the one-body Hamiltonian.

    Attributes:
        nelec: optional electron-count override applied to every call.
        fake_potential: optional scalar added to the diagonal of h1.
    """

    def __init__(self, mol, nelec=None):
        # When set, this replaces the nelec passed into kernel/make_rdm*/
        # spin_square below.
        self.nelec = nelec
        # Scalar diagonal shift for the one-electron integrals; off by default.
        self.fake_potential = None
        super().__init__(mol)

    def kernel(self, h1, h2, norb, nelec, *args, **kwargs):
        if (self.nelec is not None):
            nelec = self.nelec
        if (self.fake_potential is not None):
            # Shift h1 by a constant along its diagonal before solving.
            h1 = (h1 + (np.identity(norb) * self.fake_potential))
        return super().kernel(h1, h2, norb, nelec, *args, **kwargs)

    def make_rdm1(self, civec, norb, nelec):
        if (self.nelec is not None):
            nelec = self.nelec
        return super().make_rdm1(civec, norb, nelec)

    def make_rdm12(self, civec, norb, nelec):
        if (self.nelec is not None):
            nelec = self.nelec
        return super().make_rdm12(civec, norb, nelec)

    def spin_square(self, civec, norb, nelec):
        if (self.nelec is not None):
            nelec = self.nelec
        return super().spin_square(civec, norb, nelec)
def evaluate(prefix='./', mode='dev', evaluate_dir=None, evaluate_prefix=None, output_file=None, output_dir=None, python_evaluate=False, triplet=False):
    """Score a CTM hypothesis file against an STM reference.

    Preprocesses the hypothesis with shell helpers, then either computes WER
    in Python (python_evaluate=True) or shells out to sclite and scrapes its
    summary. Returns a float WER percentage (sclite path) or whatever
    wer_calculation returns (python path).

    NOTE(review): all paths are interpolated straight into os.system/os.popen
    shell strings — callers must pass trusted prefixes/filenames.
    """
    sclite_path = './software/sclite'
    print(os.getcwd())
    # Normalize the raw decoder output into CTM form (two-stage preprocess).
    os.system(f'bash {evaluate_dir}/preprocess.sh {(prefix + output_file)} {prefix}tmp.ctm {prefix}tmp2.ctm')
    # Sort the reference STM by utterance id so the merge/sclite can align it.
    os.system(f'cat {evaluate_dir}/{evaluate_prefix}-{mode}.stm | sort -k1,1 > {prefix}tmp.stm')
    os.system(f'python {evaluate_dir}/mergectmstm.py {prefix}tmp2.ctm {prefix}tmp.stm')
    os.system(f'cp {prefix}tmp2.ctm {prefix}out.{output_file}')
    if python_evaluate:
        ret = wer_calculation(f'{evaluate_dir}/{evaluate_prefix}-{mode}.stm', f'{prefix}out.{output_file}')
        if triplet:
            # Also score the '-conv' variant; its return value is discarded.
            wer_calculation(f'{evaluate_dir}/{evaluate_prefix}-{mode}.stm', f'{prefix}out.{output_file}', f'{prefix}out.{output_file}'.replace('.ctm', '-conv.ctm'))
        return ret
    if (output_dir is not None):
        if (not os.path.isdir((prefix + output_dir))):
            os.makedirs((prefix + output_dir))
        os.system(f'{sclite_path} -h {prefix}out.{output_file} ctm -r {prefix}tmp.stm stm -f 0 -o sgml sum rsum pra -O {(prefix + output_dir)}')
    else:
        os.system(f'{sclite_path} -h {prefix}out.{output_file} ctm -r {prefix}tmp.stm stm -f 0 -o sgml sum rsum pra')
    # Re-run sclite in dtl mode and parse the 'Error' line for the WER%.
    ret = os.popen(f'{sclite_path} -h {prefix}out.{output_file} ctm -r {prefix}tmp.stm stm -f 0 -o dtl stdout |grep Error').readlines()[0]
    return float(ret.split('=')[1].split('%')[0])
def concat_2d(ds: Dataset, dims: Tuple[(Hashable, Hashable)]) -> DataArray:
    """Concatenate every variable whose leading dimension is dims[0] along a
    shared second dimension dims[1].

    1-D variables gain a singleton dims[1] axis; 2-D variables have their
    second dimension renamed to dims[1]. Variables with a different leading
    dimension are skipped; more than two dimensions raises ValueError.
    """
    lead_dim, concat_dim = dims
    pieces = []
    for var in ds:
        arr = ds[var]
        if arr.dims[0] != lead_dim:
            continue
        if arr.ndim > 2:
            raise ValueError(f'All variables must have <= 2 dimensions (variable {var} has shape {arr.shape})')
        if arr.ndim == 2:
            pieces.append(arr.rename({arr.dims[1]: concat_dim}))
        else:
            pieces.append(arr.expand_dims(dim=concat_dim, axis=1))
    return xr.concat(pieces, dim=concat_dim)
def find_byte_range_of_section(path, start_string):
    """Locate the byte range [start, end] of a section in a text file.

    The section starts at the offset of the first line containing
    *start_string* and ends at the first blank line occurring more than 100
    bytes after the start. Either bound is None when not found.

    NOTE(review): each line is counted as len(line) + 1 even though lines
    from a text file already include their newline — presumably to account
    for CRLF sources; confirm before relying on exact byte offsets.
    """
    with open(path) as f:
        start = None
        end = None
        offset = 0
        for line in f:
            # Bug fix: compare against None — a section starting at byte 0
            # previously failed the old truthiness checks (`start and ...`).
            if ((start is not None) and (line.strip() == '') and ((offset - start) > 100)):
                end = offset
                break
            if ((start_string in line) and (start is None)):
                start = offset
            offset += (len(line) + len('\n'))
    return [start, end]
class INPDiff(object):
    """Diff two SWMM INP models section by section.

    Accepts swmmio.Model instances or paths to INP files; stores one
    INPSectionDiff per non-problematic section in self.diffs.
    """

    def __init__(self, model1=None, model2=None):
        m1 = model1
        m2 = model2
        # Accept file paths as well as Model objects.
        if isinstance(m1, str):
            m1 = swmmio.Model(m1)
        if isinstance(m2, str):
            m2 = swmmio.Model(m2)
        self.m1 = m1
        self.m2 = m2
        self.diffs = OrderedDict()
        m1_sects = get_inp_sections_details(m1.inp.path)
        m2_sects = get_inp_sections_details(m2.inp.path)
        sects = (list(m1_sects.keys()) + list(m2_sects.keys()))
        seen = set()
        # Order-preserving de-duplication (relies on set.add returning None).
        self.all_sections = [x for x in sects if (not ((x in seen) or seen.add(x)))]
        self.all_inp_objects = OrderedDict(m1_sects)
        self.all_inp_objects.update(m2_sects)
        for section in self.all_sections:
            # Sections known to be hard to diff are skipped entirely.
            if (section not in problem_sections):
                changes = INPSectionDiff(m1, m2, section)
                self.diffs[section] = changes

    def __str__(self):
        # Unified-diff style header, then one block per diffed section.
        s = '--- {}\n+++ {}\n\n'.format(self.m1.inp.path, self.m2.inp.path)
        diffs = '\n\n'.join(['{}\n{}'.format(sect, d.__str__()) for (sect, d) in self.diffs.items()])
        return (s + diffs)
class PlayChunkData(Packet):
    """Clientbound 'Chunk Data' play packet."""

    id = 32  # packet id (0x20)
    to = 1   # direction: clientbound

    def __init__(self, chunk: Chunk, full: bool) -> None:
        super().__init__()
        self.chunk = chunk
        # Whether this is a full chunk (biome data is only sent when full).
        self.full = full

    def encode(self) -> bytes:
        # Chunk coordinates followed by the full-chunk flag.
        out = ((Buffer.pack('i', self.chunk.x) + Buffer.pack('i', self.chunk.z)) + Buffer.pack('?', self.full))
        mask = 0
        chunk_sections_buffer = Buffer()
        # Build the primary bit mask and serialize each section.
        # NOTE(review): indentation was lost in this copy — assuming the
        # section write is guarded by y >= 0 together with the mask update.
        for (y, section) in self.chunk.sections.items():
            if (y >= 0):
                mask |= (1 << y)
                chunk_sections_buffer.write(Buffer.pack_chunk_section_blocks(section))
        out += (Buffer.pack_varint(mask) + Buffer.pack_nbt(nbt.TAG_Compound('', [self.chunk['Heightmaps']['MOTION_BLOCKING'], self.chunk['Heightmaps']['WORLD_SURFACE']])))
        if self.full:
            # Biome array: length-prefixed sequence of varints.
            out += (Buffer.pack_varint(len(self.chunk['Biomes'])) + b''.join([Buffer.pack_varint(n) for n in self.chunk['Biomes']]))
        out += (Buffer.pack_varint(len(chunk_sections_buffer)) + chunk_sections_buffer.read())
        # Trailing varint 0: empty block-entity list.
        out += Buffer.pack_varint(0)
        return out
class TestConstraintPropagation(unittest.TestCase):
    """Unit test for constraint.ConstraintPropagation.adjust_affinity."""

    def test_3by3_matrix(self):
        affinity = np.array([[1, 0.25, 0], [0.31, 1, 0], [0, 0, 1]])
        # Must-link constraint between nodes 0 and 1; node 2 unconstrained.
        constraint_matrix = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
        adjusted_affinity = constraint.ConstraintPropagation(alpha=0.6).adjust_affinity(affinity, constraint_matrix)
        # Propagation should pull the constrained pair's affinities up while
        # leaving the unconstrained node untouched.
        expected = np.array([[1, 0.97, 0], [1.03, 1, 0], [0, 0, 1]])
        np.testing.assert_allclose(np.array(adjusted_affinity), np.array(expected), atol=0.01)
class ExpressionStmt(Statement):
    """An expression used as a statement (its value is discarded)."""

    __slots__ = ('expr',)

    __match_args__ = ('expr',)

    # The wrapped expression node.
    expr: Expression

    def __init__(self, expr: Expression) -> None:
        super().__init__()
        self.expr = expr

    def accept(self, visitor: StatementVisitor[T]) -> T:
        # Standard visitor double-dispatch.
        return visitor.visit_expression_stmt(self)
class CheckFnSignatureTests(unittest.TestCase):
    """Tests for server._fn_accepts_additional_args: a handler must be able
    to accept the app config plus any extra CLI arguments."""

    def test_no_args(self):
        # A zero-argument handler cannot even receive the config -> rejected.
        def foo():
            pass
        with self.assertRaises(ValueError):
            server._fn_accepts_additional_args(foo, [])

    def test_var_args(self):
        # Bare *args absorbs the config and any number of extras.
        def foo(*args):
            pass
        server._fn_accepts_additional_args(foo, [])
        server._fn_accepts_additional_args(foo, ['arg1'])
        server._fn_accepts_additional_args(foo, ['arg1', 'arg2'])

    def test_config_arg_only(self):
        # Exactly one positional: fine without extras, rejects extras.
        def foo(app_config):
            pass
        server._fn_accepts_additional_args(foo, [])
        with self.assertRaises(ValueError):
            server._fn_accepts_additional_args(foo, ['extra_arg'])

    def test_config_arg_with_var_args(self):
        def foo(app_config, *args):
            pass
        server._fn_accepts_additional_args(foo, [])
        server._fn_accepts_additional_args(foo, ['arg1'])
        server._fn_accepts_additional_args(foo, ['arg1', 'arg2'])

    def test_additional_args(self):
        # A second positional can receive the extra-arguments list.
        def foo(app_config, args):
            pass
        server._fn_accepts_additional_args(foo, [])
        server._fn_accepts_additional_args(foo, ['arg1'])
        server._fn_accepts_additional_args(foo, ['arg1', 'arg2'])

    def test_additional_args_with_var_args(self):
        def foo(app_config, args, *extra):
            pass
        server._fn_accepts_additional_args(foo, [])
        server._fn_accepts_additional_args(foo, ['arg1'])
        server._fn_accepts_additional_args(foo, ['arg1', 'arg2'])

    def test_kwargs(self):
        # Keyword-only params and **kwargs do not block positional extras.
        def foo(app_config, arg1, *, bar, **kwargs):
            pass
        server._fn_accepts_additional_args(foo, [])
        server._fn_accepts_additional_args(foo, ['arg1', 'arg2', 'arg3'])
        server._fn_accepts_additional_args(foo, ['arg1'])
def test_opts():
    """Readonly parameters should yield disabled widgets in the tree."""
    paramSpec = [dict(name='bool', type='bool', readonly=True), dict(name='color', type='color', readonly=True)]
    param = pt.Parameter.create(name='params', type='group', children=paramSpec)
    tree = pt.ParameterTree()
    tree.setParameters(param)
    assert (_getWidget(param.param('bool')).isEnabled() is False)
    # Bug fix: the second assertion previously re-checked 'bool', leaving the
    # 'color' parameter declared above untested.
    assert (_getWidget(param.param('color')).isEnabled() is False)
_task('hubert_pretraining', dataclass=HubertPretrainingConfig)
# NOTE(review): the call above reads like a task-registration decorator
# (e.g. @register_task) whose '@' was lost in this copy — confirm upstream.
class HubertPretrainingTask(FairseqTask):
    """Fairseq task for HuBERT pretraining: loads audio manifests plus
    per-frame label streams and builds HubertDataset instances."""

    cfg: HubertPretrainingConfig

    def __init__(self, cfg: HubertPretrainingConfig, dictionaries: Dict[(str, Dictionary)]) -> None:
        super().__init__(cfg)
        logger.info(f'current directory is {os.getcwd()}')
        logger.info(f'HubertPretrainingTask Config {cfg}')
        self._dictionaries = dictionaries
        self._source_dictionary = None
        self._target_dictionary = None
        # With a single label stream, treat its dictionary as the target one.
        if (len(self.dictionaries) == 1):
            self._target_dictionary = self.dictionaries[0]
        self.blank_symbol = '<s>'

    # NOTE(review): the three accessors below read like @property methods and
    # setup_task like a @classmethod — decorators appear stripped in this copy.
    def source_dictionary(self) -> Optional[Dictionary]:
        return self._source_dictionary

    def target_dictionary(self) -> Optional[Dictionary]:
        return self._target_dictionary

    def dictionaries(self) -> List[Dictionary]:
        # One dictionary per configured label stream.
        return [self._dictionaries[l] for l in self.cfg.labels]

    def setup_task(cls, cfg: HubertPretrainingConfig, **kwargs) -> 'HubertPretrainingTask':
        label_dir = (cfg.data if (cfg.label_dir is None) else cfg.label_dir)
        # Load dict.<label>.txt per label stream when present; else None.
        dictionaries = {label: (Dictionary.load(f'{label_dir}/dict.{label}.txt') if os.path.exists(f'{label_dir}/dict.{label}.txt') else None) for label in cfg.labels}
        return cls(cfg, dictionaries)

    def get_label_dir(self) -> str:
        # Labels live beside the audio data unless label_dir overrides it.
        if (self.cfg.label_dir is None):
            return self.cfg.data
        return self.cfg.label_dir

    def load_dataset(self, split: str, **kwargs) -> None:
        """Build a HubertDataset for *split* from its tsv manifest and the
        per-label target files."""
        manifest = f'{self.cfg.data}/{split}.tsv'
        pad_list = [self._dictionaries[l].pad() for l in self.cfg.labels]
        eos_list = [self._dictionaries[l].eos() for l in self.cfg.labels]
        procs = [LabelEncoder(self._dictionaries[l]) for l in self.cfg.labels]
        paths = [f'{self.get_label_dir()}/{split}.{l}' for l in self.cfg.labels]
        self.datasets[split] = HubertDataset(manifest, sample_rate=self.cfg.sample_rate, label_paths=paths, label_rates=self.cfg.label_rate, pad_list=pad_list, eos_list=eos_list, label_processors=procs, max_keep_sample_size=None, min_keep_sample_size=self.cfg.min_sample_size, max_sample_size=self.cfg.max_sample_size, pad_audio=self.cfg.pad_audio, normalize=self.cfg.normalize, store_labels=False, random_crop=self.cfg.random_crop, single_target=self.cfg.single_target)

    def max_positions(self) -> Tuple[(int, int)]:
        # Effectively unbounded source/target lengths.
        return (sys.maxsize, sys.maxsize)

    def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
        # No filtering: the dataset handles cropping/padding itself.
        return indices
def test_swift_session_class():
    """A SwiftSession built from explicit credentials should capture them and
    expose them through get_credential_options()."""
    session = SwiftSession(swift_storage_url='foo', swift_auth_token='bar')
    # Credentials were captured at construction time.
    assert session._creds
    assert session.get_credential_options()['SWIFT_STORAGE_URL'] == 'foo'
    assert session.get_credential_options()['SWIFT_AUTH_TOKEN'] == 'bar'
class LatentEditor(object):
    """Applies InterFaceGAN latent-space edits and renders them through a
    StyleGAN generator.

    NOTE(review): direction tensors are loaded from fixed relative paths and
    moved to CUDA — requires a GPU and the editing/ directory on disk.
    """

    def __init__(self, stylegan_generator):
        self.generator = stylegan_generator
        # Precomputed semantic edit directions keyed by attribute name.
        self.interfacegan_directions = {'age': torch.load('editing/interfacegan_directions/age.pt').cuda(), 'smile': torch.load('editing/interfacegan_directions/smile.pt').cuda(), 'pose': torch.load('editing/interfacegan_directions/pose.pt').cuda()}

    def apply_interfacegan(self, latents, direction, factor=1, factor_range=None):
        """Shift *latents* along a named direction, either by one factor or
        by every integer factor in factor_range; returns rendered images."""
        edit_latents = []
        direction = self.interfacegan_directions[direction]
        if (factor_range is not None):
            # One edited copy per integer factor; stacked as (batch, steps, ...).
            for f in range(*factor_range):
                edit_latent = (latents + (f * direction))
                edit_latents.append(edit_latent)
            edit_latents = torch.stack(edit_latents).transpose(0, 1)
        else:
            edit_latents = (latents + (factor * direction))
        return self._latents_to_image(edit_latents)

    def _latents_to_image(self, all_latents):
        # Render each latent batch deterministically (no noise randomization).
        sample_results = {}
        with torch.no_grad():
            for (idx, sample_latents) in enumerate(all_latents):
                (images, _) = self.generator([sample_latents], randomize_noise=False, input_is_latent=True)
                sample_results[idx] = [tensor2im(image) for image in images]
        return sample_results
# NOTE(review): both parametrize decorators below lost their '@pytest.mark.'
# prefix in this copy (leaving invalid syntax); reconstructed so the test
# collects correctly — confirm against the original module.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_tasks_get(db, client, username, password, project_id):
    """GET project_update_tasks: 200 for users with change permission,
    403 for other authenticated users, 302 (login redirect) for anonymous."""
    client.login(username=username, password=password)
    url = reverse('project_update_tasks', args=[project_id])
    response = client.get(url)
    if (project_id in change_project_permission_map.get(username, [])):
        assert (response.status_code == 200)
    elif password:
        assert (response.status_code == 403)
    else:
        assert (response.status_code == 302)
class ImageNetTrain(ImageNetBase):
    """ImageNet-1k train split: fetches the tarball via AcademicTorrents when
    missing, extracts the tar-of-tars, and writes a flat JPEG filelist."""

    NAME = 'ILSVRC2012_train'
    # NOTE(review): the URL literal was garbled in this copy (it swallowed
    # the following assignments and broke the syntax); left empty — restore
    # the original dataset page URL before use.
    URL = ''
    AT_HASH = 'a306397ccf9c2eadc254227c0fd938e2'
    FILES = ['ILSVRC2012_img_train.tar']
    # Expected byte size of FILES[0], used to validate a pre-existing tar.
    # NOTE(review): this was [] in this copy, which makes the SIZES[0] check
    # below raise IndexError; value restored from the upstream
    # implementation — confirm.
    SIZES = [147897477120]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.process_images = process_images
        self.data_root = data_root
        super().__init__(**kwargs)

    def _prepare(self):
        """Download (if needed), extract, and index the training images."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # Default location under the XDG cache directory.
            cachedir = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
        self.datadir = os.path.join(self.root, 'data')
        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
        self.expected_length = 1281167  # official ImageNet train image count
        self.random_crop = retrieve(self.config, 'ImageNetTrain/random_crop', default=True)
        if (not tdu.is_prepared(self.root)):
            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
            datadir = self.datadir
            if (not os.path.exists(datadir)):
                path = os.path.join(self.root, self.FILES[0])
                # (Re-)download via AcademicTorrents when missing or truncated.
                if ((not os.path.exists(path)) or (not (os.path.getsize(path) == self.SIZES[0]))):
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert (atpath == path)
                print('Extracting {} to {}'.format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): tarfile.extractall is vulnerable to path
                # traversal on untrusted archives; the torrent hash pins the
                # content here.
                with tarfile.open(path, 'r:') as tar:
                    tar.extractall(path=datadir)
                print('Extracting sub-tars.')
                # The train tar contains one inner tar per synset class.
                subpaths = sorted(glob.glob(os.path.join(datadir, '*.tar')))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:(- len('.tar'))]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, 'r:') as tar:
                        tar.extractall(path=subdir)
            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = ('\n'.join(filelist) + '\n')
            with open(self.txt_filelist, 'w') as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
def decode_subframe(inp, blocksize, sampledepth):
    """Decode one FLAC subframe into a list of *blocksize* samples.

    Reads the subframe header (padding bit, 6-bit type, wasted-bits flag)
    and dispatches on the subframe type. Raises FLACDecodeException for
    reserved type codes.
    """
    inp.read_uint(1)  # zero padding bit
    subframe_type = inp.read_uint(6)
    # 'Wasted bits' flag: a unary count follows when the flag bit is 1.
    shift = inp.read_uint(1)
    if (shift == 1):
        while (inp.read_uint(1) == 0):
            shift += 1
    sampledepth -= shift
    if (subframe_type == 0):
        # Constant subframe: one sample value repeated for the whole block.
        result = ([inp.read_signed_int(sampledepth)] * blocksize)
    elif (subframe_type == 1):
        # Verbatim subframe: raw samples, no prediction.
        result = [inp.read_signed_int(sampledepth) for _ in range(blocksize)]
    elif (8 <= subframe_type <= 12):
        # Fixed prediction of order (type - 8).
        result = decode_fixed_prediction_subframe(inp, (subframe_type - 8), blocksize, sampledepth)
    elif (32 <= subframe_type <= 63):
        # LPC subframe of order (type - 31).
        result = decode_linear_predictive_coding_subframe(inp, (subframe_type - 31), blocksize, sampledepth)
    else:
        raise FLACDecodeException('Reserved subframe type')
    # Undo the wasted-bits shift.
    return [(v << shift) for v in result]
class WithInternationalPricingPipelineEngine(zf.WithFXRates, WithInternationalDailyBarData):
    """Fixture mixin wiring per-country equity pricing loaders into a
    SimplePipelineEngine.

    NOTE(review): init_class_fixtures and get_loader take 'cls' and read
    like @classmethod methods whose decorators were stripped in this copy.
    """

    def init_class_fixtures(cls):
        super(WithInternationalPricingPipelineEngine, cls).init_class_fixtures()
        adjustments = NullAdjustmentReader()
        # One pricing loader per country domain, sharing the (null)
        # adjustments reader and the in-memory FX rate reader.
        cls.loaders = {GB_EQUITIES: EquityPricingLoader(cls.daily_bar_readers['XLON'], adjustments, cls.in_memory_fx_rate_reader), US_EQUITIES: EquityPricingLoader(cls.daily_bar_readers['XNYS'], adjustments, cls.in_memory_fx_rate_reader), CA_EQUITIES: EquityPricingLoader(cls.daily_bar_readers['XTSE'], adjustments, cls.in_memory_fx_rate_reader)}
        cls.engine = SimplePipelineEngine(get_loader=cls.get_loader, asset_finder=cls.asset_finder)

    def get_loader(cls, column):
        # Dispatch to the loader registered for the column's country domain.
        return cls.loaders[column.domain]

    def run_pipeline(self, pipeline, start_date, end_date):
        return self.engine.run_pipeline(pipeline, start_date, end_date)
def get_class_by_name(problem_name, model_name):
    """Resolve a (problem_name, model_name) pair to its framework class.

    Raises NotImplementedError for unknown model names and for the
    'dedicated' problem; falls through (returning None) for any other
    problem name, matching the original behavior.
    """
    if problem_name == 'conditioned_separation':
        registry = {
            'CUNET_TFC_FiLM': DCUN_TFC_FiLM_Framework,
            'CUNET_TFC_FiLM_TDF': DCUN_TFC_FiLM_TDF_Framework,
            'CUNET_TFC_FiLM_LaSAFT': DCUN_TFC_FiLM_LaSAFT_Framework,
            'CUNET_TFC_GPoCM': DCUN_TFC_GPoCM_Framework,
            'CUNET_TFC_GPoCM_TDF': DCUN_TFC_GPoCM_TDF_Framework,
            'CUNET_TFC_GPoCM_LaSAFT': DCUN_TFC_GPoCM_LaSAFT_Framework,
            'lasaft_net': DCUN_TFC_GPoCM_LaSAFT_Framework,
            'CUNET_TFC_GPoCM_LightSAFT': DCUN_TFC_GPoCM_LightSAFT_Framework,
            'lightsaft_net': DCUN_TFC_GPoCM_LightSAFT_Framework,
        }
        if model_name in registry:
            return registry[model_name]
        raise NotImplementedError
    elif problem_name == 'dedicated':
        raise NotImplementedError
def test_ioc_file(runner, mocker):
    """The CLI should read IOCs from --iocfile and forward them to
    nested_process_search tagged with the file path."""
    # Stub out authentication and the actual search.
    mocked_func = mocker.patch('products.vmware_cb_response.CbResponse._authenticate')
    mocked_nested_process_search = mocker.patch('products.vmware_cb_response.CbResponse.nested_process_search')
    with runner.isolated_filesystem() as temp_dir:
        ioc_file_path = os.path.join(temp_dir, 'ioc_list.txt')
        with open(ioc_file_path, 'w') as deffile:
            deffile.write('127.0.0.1')
        result = runner.invoke(cli, ['--iocfile', ioc_file_path, '--ioctype', 'ipaddr'])
        assert ('Processing IOC file' in result.output)
        mocked_func.assert_called_once()
        # One search call, tagged with the IOC file and carrying its contents.
        mocked_nested_process_search.assert_called_once_with(Tag(f'IOC - {ioc_file_path}', 'ioc_list.txt'), {'ipaddr': ['127.0.0.1']}, {})
def test_executor_should_append_subdirectory_for_git(mocker: MockerFixture, tmp_venv: VirtualEnv, pool: RepositoryPool, config: Config, artifact_cache: ArtifactCache, io: BufferedIO, mock_file_downloads: None, wheel: Path) -> None:
    """Installing a git package with source_subdirectory must hand the chef
    the subdirectory path, not the repository root."""
    # NOTE(review): the source_url literal was garbled in this copy (it
    # swallowed the following keyword and broke the syntax); reconstructed
    # as the demo fixture URL used by sibling tests — confirm.
    package = Package('demo', '0.1.2', source_type='git', source_reference='master', source_resolved_reference='123456', source_url='https://github.com/demo/demo.git', source_subdirectory='two')
    chef = Chef(artifact_cache, tmp_venv, Factory.create_pool(config))
    chef.set_directory_wheel(wheel)
    spy = mocker.spy(chef, 'prepare')
    executor = Executor(tmp_venv, pool, config, io)
    executor._chef = chef
    executor.execute([Install(package)])
    # The chef must be asked to prepare the subdirectory, not the repo root.
    archive_arg = spy.call_args[0][0]
    assert (archive_arg == (tmp_venv.path / 'src/demo/subdirectories/two'))
def _get_command_doc_count(cmd, parser):
    """Yield asciidoc lines documenting a command's count argument, if any.

    Raises KeyError (chained) when a count parameter exists but neither its
    own name nor the generic 'count' key has a description in
    parser.arg_descs.
    """
    for param in inspect.signature(cmd.handler).parameters.values():
        if (cmd.get_arg_info(param).value in cmd.COUNT_COMMAND_VALUES):
            (yield '')
            (yield '==== count')
            try:
                (yield parser.arg_descs[param.name])
            except KeyError:
                # Fall back to a generic description under the 'count' key.
                try:
                    (yield parser.arg_descs['count'])
                except KeyError as e:
                    raise KeyError('No description for count arg {!r} of command {!r}!'.format(param.name, cmd.name)) from e
def correct_order(node_id, sampled_nodes, train_labels, multi, nlabel):
    """Pick out the labels (and positions) of nodes in *node_id* that belong
    to *sampled_nodes*, preserving node_id order.

    With multi=True each label becomes an nlabel-length multi-hot int
    vector; otherwise the raw train_labels entry is used. Returns
    (labels_array, positions_array).
    """
    labels = []
    positions = []
    for pos, node in enumerate(node_id):
        if node not in sampled_nodes:
            continue
        if multi:
            # Multi-label case: set a 1 at every class index for this node.
            one_hot = np.zeros(nlabel).astype(int)
            one_hot[train_labels[node]] = 1
            labels.append(one_hot)
        else:
            labels.append(train_labels[node])
        positions.append(pos)
    return (np.array(labels), np.array(positions))
def get_member_flags(name: str, itype: Instance, class_obj: bool=False) -> set[int]:
    """Compute protocol-compatibility flags (IS_VAR, IS_SETTABLE,
    IS_CLASSVAR, IS_CLASS_OR_STATIC) for member *name* of *itype*.

    class_obj=True means the member is being accessed on the class object
    rather than an instance.
    """
    info = itype.type
    method = info.get_method(name)
    # A custom __setattr__ makes otherwise read-only members settable.
    setattr_meth = info.get_method('__setattr__')
    if method:
        if isinstance(method, Decorator):
            if (method.var.is_staticmethod or method.var.is_classmethod):
                return {IS_CLASS_OR_STATIC}
            elif method.var.is_property:
                return {IS_VAR}
        elif method.is_property:
            # Properties are represented as overloads (getter [+ setter]).
            assert isinstance(method, OverloadedFuncDef)
            dec = method.items[0]
            assert isinstance(dec, Decorator)
            if (dec.var.is_settable_property or setattr_meth):
                return {IS_VAR, IS_SETTABLE}
            else:
                return {IS_VAR}
        # Plain methods carry no member flags.
        return set()
    node = info.get(name)
    if (not node):
        if setattr_meth:
            return {IS_SETTABLE}
        # Fall back to synthesized extra attributes (e.g. from plugins).
        if (itype.extra_attrs and (name in itype.extra_attrs.attrs)):
            flags = set()
            if (name not in itype.extra_attrs.immutable):
                flags.add(IS_SETTABLE)
            return flags
        return set()
    v = node.node
    if isinstance(v, Var):
        if v.is_property:
            return {IS_VAR}
        flags = {IS_VAR}
        # Final variables cannot be reassigned.
        if (not v.is_final):
            flags.add(IS_SETTABLE)
        if v.is_classvar:
            flags.add(IS_CLASSVAR)
        # On a class object, inferred (unannotated) vars behave as classvars.
        if (class_obj and v.is_inferred):
            flags.add(IS_CLASSVAR)
        return flags
    return set()
class Settings():
    """Aggregate of all application settings, persisted via pickle.

    NOTE(review): pickle.load on settings.pickle executes arbitrary code if
    the file is tampered with — only load from trusted locations.
    """

    # Pickle file the settings round-trip through.
    _settings_file = 'settings.pickle'

    def __init__(self):
        self.book = BookSettings()
        self.database = DatabaseSettings()
        self.moveSelection = MoveSelectionSettings()
        self.engine = EngineSettings()
        # Overwrite the defaults above with any previously saved values.
        self.load_from_file()

    def save_to_file(self):
        """Pickle the whole Settings object to _settings_file."""
        with open(self._settings_file, 'wb') as file:
            pickle.dump(self, file, protocol=pickle.HIGHEST_PROTOCOL)
        logging.info(f'Saved settings to {self._settings_file}')

    def load_from_file(self):
        """Load saved settings if the file exists; otherwise keep defaults."""
        if (not os.path.exists(self._settings_file)):
            logging.info(f'No settings file {self._settings_file} found, skipping loading settings')
            return
        with open(self._settings_file, 'rb') as file:
            from_file = pickle.load(file)
        # Copy fields individually rather than replacing self wholesale.
        self.book = from_file.book
        self.database = from_file.database
        self.moveSelection = from_file.moveSelection
        self.engine = from_file.engine
        logging.info(f'Loaded settings from {self._settings_file}')
def _make_send_recv_callbacks(socket: WebSocketConnection) -> tuple[(SendCoroutine, RecvCoroutine)]:
    """Build a (send, recv) coroutine pair that JSON-(de)serializes messages
    over *socket*. recv raises Stop() when the socket yields None (closed)."""

    async def send_json(value: Any) -> None:
        await socket.send(json.dumps(value))

    async def recv_json() -> Any:
        raw = await socket.recv()
        if raw is None:
            # None signals the connection is finished.
            raise Stop()
        return json.loads(raw)

    return (send_json, recv_json)
class DenseWTUnet(nn.Module):
    """Dense U-net in the wavelet domain: DWT downsamples by packing the 4
    subbands into channels (x4 channels), IWT inverts it; dense blocks
    process each level with a concatenation skip connection and a final
    residual back to the input."""

    def __init__(self, growth_rate=32, block_config=(6, 12), num_init_features=4, bn_size=4, drop_rate=0):
        super(DenseWTUnet, self).__init__()
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        # Stem: 1x1 conv + BN + ReLU over the 4 input channels.
        self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(4, num_init_features, kernel_size=1, stride=1, padding=0, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True))]))
        num_layers1 = block_config[0]
        num_layers2 = block_config[1]
        self.block1 = common._DenseBlock(num_layers1, num_init_features, bn_size, growth_rate, drop_rate)
        # Dense blocks grow channels by growth_rate per layer.
        num_features1 = (num_init_features + (growth_rate * num_layers1))
        # DWT multiplies channels by 4 before block2.
        self.block2 = common._DenseBlock(num_layers2, (num_features1 * 4), bn_size, growth_rate, drop_rate)
        num_features2 = ((num_features1 * 4) + (growth_rate * num_layers2))
        # Bottleneck compression to 256 channels.
        self.features1 = nn.Sequential(OrderedDict([('conv11', nn.Conv2d(num_features2, 256, kernel_size=1, stride=1, padding=0, bias=False)), ('norm11', nn.BatchNorm2d(256)), ('relu11', nn.ReLU(inplace=True))]))
        self.block3 = common._DenseBlock(num_layers2, 256, bn_size, growth_rate, drop_rate)
        # After IWT the channel count drops by 4; compress to 24 for the skip.
        self.features2 = nn.Sequential(OrderedDict([('conv22', nn.Conv2d(160, 24, kernel_size=1, stride=1, padding=0, bias=False)), ('norm22', nn.BatchNorm2d(24)), ('relu22', nn.ReLU(inplace=True))]))
        self.block4 = common._DenseBlock(num_layers1, (num_features1 + 24), bn_size, growth_rate, drop_rate)
        num_features3 = (220 + (growth_rate * num_layers1))
        # Project back to 4 channels for the residual with the input.
        self.features3 = nn.Sequential(OrderedDict([('conv33', nn.Conv2d(num_features3, 4, kernel_size=1, stride=1, padding=0, bias=False)), ('norm33', nn.BatchNorm2d(4)), ('relu33', nn.ReLU(inplace=True))]))
        # Standard Kaiming/constant initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x1 = self.features(x)
        x21 = self.block1(x1)
        # Encoder: wavelet downsample, dense processing, bottleneck.
        x2 = self.block3(self.features1(self.block2(self.DWT(x21))))
        # Decoder: inverse wavelet, compress, fuse with the skip, refine.
        x3 = self.features2(self.IWT(x2))
        x4 = torch.cat((x21, x3), 1)
        x4 = self.block4(x4)
        # Residual connection back to the (wavelet-domain) input.
        x5 = (self.features3(x4) + x)
        x_ehan = self.IWT(x5)
        return (x5, x_ehan)
def test_prepare_uv_t_counts():
    """Check the T-count of PrepareUV (forward + adjoint) against the
    closed-form first-quantization cost, up to a known bookkeeping
    difference (comp_diff)."""
    num_bits_p = 6
    eta = 10
    num_atoms = 10
    lambda_zeta = 10
    num_bits_nuc_pos = 8
    # M = 2^(2*n_p + 3) sets the arithmetic precision; n_M is its bit width.
    m_param = (2 ** ((2 * num_bits_p) + 3))
    num_bits_m = (m_param - 1).bit_length()
    # Closed-form Toffoli cost, accumulated term by term.
    expected_cost = ((((3 * (num_bits_p ** 2)) + num_bits_p) + ((4 * num_bits_m) * (num_bits_p + 1))) + 4)
    expected_cost += ((((2 * 4) * (num_bits_p - 1)) + (6 * num_bits_p)) + 2)
    expected_cost += (lambda_zeta + int(np.ceil((lambda_zeta ** 0.5))))
    prep = PrepareUVFirstQuantization(num_bits_p, eta, num_atoms, m_param, lambda_zeta, num_bits_nuc_pos)
    (_, counts) = prep.call_graph()
    qual_cost = counts[TGate()]
    # Add the adjoint's T count as well.
    prep = PrepareUVFirstQuantization(num_bits_p, eta, num_atoms, m_param, lambda_zeta, num_bits_nuc_pos, adjoint=True)
    (_, counts) = prep.call_graph()
    qual_cost += counts[TGate()]
    # Convert T gates to Toffoli-equivalents (4 T per Toffoli).
    qual_cost //= 4
    # Known constant discrepancy against the paper's formula.
    comp_diff = 1
    assert (qual_cost == (expected_cost - comp_diff))
class Install():
    """Drives installation of missing pip/conda dependencies for the
    detected environment."""

    def __init__(self, environment):
        self.output = environment.output
        self.env = environment
        # Interactive confirmation only for a bare (non-installer,
        # non-updater) run.
        if ((not self.env.is_installer) and (not self.env.updater)):
            self.ask_continue()
        self.check_missing_dep()
        self.check_conda_missing_dep()
        if (self.env.updater and (not self.env.missing_packages) and (not self.env.conda_missing_packages)):
            self.output.info('All Dependencies are up to date')
            return
        self.install_missing_dep()
        if self.env.updater:
            return
        self.output.info("All python3 dependencies are met.\r\nYou are good to go.\r\n\r\nEnter: 'python faceswap.py -h' to see the options\r\n 'python faceswap.py gui' to launch the GUI")

    def ask_continue(self):
        """Prompt the user; abort unless they answer yes."""
        inp = input('Please ensure your System Dependencies are met. Continue? [y/N] ')
        if (inp in ('', 'N', 'n')):
            self.output.error('Please install system dependencies to continue')
            exit(1)

    def check_missing_dep(self):
        """Collect required pip packages that are absent or version-mismatched."""
        for pkg in self.env.required_packages:
            pkg = self.check_os_requirements(pkg)
            if (pkg is None):
                # Package not applicable to this OS.
                continue
            key = pkg.split('==')[0]
            if self.env.is_conda:
                # Conda may use a different package name.
                key = CONDA_MAPPING.get(key, (key, None))[0]
            if (key not in self.env.installed_packages):
                self.env.missing_packages.append(pkg)
                continue
            elif (len(pkg.split('==')) > 1):
                # Pinned version present but mismatched -> reinstall.
                if (pkg.split('==')[1] != self.env.installed_packages.get(key)):
                    self.env.missing_packages.append(pkg)
                    continue

    # NOTE(review): defined without 'self' — likely a @staticmethod whose
    # decorator was stripped in this copy; confirm against the original.
    def check_os_requirements(package):
        """Resolve a 'pkg; sys_platform == ...' marker; None if not for this OS."""
        if ((';' not in package) and ('sys_platform' not in package)):
            return package
        package = ''.join(package.split())
        (pkg, tags) = package.split(';')
        tags = tags.split('==')
        sys_platform = tags[(tags.index('sys_platform') + 1)].replace('"', '').replace("'", '')
        if (sys_platform == sys.platform):
            return pkg
        return None

    def check_conda_missing_dep(self):
        """Collect required conda packages that are absent or mismatched."""
        if (not self.env.is_conda):
            return
        for pkg in self.env.conda_required_packages:
            key = pkg[0].split('==')[0]
            if (key not in self.env.installed_packages):
                self.env.conda_missing_packages.append(pkg)
                continue
            elif (len(pkg[0].split('==')) > 1):
                if (pkg[0].split('==')[1] != self.env.installed_conda_packages.get(key)):
                    self.env.conda_missing_packages.append(pkg)
                    continue

    def install_missing_dep(self):
        """Install conda packages first, then pip packages."""
        if self.env.conda_missing_packages:
            self.install_conda_packages()
        if self.env.missing_packages:
            self.install_python_packages()

    def install_python_packages(self):
        """Install each missing pip package, preferring conda when possible."""
        self.output.info('Installing Required Python Packages. This may take some time...')
        for pkg in self.env.missing_packages:
            if (self.env.is_conda and (not pkg.startswith('git'))):
                verbose = (pkg.startswith('tensorflow') or self.env.updater)
                pkg = CONDA_MAPPING.get(pkg, (pkg, None))
                channel = (None if (len(pkg) != 2) else pkg[1])
                pkg = pkg[0]
                # Only fall through to pip when conda cannot provide it.
                if self.conda_installer(pkg, verbose=verbose, channel=channel, conda_only=False):
                    continue
            self.pip_installer(pkg)

    def install_conda_packages(self):
        self.output.info('Installing Required Conda Packages. This may take some time...')
        for pkg in self.env.conda_missing_packages:
            channel = (None if (len(pkg) != 2) else pkg[1])
            self.conda_installer(pkg[0], channel=channel, conda_only=True)

    def conda_installer(self, package, channel=None, verbose=False, conda_only=False):
        """Run 'conda install'; return True on success, False on failure."""
        success = True
        condaexe = ['conda', 'install', '-y']
        if ((not verbose) or self.env.updater):
            condaexe.append('-q')
        if channel:
            condaexe.extend(['-c', channel])
        condaexe.append(package)
        self.output.info('Installing {}'.format(package))
        # Windows needs shell=True for conda to resolve on PATH.
        shell = (self.env.os_version[0] == 'Windows')
        try:
            if verbose:
                run(condaexe, check=True, shell=shell)
            else:
                # Silence conda output in quiet mode.
                with open(os.devnull, 'w') as devnull:
                    run(condaexe, stdout=devnull, stderr=devnull, check=True, shell=shell)
        except CalledProcessError:
            if (not conda_only):
                self.output.info('{} not available in Conda. Installing with pip'.format(package))
            else:
                self.output.warning("Couldn't install {} with Conda. Please install this package manually".format(package))
            success = False
        return success

    def pip_installer(self, package):
        """Run pip install for a single package."""
        pipexe = [sys.executable, '-m', 'pip']
        pipexe.extend(['install', '--no-cache-dir'])
        if (not self.env.updater):
            pipexe.append('-qq')
        # Install to the user site unless running as admin or in a venv.
        if ((not self.env.is_admin) and (not self.env.is_virtualenv)):
            pipexe.append('--user')
        msg = 'Installing {}'.format(package)
        self.output.info(msg)
        pipexe.append(package)
        try:
            run(pipexe, check=True)
        except CalledProcessError:
            self.output.warning("Couldn't install {} with pip. Please install this package manually".format(package))
class RtePVP(PVP):
    """Pattern-verbalizer pair for RTE: entailment -> 'Yes',
    not_entailment -> 'No'."""

    VERBALIZER = {'not_entailment': ['No'], 'entailment': ['Yes']}

    def get_parts(self, example: InputExample) -> FilledPattern:
        """Build prompt parts and block flags for pattern_id 1.

        block_flag 1 marks the trainable prompt token ('the'); 0 marks
        frozen tokens. self.mask is the verbalizer slot.
        """
        text_a = self.shortenable(example.text_a)
        # Strip trailing punctuation so the appended '?' reads naturally.
        text_b = self.shortenable(example.text_b.rstrip(string.punctuation))
        if (self.pattern_id == 1):
            string_list_a = [text_a, 'Question:', text_b, '?', 'the', 'Answer:', self.mask, '.']
            string_list_b = []
            block_flag_a = [0, 0, 0, 0, 1, 0, 0, 0]
            block_flag_b = []
            # Each token list must align one-to-one with its flag list.
            assert (len(string_list_a) == len(block_flag_a))
            assert (len(string_list_b) == len(block_flag_b))
            return (string_list_a, string_list_b, block_flag_a, block_flag_b)
        else:
            raise ValueError('unknown pattern_id.')

    def verbalize(self, label) -> List[str]:
        return RtePVP.VERBALIZER[label]
def build_lr_scheduler(optimizer, lr, lr_clip, lr_decay_list, lr_decay_rate, last_epoch):
    """Build a LambdaLR whose multiplier decays by *lr_decay_rate* at each
    epoch listed in *lr_decay_list*, floored at lr_clip / lr (so the
    effective learning rate never drops below lr_clip)."""
    # Minimum allowed multiplier (same value every call; hoisted for clarity).
    floor = lr_clip / lr

    def lr_lambda(epoch):
        multiplier = 1
        for milestone in lr_decay_list:
            if epoch >= milestone:
                multiplier = multiplier * lr_decay_rate
        return max(multiplier, floor)

    return lr_sched.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
# NOTE(review): the route decorator below lost its '@app.route' prefix in
# this copy (leaving invalid syntax); reconstructed — confirm the Flask app
# or blueprint object name against the rest of the module.
@app.route('/api/file_system/move', methods=['POST'])
def move_files() -> Response:
    """Move file-tree nodes (react-dnd-treeview ids) under new parent
    directories inside the user's personal folder."""
    request_json = request.get_json()
    (user_id, chat_id) = get_user_and_chat_id_from_request_json(request_json)
    root_path = create_personal_folder(user_id)
    nodes = request_json['nodes']
    try:
        if (os.path.exists(root_path) and os.path.isdir(root_path)):
            # Rebuild the node-id -> filesystem-path mapping for the tree.
            current_path_tree_list: list = []
            id_to_path_dict = {0: root_path}
            _path_tree_for_react_dnd_treeview(current_path_tree_list, id_to_path_dict, root_path, 0)
            for node in nodes:
                old_path = id_to_path_dict[node['id']]
                new_path = id_to_path_dict[node['parent']]
                shutil.move(old_path, new_path)
                logger.bind(user_id=user_id, chat_id=chat_id, api='/move', msg_head='Move file success').debug(f'from {old_path} to {new_path}')
            return jsonify({'success': True, 'message': 'File moved successfully'})
    except Exception as e:
        logger.bind(user_id=user_id, chat_id=chat_id, api='/move', msg_head='Move file failed').error(str(e))
        return jsonify({'success': False, 'message': str(e)})
    # Reached when the personal folder is missing or not a directory.
    return Response(response=None, status=f'{INTERNAL} Fail to move file')
class DtypeTestCase(ZiplineTestCase):
    """Parameterized tests that each Custom{Factor,Filter,Classifier}
    subclass accepts exactly its supported dtypes.

    NOTE(review): the bare _space(dtype_=...) calls below read like
    @parameter_space decorators whose '@' was stripped in this copy, and
    correct_dtype/incorrect_dtype like @classmethod factories — confirm.
    """

    def correct_dtype(cls, dtypes):
        """Build a test asserting every dtype in *dtypes* is accepted."""
        _space(dtype_=dtypes)
        def test(self, dtype_):
            class Correct(cls):
                missing_value = missing_values.get(dtype_, NotSpecified)
                inputs = []
                window_length = 1
                dtype = dtype_
            # Construction succeeds and the dtype round-trips.
            self.assertEqual(Correct().dtype, dtype_)
        return test

    def incorrect_dtype(cls, dtypes, hint):
        """Build a test asserting every dtype raises UnsupportedDataType
        whose message mentions *hint* (the suggested alternative class)."""
        _space(dtype_=dtypes)
        def test(self, dtype_):
            with self.assertRaises(UnsupportedDataType) as e:
                class Incorrect(cls):
                    missing_value = missing_values.get(dtype_, NotSpecified)
                    inputs = []
                    window_length = 1
                    dtype = dtype_
                Incorrect()
            self.assertIn(hint, str(e.exception))
            self.assertIn(str(dtype_), str(e.exception))
        return test

    # One generated test per (term class, dtype set) combination.
    test_custom_classifier_correct_dtypes = correct_dtype(CustomClassifier, CLASSIFIER_DTYPES)
    test_custom_classifier_factor_dtypes = incorrect_dtype(CustomClassifier, (FACTOR_DTYPES - CLASSIFIER_DTYPES), 'CustomFactor')
    test_custom_classifier_filter_dtypes = incorrect_dtype(CustomClassifier, (FILTER_DTYPES - CLASSIFIER_DTYPES), 'CustomFilter')
    test_custom_factor_correct_dtypes = correct_dtype(CustomFactor, FACTOR_DTYPES)
    test_custom_factor_classifier_dtypes = incorrect_dtype(CustomFactor, (CLASSIFIER_DTYPES - FACTOR_DTYPES), 'CustomClassifier')
    test_custom_factor_filter_dtypes = incorrect_dtype(CustomFactor, (FILTER_DTYPES - FACTOR_DTYPES), 'CustomFilter')
    test_custom_filter_correct_dtypes = correct_dtype(CustomFilter, FILTER_DTYPES)
    test_custom_filter_classifier_dtypes = incorrect_dtype(CustomFilter, (CLASSIFIER_DTYPES - FILTER_DTYPES), 'CustomClassifier')
    test_custom_filter_factor_dtypes = incorrect_dtype(CustomFilter, ((FACTOR_DTYPES - FILTER_DTYPES) - CLASSIFIER_DTYPES), 'CustomFactor')
def test_select_handle_free_center(view, item):
    """The free-center select handle should be a 20x20 rect centered on the
    unselected bounding rect, independent of the view's 0.5 zoom."""
    view.scene.addItem(item)
    view.scale(0.5, 0.5)
    item.SELECT_FREE_CENTER = 10  # half-size of the handle square
    with patch.object(item, 'bounding_rect_unselected', return_value=QtCore.QRectF(0, 0, 100, 80)):
        # Center (50, 40) inflated by 10 in every direction.
        assert (item.select_handle_free_center() == QtCore.QRectF(40, 30, 20, 20))
def test_03_callbacks(driver):
    """Example 03 should log a 'Got this from Python:' message referencing
    callbacks.html to the browser console."""
    with get_eel_server('examples/03 - sync_callbacks/sync_callbacks.py', 'sync_callbacks.html') as eel_url:
        driver.get(eel_url)
        assert (driver.title == 'Synchronous callbacks')
        # Block until at least one console entry arrives from the page's JS.
        console_logs = get_console_logs(driver, minimum_logs=1)
        assert ('Got this from Python:' in console_logs[0]['message'])
        assert ('callbacks.html' in console_logs[0]['message'])
class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel gate using a hard-sigmoid.

    Globally average-pools the input, maps it through a 1x1 convolution, and
    rescales the input channel-wise by the resulting gate.
    """

    def __init__(self, channels):
        super().__init__()
        # Squeeze each channel to a single value.
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # Per-channel excitation weights (1x1 conv acts as a linear layer).
        self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
        self.act = nn.Hardsigmoid(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Squeeze -> excite -> gate the original feature map.
        gate = self.act(self.fc(self.global_avgpool(x)))
        return x * gate
class ArchiveCheck(_Archive):
    """Read-only helpers over an on-disk archive directory."""

    def open(self, relpath):
        # Resolve `relpath` against the archive root; text mode, as before.
        return open(os.path.join(str(self.path), relpath), 'r')

    def relative_iterdir(self, relpath='.'):
        # Yield each direct child as a path string relative to the root.
        root = pathlib.Path(self.path)
        for entry in root.iterdir():
            yield str(entry.relative_to(self.path))

    def _get_uuid(self):
        # The archive's directory name is its UUID.
        return os.path.basename(self.path)
def search_point_group_ops(cell, tol=SYMPREC):
    """Brute-force search for lattice point-group rotations of `cell`.

    Enumerates every 3x3 integer matrix W with entries in {1, 0, -1} and
    keeps those for which the transformed metric tensor W^T G W matches the
    original metric G within `tol` (lattice-vector lengths and mutual angles
    preserved), restricted so non-periodic axes are left untouched.

    Returns an (N, 3, 3) integer array of candidate rotation matrices.
    """
    a = cell.lattice_vectors()
    G = np.dot(a, a.T)  # metric tensor: G_ij = a_i . a_j
    # Which axes are periodic; for dimension < 3 the trailing axes are not.
    pbc_axis = np.array([1, 1, 1], dtype=bool)
    if (cell.dimension < 3):
        pbc_axis[cell.dimension:] = False
    a_norm = np.sqrt(np.diag(G))  # lattice-vector lengths
    a_angle = np.arccos((G / np.outer(a_norm, a_norm)))  # pairwise angles
    tol2 = (tol ** 2)
    rotations = []
    # All 3^9 sign patterns; lib.cartesian_prod enumerates the product space.
    for op in lib.cartesian_prod(([[1, 0, (- 1)]] * 9)):
        W = np.asarray(op, dtype=np.int32).reshape(3, 3)
        G_tilde = reduce(np.dot, (W.T, G, W))  # metric after applying W
        a_tilde_norm = np.sqrt(np.diag(G_tilde))
        # Reject if any lattice-vector length changes by more than tol.
        length_error = np.abs((a_norm - a_tilde_norm))
        if (length_error > tol).any():
            continue
        tmp = (a_norm + a_tilde_norm)
        a_tilde_angle = np.arccos((G_tilde / np.outer(a_tilde_norm, a_tilde_norm)))
        # Angle criterion scaled by the vector lengths, compared against
        # tol^2 -- presumably following a spglib-style combined
        # length/angle tolerance scheme (TODO confirm).
        angle_error = (((np.sin((a_angle - a_tilde_angle)) ** 2) * np.outer(tmp, tmp)) / 4)
        if (angle_error > tol2).any():
            continue
        # Non-periodic axes must map onto themselves (+1 on the diagonal)...
        if (not (W[np.diag((~ pbc_axis))] == 1).all()):
            continue
        # ...and must not mix with any other axis (off-diagonal entries that
        # couple a periodic axis with a non-periodic one must be zero).
        pbc_axis2 = np.logical_and.outer(pbc_axis, pbc_axis)
        if W[(~ (pbc_axis2 | np.eye(3, dtype=bool)))].any():
            continue
        rotations.append(W)
    rotations = np.asarray(rotations)
    return rotations
def get_auth_headers(repository=None, scopes=None):
    """Build the WWW-Authenticate and API-version headers for the v2 registry.

    When `repository` is given, a `scope=` clause is appended; `scopes`
    further narrows it to specific actions (e.g. pull, push).
    """
    realm_auth_path = url_for('v2.generate_registry_jwt')
    authenticate = 'Bearer realm="{0}{1}",service="{2}"'.format(
        get_app_url(), realm_auth_path, app.config['SERVER_HOSTNAME'])
    if repository:
        scopes_string = 'repository:{0}'.format(repository)
        if scopes:
            scopes_string += (':' + ','.join(scopes))
        authenticate += ',scope="{0}"'.format(scopes_string)
    return {
        'WWW-Authenticate': authenticate,
        'Docker-Distribution-API-Version': 'registry/2.0',
    }
class OurMultiheadAttention(nn.Module):
    """Multi-head attention over 2-D feature maps with 1x1-conv projections.

    `forward` dispatches to one of several attention implementations chosen
    by `attn_type`; sparse/patch variants take extra geometry kwargs.
    """

    def __init__(self, feat_dim, n_head, d_k=None, d_v=None):
        super(OurMultiheadAttention, self).__init__()
        # Default per-head dims split the feature dim evenly across heads.
        self.n_head = n_head
        self.d_k = (feat_dim // n_head) if d_k is None else d_k
        self.d_v = (feat_dim // n_head) if d_v is None else d_v
        self.w_qs = nn.Conv2d(feat_dim, self.n_head * self.d_k, 1, bias=False)
        self.w_ks = nn.Conv2d(feat_dim, self.n_head * self.d_k, 1, bias=False)
        self.w_vs = nn.Conv2d(feat_dim, self.n_head * self.d_v, 1, bias=False)
        self.fc = nn.Conv2d(self.n_head * self.d_v, feat_dim, 1, bias=False)

    def forward(self, q, k, v, attn_type='softmax', **kwargs):
        heads, dk, dv = self.n_head, self.d_k, self.d_v
        # Project and split into heads: (B, n_head, d, H, W).
        q = self.w_qs(q).view(q.shape[0], heads, dk, q.shape[2], q.shape[3])
        k = self.w_ks(k).view(k.shape[0], heads, dk, k.shape[2], k.shape[3])
        v = self.w_vs(v).view(v.shape[0], heads, dv, v.shape[2], v.shape[3])
        if attn_type == 'softmax':
            q, attn = softmax_attention(q, k, v)
        elif attn_type == 'dotproduct':
            q, attn = dotproduct_attention(q, k, v)
        elif attn_type == 'patch':
            q, attn = patch_attention(q, k, v, P=kwargs['P'])
        elif attn_type == 'sparse_long':
            q, attn = long_range_attention(q, k, v, P_h=kwargs['ah'], P_w=kwargs['aw'])
        elif attn_type == 'sparse_short':
            q, attn = short_range_attention(q, k, v, Q_h=kwargs['ah'], Q_w=kwargs['aw'])
        else:
            raise NotImplementedError(f'Unknown attention type {attn_type}')
        # Merge heads back and project to the original feature dim.
        q = q.reshape(q.shape[0], -1, q.shape[3], q.shape[4])
        return self.fc(q), attn
def execute_policy_on_repo(policy, repo_id, namespace_id, tag_page_limit=100):
    """Run a single auto-prune policy against one repository.

    Raises InvalidNamespaceAutoPruneMethod when the policy's method has no
    registered handler.
    """
    prune_handlers = {
        AutoPruneMethod.NUMBER_OF_TAGS.value: prune_repo_by_number_of_tags,
        AutoPruneMethod.CREATION_DATE.value: prune_repo_by_creation_date,
    }
    handler = prune_handlers.get(policy.method)
    if handler is None:
        raise InvalidNamespaceAutoPruneMethod('Unsupported prune method type', policy.method)
    namespace = user.get_namespace_user_by_user_id(namespace_id)
    repo = repository.lookup_repository(repo_id)
    logger.debug('Executing autoprune policy: %s on repo: %s', policy.method, repo.name)
    handler(repo, policy.config, namespace, tag_page_limit)
class Effect4321(BaseEffect):
    """Caldari Core Systems subsystem bonus applied to fitted ECM modules."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # The same subsystem bonus boosts each ECM attribute; iterate instead
        # of repeating five near-identical calls. The attribute getter is
        # invoked per boost, exactly as the original did.
        ecm_attributes = (
            'scanLadarStrengthBonus',
            'scanRadarStrengthBonus',
            'maxRange',
            'scanGravimetricStrengthBonus',
            'scanMagnetometricStrengthBonus',
        )
        for attr_name in ecm_attributes:
            fit.modules.filteredItemBoost(
                (lambda mod: (mod.item.group.name == 'ECM')),
                attr_name,
                src.getModifiedItemAttr('subsystemBonusCaldariCore2'),
                skill='Caldari Core Systems',
                **kwargs)
def test_verify_docs_python_org(benchmark, pytestconfig):
    """Benchmark chain verification for the 'online::docs.python.org' limbo case."""
    limbo_root = pytestconfig.getoption('--x509-limbo-root', skip=True)
    with open(os.path.join(limbo_root, 'limbo.json'), 'rb') as f:
        all_cases = json.load(f)['testcases']
    [testcase] = [tc for tc in all_cases if tc['id'] == 'online::docs.python.org']
    with open(certifi.where(), 'rb') as f:
        trusted = x509.load_pem_x509_certificates(f.read())
    store = x509.verification.Store(trusted)
    leaf = x509.load_pem_x509_certificate(testcase['peer_certificate'].encode())
    intermediates = [
        x509.load_pem_x509_certificate(pem.encode())
        for pem in testcase['untrusted_intermediates']
    ]
    time = datetime.datetime.fromisoformat(testcase['validation_time'])

    def bench():
        builder = x509.verification.PolicyBuilder().store(store).time(time)
        verifier = builder.build_server_verifier(x509.DNSName('docs.python.org'))
        verifier.verify(leaf, intermediates)

    benchmark(bench)
class QuantDtypeBwInfo():
    """Holds the (dtype, bitwidth) pair for activations and, optionally, params.

    :param act_dtype: data type used for activation quantization
    :param act_bw: bitwidth used for activation quantization
    :param param_dtype: data type used for parameter quantization (optional)
    :param param_bw: bitwidth used for parameter quantization (optional)
    :raises ValueError: if a float dtype is paired with a bitwidth other than
        16 or 32.
    """

    def __init__(self, act_dtype: QuantizationDataType, act_bw: int,
                 param_dtype: QuantizationDataType = QuantizationDataType.undefined,
                 param_bw: int = 0):
        self.act_dtype = act_dtype
        self.act_bw = act_bw
        self.param_dtype = param_dtype
        self.param_bw = param_bw
        self._validate_inputs()

    def __repr__(self):
        # bug fix: the closing parenthesis was missing, producing an
        # unbalanced repr string.
        return f'(activation:({self.act_dtype}, {self.act_bw}) param:({self.param_dtype}, {self.param_bw}))'

    def __str__(self):
        return f'activation:({self.act_dtype}, {self.act_bw}) param:({self.param_dtype}, {self.param_bw})'

    def __eq__(self, other):
        # Guard against comparison with unrelated types instead of raising
        # AttributeError.
        if not isinstance(other, QuantDtypeBwInfo):
            return NotImplemented
        return ((self.act_dtype == other.act_dtype) and (self.act_bw == other.act_bw)
                and (self.param_dtype == other.param_dtype) and (self.param_bw == other.param_bw))

    def __hash__(self):
        # Defined alongside __eq__ so equal instances hash equally and remain
        # usable as dict keys / set members.
        return hash((self.act_dtype, self.act_bw, self.param_dtype, self.param_bw))

    def _validate_inputs(self):
        """Validate that float dtypes only pair with 16- or 32-bit widths.

        :raises ValueError: on an unsupported float dtype/bitwidth pairing.
        """
        if self.param_dtype and self.param_bw:
            if (self.param_dtype == QuantizationDataType.float) and (self.param_bw not in (16, 32)):
                # bug fix: message previously claimed only 16 was allowed
                # although the check also accepts 32.
                raise ValueError('float param_dtype can only be used when param_bw is set to 16 or 32, not '
                                 + str(self.param_bw))
        # Activation validation is intentionally unconditional: it must not
        # depend on whether param fields were supplied.
        if (self.act_dtype == QuantizationDataType.float) and (self.act_bw not in (16, 32)):
            raise ValueError('float act_dtype can only be used when act_bw is set to 16 or 32, not '
                             + str(self.act_bw))

    def is_same_activation(self, dtype: QuantizationDataType, bw: int):
        """Return True if (dtype, bw) matches this object's activation config."""
        return ((bw == self.act_bw) and (dtype == self.act_dtype))

    def is_same_param(self, dtype: QuantizationDataType, bw: int):
        """Return True if (dtype, bw) matches this object's parameter config."""
        return ((bw == self.param_bw) and (dtype == self.param_dtype))

    def get_activation(self) -> tuple:
        """Return (act_dtype, act_bw)."""
        return (self.act_dtype, self.act_bw)

    def get_param(self) -> tuple:
        """Return (param_dtype, param_bw)."""
        return (self.param_dtype, self.param_bw)
def test_marker_union_intersect_single_marker() -> None:
    """Intersecting a union marker distributes over both branches."""
    union = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
    single = parse_marker('implementation_name == "cpython"')
    result = union.intersect(single)
    expected = (
        'sys_platform == "darwin" and implementation_name == "cpython"'
        ' or python_version < "3.4" and implementation_name == "cpython"'
    )
    assert str(result) == expected
# NOTE(review): the decorators below lost their "@" prefixes (and apparently
# part of their names) in transit -- reconstructed as pytest markers from the
# visible fragments (`.skip`, `.remote_data`, `.flaky`); confirm upstream.
@pytest.mark.skip(reason='SRML server is undergoing maintenance as of 12-2023')
@_on_pvlib_version('0.11')  # NOTE(review): likely a truncated `@fail_on_pvlib_version` -- confirm
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_15_minute_dt_index():
    """April 2019 'RQ' SRML data spans the whole month at 15-min resolution."""
    with pytest.warns(pvlibDeprecationWarning, match='get_srml instead'):
        data = srml.read_srml_month_from_solardat('TW', 2019, 4, 'RQ')
    # NOTE(review): the timestamp strings below look truncated -- a date
    # (presumably '2019-04-01' / '2019-04-30') seems to have been lost ahead
    # of the times. TODO restore from the original source.
    start = pd.Timestamp(' 00:00')
    start = start.tz_localize('Etc/GMT+8')
    end = pd.Timestamp(' 23:45')
    end = end.tz_localize('Etc/GMT+8')
    assert (data.index[0] == start)
    assert (data.index[(- 1)] == end)
    # Every 4th sample starting at offset 3 should land on minute 45.
    assert (data.index[3::4].minute == 45).all()
class ExtendedRequestSupport():
    """Mixin that converts pymodbus client responses into plain dicts.

    Each wrapper issues the underlying request and returns either a dict of
    the interesting response fields (on success) or an error dict built by
    `_process_exception` (on failure) -- convenient for REPL display.

    NOTE(review): recovered from a whitespace-mangled source. The bare
    `_brodcast` (sic) markers were reconstructed as `@_brodcast` decorators,
    which the call pattern suggests; confirm against the original file.
    """

    def _process_exception(resp, **kwargs):
        """Translate an error response into a readable error dict.

        Invoked through the class (no `self`). A missing `slave` kwarg means
        the request was a broadcast, where errors are expected and ignored.
        """
        if ('slave' not in kwargs):
            err = {'message': 'Broadcast message, ignoring errors!!!'}
        elif isinstance(resp, ExceptionResponse):
            # Modbus protocol-level exception returned by the server.
            err = {'original_function_code': f'{resp.original_code} ({hex(resp.original_code)})',
                   'error_function_code': f'{resp.function_code} ({hex(resp.function_code)})',
                   'exception code': resp.exception_code,
                   'message': ModbusExceptions.decode(resp.exception_code)}
        elif isinstance(resp, ModbusIOException):
            # Transport-level failure (timeout, framing error, ...).
            err = {'original_function_code': f'{resp.fcode} ({hex(resp.fcode)})',
                   'error': resp.message}
        else:
            err = {'error': str(resp)}
        return err

    def read_coils(self, address, count=1, slave=0, **kwargs):
        """Read `count` coils starting at `address`; return bits or an error dict."""
        resp = super().read_coils(address, count, slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'bits': resp.bits}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def read_discrete_inputs(self, address, count=1, slave=0, **kwargs):
        """Read `count` discrete inputs starting at `address`."""
        resp = super().read_discrete_inputs(address, count, slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'bits': resp.bits}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    # Write wrappers return the raw response; the decorator presumably adapts
    # them for broadcast use (TODO confirm `_brodcast`'s behavior upstream).
    @_brodcast
    def write_coil(self, address, value, slave=0, **kwargs):
        resp = super().write_coil(address, value, slave, **kwargs)
        return resp

    @_brodcast
    def write_coils(self, address, values, slave=0, **kwargs):
        resp = super().write_coils(address, values, slave, **kwargs)
        return resp

    @_brodcast
    def write_register(self, address, value, slave=0, **kwargs):
        resp = super().write_register(address, value, slave, **kwargs)
        return resp

    @_brodcast
    def write_registers(self, address, values, slave=0, **kwargs):
        resp = super().write_registers(address, values, slave, **kwargs)
        return resp

    def read_holding_registers(self, address, count=1, slave=0, **kwargs):
        """Read `count` holding registers starting at `address`."""
        resp = super().read_holding_registers(address, count, slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code,
                    'registers': resp.registers}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def read_input_registers(self, address, count=1, slave=0, **kwargs):
        """Read `count` input registers starting at `address`."""
        resp = super().read_input_registers(address, count, slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code,
                    'registers': resp.registers}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def readwrite_registers(self, read_address=0, read_count=0, write_address=0, values=0, slave=0, **kwargs):
        """Combined read/write of registers in a single transaction."""
        resp = super().readwrite_registers(read_address=read_address, read_count=read_count,
                                           write_address=write_address, values=values,
                                           slave=slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code,
                    'registers': resp.registers}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def mask_write_register(self, address=0, and_mask=65535, or_mask=0, slave=0, **kwargs):
        """Modify a register in place using AND/OR masks."""
        resp = super().mask_write_register(address=address, and_mask=and_mask,
                                           or_mask=or_mask, slave=slave, **kwargs)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'address': resp.address,
                    'and mask': resp.and_mask, 'or mask': resp.or_mask}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def read_device_information(self, read_code=None, object_id=0, **kwargs):
        """Read device identification objects (Modbus encapsulated interface)."""
        request = ReadDeviceInformationRequest(read_code, object_id, **kwargs)
        resp = self.execute(request)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'information': resp.information,
                    'object count': resp.number_of_objects, 'conformity': resp.conformity,
                    'next object id': resp.next_object_id, 'more follows': resp.more_follows,
                    'space left': resp.space_left}
        return ExtendedRequestSupport._process_exception(resp, slave=request.slave_id)

    def report_slave_id(self, slave=0, **kwargs):
        """Ask the slave to report its identifier and run status."""
        request = ReportSlaveIdRequest(slave, **kwargs)
        resp = self.execute(request)
        if (not resp.isError()):
            # cp1252: identifier bytes may contain non-ASCII vendor text.
            return {'function_code': resp.function_code,
                    'identifier': resp.identifier.decode('cp1252'),
                    'status': resp.status, 'byte count': resp.byte_count}
        return ExtendedRequestSupport._process_exception(resp, slave=slave)

    def read_exception_status(self, slave=0, **kwargs):
        """Read the slave's exception status byte."""
        request = ReadExceptionStatusRequest(slave, **kwargs)
        resp = self.execute(request)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'status': resp.status}
        return ExtendedRequestSupport._process_exception(resp, slave=request.slave_id)

    def get_com_event_counter(self, **kwargs):
        """Read the communication event counter."""
        request = GetCommEventCounterRequest(**kwargs)
        resp = self.execute(request)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'status': resp.status,
                    'count': resp.count}
        return ExtendedRequestSupport._process_exception(resp, slave=request.slave_id)

    def get_com_event_log(self, **kwargs):
        """Read the communication event log."""
        request = GetCommEventLogRequest(**kwargs)
        resp = self.execute(request)
        if (not resp.isError()):
            return {'function_code': resp.function_code, 'status': resp.status,
                    'message count': resp.message_count, 'event count': resp.event_count,
                    'events': resp.events}
        return ExtendedRequestSupport._process_exception(resp, slave=request.slave_id)

    def _execute_diagnostic_request(self, request):
        # Shared executor for all diagnostic (0x08) sub-function wrappers.
        resp = self.execute(request)
        if (not resp.isError()):
            return {'function code': resp.function_code,
                    'sub function code': resp.sub_function_code,
                    'message': resp.message}
        return ExtendedRequestSupport._process_exception(resp, slave=request.slave_id)

    # --- Diagnostic sub-function wrappers: each builds the matching request
    # --- object and delegates to _execute_diagnostic_request.
    def return_query_data(self, message=0, **kwargs):
        request = ReturnQueryDataRequest(message, **kwargs)
        return self._execute_diagnostic_request(request)

    def restart_comm_option(self, toggle=False, **kwargs):
        request = RestartCommunicationsOptionRequest(toggle, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_diagnostic_register(self, data=0, **kwargs):
        request = ReturnDiagnosticRegisterRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def change_ascii_input_delimiter(self, data=0, **kwargs):
        request = ChangeAsciiInputDelimiterRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def force_listen_only_mode(self, data=0, **kwargs):
        request = ForceListenOnlyModeRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def clear_counters(self, data=0, **kwargs):
        request = ClearCountersRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_bus_message_count(self, data=0, **kwargs):
        request = ReturnBusMessageCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_bus_com_error_count(self, data=0, **kwargs):
        request = ReturnBusCommunicationErrorCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_bus_exception_error_count(self, data=0, **kwargs):
        request = ReturnBusExceptionErrorCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_slave_message_count(self, data=0, **kwargs):
        request = ReturnSlaveMessageCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_slave_no_response_count(self, data=0, **kwargs):
        request = ReturnSlaveNoResponseCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_slave_no_ack_count(self, data=0, **kwargs):
        request = ReturnSlaveNAKCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_slave_busy_count(self, data=0, **kwargs):
        request = ReturnSlaveBusyCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_slave_bus_char_overrun_count(self, data=0, **kwargs):
        request = ReturnSlaveBusCharacterOverrunCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def return_iop_overrun_count(self, data=0, **kwargs):
        request = ReturnIopOverrunCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def clear_overrun_count(self, data=0, **kwargs):
        request = ClearOverrunCountRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)

    def get_clear_modbus_plus(self, data=0, **kwargs):
        request = GetClearModbusPlusRequest(data, **kwargs)
        return self._execute_diagnostic_request(request)
def test_layer_control_initialization():
    """A fresh LayerControl exposes the documented defaults and empty layer maps."""
    control = LayerControl()
    assert control._name == 'LayerControl'
    options = control.options
    assert options['position'] == 'topright'
    assert options['collapsed'] is True
    assert options['autoZIndex'] is True
    assert control.draggable is False
    assert control.base_layers == {}
    assert control.overlays == {}
class Venv():
    """Abstraction for a virtual environment managed by pipx.

    NOTE(review): recovered from a whitespace-mangled source. The `@property`
    decorators below were reconstructed from in-source evidence (these names
    are read without call parentheses, e.g. `self.uses_shared_libs` in
    `__init__`), and newlines inside multi-line string literals were
    re-flowed -- confirm both against the original file.
    """

    def __init__(self, path: Path, *, verbose: bool = False, python: str = DEFAULT_PYTHON) -> None:
        self.root = path
        self.python = python
        (self.bin_path, self.python_path, self.man_path) = get_venv_paths(self.root)
        self.pipx_metadata = PipxMetadata(venv_dir=path)
        self.verbose = verbose
        self.do_animation = (not verbose)
        try:
            # The venv "exists" only if its directory exists and is non-empty.
            self._existing = (self.root.exists() and bool(next(self.root.iterdir())))
        except StopIteration:
            self._existing = False
        if (self._existing and self.uses_shared_libs):
            # Keep the shared pip/setuptools venv healthy before using it.
            if shared_libs.is_valid:
                if shared_libs.needs_upgrade:
                    shared_libs.upgrade(verbose=verbose)
            else:
                shared_libs.create(verbose)
            if (not shared_libs.is_valid):
                raise PipxError(
                    (pipx_wrap(f'''
                        Error: pipx's shared venv {shared_libs.root} is invalid
                        and needs re-installation. To fix this, install or
                        reinstall a package. For example:
                        ''')
                     + f'''
                        pipx install {self.root.name} --force'''),
                    wrap_message=False)

    @property
    def name(self) -> str:
        """Display name: main package + suffix if known, else the dir name."""
        if (self.pipx_metadata.main_package.package is not None):
            venv_name = f'{self.pipx_metadata.main_package.package}{self.pipx_metadata.main_package.suffix}'
        else:
            venv_name = self.root.name
        return venv_name

    @property
    def uses_shared_libs(self) -> bool:
        """Whether this venv links to pipx's shared libraries via a .pth file."""
        if self._existing:
            pth_files = self.root.glob(('**/' + PIPX_SHARED_PTH))
            return (next(pth_files, None) is not None)
        else:
            # A venv that does not exist yet will be created with shared libs.
            return True

    @property
    def package_metadata(self) -> Dict[(str, PackageInfo)]:
        """All known packages (injected + main) keyed by package name."""
        return_dict = self.pipx_metadata.injected_packages.copy()
        if (self.pipx_metadata.main_package.package is not None):
            return_dict[self.pipx_metadata.main_package.package] = self.pipx_metadata.main_package
        return return_dict

    @property
    def main_package_name(self) -> str:
        """Main package name, falling back to the venv directory name."""
        if (self.pipx_metadata.main_package.package is None):
            return self.root.name
        else:
            return self.pipx_metadata.main_package.package

    def create_venv(self, venv_args: List[str], pip_args: List[str], override_shared: bool = False) -> None:
        """Create the venv on disk; link shared libs unless overridden."""
        with animate('creating virtual environment', self.do_animation):
            cmd = [self.python, '-m', 'venv']
            if (not override_shared):
                # pip lives in the shared venv; skip installing it here.
                cmd.append('--without-pip')
            venv_process = run_subprocess(((cmd + venv_args) + [str(self.root)]))
        subprocess_post_check(venv_process)
        shared_libs.create(self.verbose)
        if (not override_shared):
            # Write the .pth file that exposes the shared site-packages.
            pipx_pth = (get_site_packages(self.python_path) / PIPX_SHARED_PTH)
            pipx_pth.write_text(f'''{shared_libs.site_packages}
''', encoding='utf-8')
        self.pipx_metadata.venv_args = venv_args
        self.pipx_metadata.python_version = self.get_python_version()

    def safe_to_remove(self) -> bool:
        # Only remove venvs created during this session.
        return (not self._existing)

    def remove_venv(self) -> None:
        """Delete the venv directory if it was created in this session."""
        if self.safe_to_remove():
            rmdir(self.root)
        else:
            logger.warning(pipx_wrap(f'''
                {hazard} Not removing existing venv {self.root} because it was
                not created in this session
                ''', subsequent_indent=(' ' * 4)))

    def upgrade_packaging_libraries(self, pip_args: List[str]) -> None:
        """Upgrade pip & friends, in the shared venv or locally as appropriate."""
        if self.uses_shared_libs:
            shared_libs.upgrade(pip_args=pip_args, verbose=self.verbose)
        else:
            self.upgrade_package_no_metadata('pip', pip_args)

    def uninstall_package(self, package: str, was_injected: bool = False):
        """Uninstall `package` via pip and update metadata for injected packages."""
        try:
            with animate(f'uninstalling {package}', self.do_animation):
                cmd = (['uninstall', '-y'] + [package])
                self._run_pip(cmd)
        except PipxError as e:
            # NOTE(review): uses module-level `logging` while the rest of the
            # class uses `logger` -- presumably unintentional; confirm.
            logging.info(e)
            raise PipxError(f'Error uninstalling {package}.')
        if was_injected:
            self.pipx_metadata.injected_packages.pop(package)
            self.pipx_metadata.write()

    def install_package(self, package_name: str, package_or_url: str, pip_args: List[str],
                        include_dependencies: bool, include_apps: bool, is_main_package: bool,
                        suffix: str = '') -> None:
        """Install a package into the venv and record its metadata."""
        # Normalize the spec before handing it to pip.
        package_or_url = fix_package_name(package_or_url, package_name)
        (package_or_url, pip_args) = parse_specifier_for_install(package_or_url, pip_args)
        with animate(f'installing {full_package_description(package_name, package_or_url)}', self.do_animation):
            # Full path to the venv's python ensures pip targets this venv.
            cmd = [str(self.python_path), '-m', 'pip', '--no-input', 'install', *pip_args, package_or_url]
            pip_process = run_subprocess(cmd, log_stdout=False, log_stderr=False)
        subprocess_post_check_handle_pip_error(pip_process)
        if pip_process.returncode:
            raise PipxError(f'Error installing {full_package_description(package_name, package_or_url)}.')
        self._update_package_metadata(package_name=package_name, package_or_url=package_or_url,
                                      pip_args=pip_args, include_dependencies=include_dependencies,
                                      include_apps=include_apps, is_main_package=is_main_package,
                                      suffix=suffix)
        # A missing version after "successful" install means pip silently
        # failed to produce a usable package.
        if (self.package_metadata[package_name].package_version is None):
            raise PipxError(f'''Unable to install {full_package_description(package_name, package_or_url)}.
Check the name or spec for errors, and verify that it can be installed with pip.''',
                            wrap_message=False)

    def install_unmanaged_packages(self, requirements: List[str], pip_args: List[str]) -> None:
        """Install packages without recording pipx metadata for them."""
        with animate(f"installing {', '.join(requirements)}", self.do_animation):
            cmd = [str(self.python_path), '-m', 'pip', '--no-input', 'install', *pip_args, *requirements]
            pip_process = run_subprocess(cmd, log_stdout=False, log_stderr=False)
        subprocess_post_check_handle_pip_error(pip_process)
        if pip_process.returncode:
            raise PipxError(f"Error installing {', '.join(requirements)}.")

    def install_package_no_deps(self, package_or_url: str, pip_args: List[str]) -> str:
        """Install a spec without dependencies and return the package name.

        The name is inferred by diffing the installed-package set before and
        after the install.
        """
        with animate(f'determining package name from {package_or_url!r}', self.do_animation):
            old_package_set = self.list_installed_packages()
            cmd = ['--no-input', 'install', '--no-dependencies', *pip_args, package_or_url]
            pip_process = self._run_pip(cmd)
        subprocess_post_check(pip_process, raise_error=False)
        if pip_process.returncode:
            raise PipxError(f'''
                Cannot determine package name from spec {package_or_url!r}.
                Check package spec for errors.
                ''')
        installed_packages = (self.list_installed_packages() - old_package_set)
        if (len(installed_packages) == 1):
            package_name = installed_packages.pop()
            logger.info(f'Determined package name: {package_name}')
        else:
            logger.info(f'old_package_set = {old_package_set}')
            logger.info(f'install_packages = {installed_packages}')
            raise PipxError(f'''
                Cannot determine package name from spec {package_or_url!r}.
                Check package spec for errors.
                ''')
        return package_name

    def get_venv_metadata_for_package(self, package_name: str, package_extras: Set[str]) -> VenvMetadata:
        """Inspect the venv for a package's apps/man pages; log the timing."""
        data_start = time.time()
        venv_metadata = inspect_venv(package_name, package_extras, self.bin_path,
                                     self.python_path, self.man_path)
        logger.info(f'get_venv_metadata_for_package: {(1000.0 * (time.time() - data_start)):.0f}ms')
        return venv_metadata

    def _update_package_metadata(self, package_name: str, package_or_url: str, pip_args: List[str],
                                 include_dependencies: bool, include_apps: bool, is_main_package: bool,
                                 suffix: str = '') -> None:
        # Record everything pipx needs to manage this package later.
        venv_package_metadata = self.get_venv_metadata_for_package(package_name, get_extras(package_or_url))
        package_info = PackageInfo(
            package=package_name,
            package_or_url=parse_specifier_for_metadata(package_or_url),
            pip_args=pip_args,
            include_apps=include_apps,
            include_dependencies=include_dependencies,
            apps=venv_package_metadata.apps,
            app_paths=venv_package_metadata.app_paths,
            apps_of_dependencies=venv_package_metadata.apps_of_dependencies,
            app_paths_of_dependencies=venv_package_metadata.app_paths_of_dependencies,
            man_pages=venv_package_metadata.man_pages,
            man_paths=venv_package_metadata.man_paths,
            man_pages_of_dependencies=venv_package_metadata.man_pages_of_dependencies,
            man_paths_of_dependencies=venv_package_metadata.man_paths_of_dependencies,
            package_version=venv_package_metadata.package_version,
            suffix=suffix)
        if is_main_package:
            self.pipx_metadata.main_package = package_info
        else:
            self.pipx_metadata.injected_packages[package_name] = package_info
        self.pipx_metadata.write()

    def get_python_version(self) -> str:
        """Return the venv interpreter's `python --version` string."""
        return run_subprocess([str(self.python_path), '--version']).stdout.strip()

    def list_installed_packages(self, not_required=False) -> Set[str]:
        """Return the names of packages pip reports installed in this venv."""
        cmd_run = run_subprocess(
            ([str(self.python_path), '-m', 'pip', 'list', '--format=json']
             + (['--not-required'] if not_required else [])))
        pip_list = json.loads(cmd_run.stdout.strip())
        return {x['name'] for x in pip_list}

    def _find_entry_point(self, app: str) -> Optional[EntryPoint]:
        # Look for a 'pipx.run' entry point named `app` in the main package.
        if (not self.python_path.exists()):
            return None
        dists = Distribution.discover(name=self.main_package_name,
                                      path=[str(get_site_packages(self.python_path))])
        for dist in dists:
            for ep in dist.entry_points:
                if ((ep.group == 'pipx.run') and (ep.name == app)):
                    return ep
        return None

    def run_app(self, app: str, filename: str, app_args: List[str]) -> NoReturn:
        """Exec `app`: via its pipx.run entry point if present, else its script."""
        entry_point = self._find_entry_point(app)
        if (entry_point is None):
            exec_app(([str((self.bin_path / filename))] + app_args))
        match = _entry_point_value_pattern.match(entry_point.value)
        assert (match is not None), 'invalid entry point'
        (module, attr) = match.group('module', 'attr')
        # Synthesize a tiny launcher mirroring what a console script would do.
        code = f'''import sys, {module}
sys.argv[0] = {entry_point.name!r}
sys.exit({module}.{attr}())
'''
        exec_app(([str(self.python_path), '-c', code] + app_args))

    def has_app(self, app: str, filename: str) -> bool:
        """True if `app` is runnable via entry point or an installed script."""
        if (self._find_entry_point(app) is not None):
            return True
        return (self.bin_path / filename).is_file()

    def upgrade_package_no_metadata(self, package_name: str, pip_args: List[str]) -> None:
        """pip-upgrade a package without touching pipx metadata."""
        with animate(f'upgrading {full_package_description(package_name, package_name)}', self.do_animation):
            pip_process = self._run_pip(((['--no-input', 'install'] + pip_args)
                                         + ['--upgrade', package_name]))
        subprocess_post_check(pip_process)

    def upgrade_package(self, package_name: str, package_or_url: str, pip_args: List[str],
                        include_dependencies: bool, include_apps: bool, is_main_package: bool,
                        suffix: str = '') -> None:
        """pip-upgrade a package and refresh its recorded metadata."""
        with animate(f'upgrading {full_package_description(package_name, package_or_url)}', self.do_animation):
            pip_process = self._run_pip(((['--no-input', 'install'] + pip_args)
                                         + ['--upgrade', package_or_url]))
        subprocess_post_check(pip_process)
        self._update_package_metadata(package_name=package_name, package_or_url=package_or_url,
                                      pip_args=pip_args, include_dependencies=include_dependencies,
                                      include_apps=include_apps, is_main_package=is_main_package,
                                      suffix=suffix)

    def _run_pip(self, cmd: List[str]) -> 'CompletedProcess[str]':
        # Run pip with the venv's interpreter; quiet unless verbose.
        cmd = ([str(self.python_path), '-m', 'pip'] + cmd)
        if (not self.verbose):
            cmd.append('-q')
        return run_subprocess(cmd)

    def run_pip_get_exit_code(self, cmd: List[str]) -> ExitCode:
        """Run pip without capturing output and return its exit code."""
        cmd = ([str(self.python_path), '-m', 'pip'] + cmd)
        if (not self.verbose):
            cmd.append('-q')
        returncode = run_subprocess(cmd, capture_stdout=False, capture_stderr=False).returncode
        if returncode:
            cmd_str = ' '.join((str(c) for c in cmd))
            logger.error(f'{cmd_str!r} failed')
        return ExitCode(returncode)
_torch class AutoModelTest(unittest.TestCase): def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModel.from_pretrained(model_name) (model, loading_info) = AutoModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertModel) self.assertEqual(len(loading_info['missing_keys']), 0) self.assertEqual(len(loading_info['unexpected_keys']), 8) self.assertEqual(len(loading_info['mismatched_keys']), 0) self.assertEqual(len(loading_info['error_msgs']), 0) def test_model_for_pretraining_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForPreTraining.from_pretrained(model_name) (model, loading_info) = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForPreTraining) missing_keys = loading_info.pop('missing_keys') self.assertListEqual(['cls.predictions.decoder.bias'], missing_keys) for (key, value) in loading_info.items(): self.assertEqual(len(value), 0) def test_lmhead_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelWithLMHead.from_pretrained(model_name) (model, loading_info) = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) def test_model_for_causal_lm(self): for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, 
    # NOTE(review): this chunk is the interior of an Auto* model-mapping test
    # class whose `class` header lies before this excerpt.  The statements
    # immediately below are the tail of a causal-LM test whose `def` line is
    # also outside the excerpt (truncated — do not edit without the full file).
        GPT2Config)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        (model, loading_info) = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, GPT2LMHeadModel)

    def test_model_for_masked_lm(self):
        # A BERT checkpoint must resolve to the BERT config and MLM head classes.
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForMaskedLM.from_pretrained(model_name)
            (model, loading_info) = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    def test_model_for_encoder_decoder_lm(self):
        # A T5 checkpoint must resolve to the seq2seq-LM classes.
        for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
            (model, loading_info) = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    def test_sequence_classification_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForSequenceClassification.from_pretrained(model_name)
            (model, loading_info) = AutoModelForSequenceClassification.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    def test_question_answering_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name)
            (model, loading_info) = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    # NOTE(review): "_scatter" below is residue of a mangled decorator
    # (most likely @require_scatter) — confirm against the upstream file.
    _scatter
    def test_table_question_answering_model_from_pretrained(self):
        # [5:6] deliberately picks a mid-list TAPAS checkpoint, not the first one.
        for model_name in TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = AutoModelForTableQuestionAnswering.from_pretrained(model_name)
            (model, loading_info) = AutoModelForTableQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TapasForQuestionAnswering)

    def test_token_classification_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForTokenClassification.from_pretrained(model_name)
            (model, loading_info) = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForTokenClassification)

    def test_from_pretrained_identifier(self):
        # 14410 pins the tiny test model's parameter count (all trainable).
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # The config's `architectures` entry must drive which class is built.
        model = AutoModel.from_pretrained('sgugger/funnel-random-tiny')
        self.assertIsInstance(model, FunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ['FunnelBaseModel']
        model = AutoModel.from_config(config)
        self.assertIsInstance(model, FunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = AutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, FunnelBaseModel)

    def test_parents_and_children_in_mappings(self):
        # In each mapping a child class must come BEFORE its parent, otherwise
        # lookup-by-isinstance would always hit the parent first.
        mappings = (MODEL_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
        for mapping in mappings:
            mapping = tuple(mapping.items())
            for (index, (child_config, child_model)) in enumerate(mapping[1:]):
                for (parent_config, parent_model) in mapping[:(index + 1)]:
                    assert (not issubclass(child_config, parent_config)), f'{child_config.__name__} is child of {parent_config.__name__}'
                    if (not isinstance(child_model, (list, tuple))):
                        child_model = (child_model,)
                    if (not isinstance(parent_model, (list, tuple))):
                        parent_model = (parent_model,)
                    for (child, parent) in [(a, b) for a in child_model for b in parent_model]:
                        assert (not issubclass(child, parent)), f'{child.__name__} is child of {parent.__name__}'

    def test_from_pretrained_dynamic_model_local(self):
        # try/finally keeps the global registries clean even when asserts fail.
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoModel.register(CustomConfig, CustomModel)
            config = CustomConfig(hidden_size=32)
            model = CustomModel(config)
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir)
                new_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
                for (p1, p2) in zip(model.parameters(), new_model.parameters()):
                    self.assertTrue(torch.equal(p1, p2))
        finally:
            if ('custom' in CONFIG_MAPPING._extra_content):
                del CONFIG_MAPPING._extra_content['custom']
            if (CustomConfig in MODEL_MAPPING._extra_content):
                del MODEL_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_model_distant(self):
        model = AutoModel.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(model.__class__.__name__, 'NewModel')
        model = AutoModel.from_pretrained('hf-internal-testing/test_dynamic_model_with_util', trust_remote_code=True)
        self.assertEqual(model.__class__.__name__, 'NewModel')

    def test_new_model_registration(self):
        AutoConfig.register('custom', CustomConfig)
        auto_classes = [AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoModelForTokenClassification]
        try:
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Registering an already-mapped config, or remapping an
                    # existing pair, must be rejected.
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, CustomModel)
                    auto_class.register(CustomConfig, CustomModel)
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, BertModel)
                    tiny_config = BertModelTester(self).get_config()
                    config = CustomConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, CustomModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, CustomModel)
        finally:
            if ('custom' in CONFIG_MAPPING._extra_content):
                del CONFIG_MAPPING._extra_content['custom']
            for mapping in (MODEL_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING):
                if (CustomConfig in mapping._extra_content):
                    del mapping._extra_content[CustomConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = AutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)'):
            _ = AutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin'):
            _ = AutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_tf_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_tf=True` to load this model'):
            _ = AutoModel.from_pretrained('hf-internal-testing/tiny-bert-tf-only')

    def test_model_from_flax_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_flax=True` to load this model'):
            _ = AutoModel.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
class ASPP(nn.Module): def __init__(self, backbone, output_stride, BatchNorm): super(ASPP, self).__init__() if (backbone == 'drn'): inplanes = 512 elif (backbone == 'mobilenet'): inplanes = 320 else: inplanes = 2048 if (output_stride == 16): dilations = [1, 6, 12, 18] elif (output_stride == 8): dilations = [1, 12, 24, 36] else: raise NotImplementedError self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm) self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm) self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm) self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm) self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(inplanes, 256, 1, stride=1, bias=False), BatchNorm(256), nn.ReLU()) self.conv1 = nn.Conv2d(1280, 256, 1, bias=False) self.bn1 = BatchNorm(256) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.5) self._init_weight() def forward(self, x): x1 = self.aspp1(x) x2 = self.aspp2(x) x3 = self.aspp3(x) x4 = self.aspp4(x) x5 = self.global_avg_pool(x) x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) x = torch.cat((x1, x2, x3, x4, x5), dim=1) x = self.conv1(x) x = self.bn1(x) x = self.relu(x) return self.dropout(x) def _init_weight(self): for m in self.modules(): if isinstance(m, nn.Conv2d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, SynchronizedBatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_()
def _find_caller(): frame = sys._getframe(2) while frame: code = frame.f_code if (os.path.join('utils', 'logger.') not in code.co_filename): mod_name = frame.f_globals['__name__'] if (mod_name == '__main__'): mod_name = 'detectron2' return (mod_name, (code.co_filename, frame.f_lineno, code.co_name)) frame = frame.f_back
def init(disp, info): disp.extension_add_method('display', 'xinput_query_version', query_version) disp.extension_add_method('window', 'xinput_select_events', select_events) disp.extension_add_method('display', 'xinput_query_device', query_device) disp.extension_add_method('window', 'xinput_grab_device', grab_device) disp.extension_add_method('display', 'xinput_ungrab_device', ungrab_device) disp.extension_add_method('window', 'xinput_grab_keycode', grab_keycode) disp.extension_add_method('window', 'xinput_ungrab_keycode', ungrab_keycode) disp.extension_add_method('display', 'xinput_get_device_property', get_device_property) disp.extension_add_method('display', 'xinput_list_device_properties', list_device_properties) disp.extension_add_method('display', 'xinput_change_device_property', change_device_property) disp.extension_add_method('display', 'xinput_delete_device_property', delete_device_property) if hasattr(disp, 'ge_add_event_data'): for device_event in (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion): disp.ge_add_event_data(info.major_opcode, device_event, DeviceEventData) disp.ge_add_event_data(info.major_opcode, DeviceChanged, DeviceEventData) disp.ge_add_event_data(info.major_opcode, HierarchyChanged, HierarchyEventData) disp.ge_add_event_data(info.major_opcode, PropertyEvent, PropertyEventData)
def test_dsl_async_cmd_dict_run_must_exist(): with pytest.raises(ContextError) as err: AsyncCmdStep('blah', Context({'cmds': {'runs': 'abc'}})) assert (str(err.value) == "cmds.run doesn't exist for blah.\nThe input should look like this in expanded syntax:\ncmds:\n run:\n - ./my-executable --arg\n - subdir/executable --arg1\n cwd: ./mydir\n\nIf you're passing in a list of commands, each command should be a simple string,\nor a sub-list of commands to run in serial,\nor a dict with a `run` entry:\ncmds:\n - ./my-executable --arg\n - run: ./another-executable --arg value\n cwd: ../mydir/subdir\n - run:\n - ./arb-executable1 --arg value1\n - [./arb-executable2.1, ./arb-executable2.2]\n cwd: ../mydir/arbdir\n - [./arb-executable3.1, ./arb-executable3.2]")
def create_gunicorn_worker(): engines = set([config[0] for config in list(app.config.get('DISTRIBUTED_STORAGE_CONFIG', {}).values())]) feature_flag = ('SwiftStorage' in engines) worker = GunicornWorker(__name__, app, ChunkCleanupWorker(chunk_cleanup_queue, poll_period_seconds=POLL_PERIOD_SECONDS), feature_flag) return worker
class VHeuristic(): def __init__(self): self.staticParts = [] self.randomPart = None self.minPart = None self.maxPart = None def random(self, variables=None): variables = flatten(variables) checkType(variables, ([Variable], type(None))) self.randomPart = (variables,) return self def _opt(self, variables, type): if variables: variables = flatten(variables) checkType(variables, [Variable]) types = (TypeVarHeuristic if isinstance(self, VarHeuristic) else TypeValHeuristic) assert (isinstance(type, str) and all(((p in [t.name for t in types]) for p in re.split('/|\\+', type)))), ('Bad value for ' + type) return (variables, type) def min(self, variables=None, *, type): self.minPart = self._opt(variables, type) return self def max(self, variables=None, *, type): self.maxPart = self._opt(variables, type) return self
class _TfDataCheckpointer(): def __init__(self, dataset_iterator: tf.data.Iterator): self._dataset_ckpt = tf.train.Checkpoint(ds=dataset_iterator) def save(self, filename: str): self._dataset_ckpt.write(filename) def load(self, filename: str): self._dataset_ckpt.read(filename).assert_consumed()
def setup_virtual_environments(distributions: dict[str, PackageDependencies], args: TestConfig, tempdir: Path) -> None:
    """Create and provision one venv per distinct external-requirements set.

    Side effect: fills the module-level _DISTRIBUTION_TO_VENV_MAPPING so each
    distribution is later checked under the right interpreter.  Distributions
    without external deps share the current interpreter (no venv).
    """
    if (not distributions):
        return
    # Sentinel venv for dependency-free distributions: the running interpreter.
    no_external_dependencies_venv = VenvInfo(pip_exe='', python_exe=sys.executable)
    # Group distributions by their frozen set of external requirements so
    # identical requirement sets share a single venv.
    external_requirements_to_distributions: defaultdict[frozenset[str], list[str]] = defaultdict(list)
    num_pkgs_with_external_reqs = 0
    for (distribution_name, requirements) in distributions.items():
        if requirements.external_pkgs:
            num_pkgs_with_external_reqs += 1
            external_requirements = frozenset(requirements.external_pkgs)
            external_requirements_to_distributions[external_requirements].append(distribution_name)
        else:
            _DISTRIBUTION_TO_VENV_MAPPING[distribution_name] = no_external_dependencies_venv
    if (num_pkgs_with_external_reqs == 0):
        if args.verbose:
            print(colored('No additional venvs are required to be set up', 'blue'))
        return
    requirements_sets_to_venvs: dict[frozenset[str], VenvInfo] = {}
    if args.verbose:
        num_venvs = len(external_requirements_to_distributions)
        msg = f"Setting up {num_venvs} venv{('s' if (num_venvs != 1) else '')} for {num_pkgs_with_external_reqs} distribution{('s' if (num_pkgs_with_external_reqs != 1) else '')}... "
        print(colored(msg, 'blue'), end='', flush=True)
    venv_start_time = time.perf_counter()
    # Venv creation fans out over a process pool; results are collected as
    # they complete (completion order is irrelevant — keyed by requirements).
    with concurrent.futures.ProcessPoolExecutor() as executor:
        venv_info_futures = [executor.submit(setup_venv_for_external_requirements_set, requirements_set, tempdir) for requirements_set in external_requirements_to_distributions]
        for venv_info_future in concurrent.futures.as_completed(venv_info_futures):
            (requirements_set, venv_info) = venv_info_future.result()
            requirements_sets_to_venvs[requirements_set] = venv_info
    venv_elapsed_time = (time.perf_counter() - venv_start_time)
    if args.verbose:
        print(colored(f'took {venv_elapsed_time:.2f} seconds', 'blue'))
    pip_start_time = time.perf_counter()
    # pip installs use threads (subprocess/network-bound work); capped at 10
    # concurrent installs.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        pip_install_futures = [executor.submit(install_requirements_for_venv, venv_info, args, requirements_set) for (requirements_set, venv_info) in requirements_sets_to_venvs.items()]
        concurrent.futures.wait(pip_install_futures)
    pip_elapsed_time = (time.perf_counter() - pip_start_time)
    if args.verbose:
        msg = f'Combined time for installing requirements across all venvs: {pip_elapsed_time:.2f} seconds'
        print(colored(msg, 'blue'))
    # Finally point every distribution at the venv built for its req-set.
    for (requirements_set, distribution_list) in external_requirements_to_distributions.items():
        venv_to_use = requirements_sets_to_venvs[requirements_set]
        _DISTRIBUTION_TO_VENV_MAPPING.update(dict.fromkeys(distribution_list, venv_to_use))
def liouvillian_ref(H, c_ops=()): L = (((- 1j) * (qutip.spre(H) - qutip.spost(H))) if H else 0) for c in c_ops: if c.issuper: L += c else: cdc = (c.dag() * c) L += qutip.sprepost(c, c.dag()) L -= (0.5 * (qutip.spre(cdc) + qutip.spost(cdc))) return L
def generate_auto_eval_text(e1, e2, print_text=True): answer_1 = e1['response'] answer_2 = e2['response'] instruction = e1['prompt'] instruction2 = e2['prompt'] assert (instruction == instruction2) eval_prompt = 'We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.' auto_eval_text = f'''[Question] {instruction} [The Start of Assistant 1's Answer] {answer_1} [The End of Assistant 1's Answer] [The Start of Assistant 2's Answer] {answer_2} [The End of Assistant 2's Answer] [System] {eval_prompt} ''' if print_text: print(auto_eval_text) return auto_eval_text
def test_activate_activates_non_existing_virtualenv_no_envs_file(mocker: MockerFixture, tester: CommandTester, venv_cache: Path, venv_name: str, venvs_in_cache_config: None) -> None: mocker.patch('shutil.which', side_effect=(lambda py: f'/usr/bin/{py}')) mocker.patch('subprocess.check_output', side_effect=check_output_wrapper()) mock_build_env = mocker.patch('poetry.utils.env.EnvManager.build_venv', side_effect=build_venv) tester.execute('3.7') venv_py37 = (venv_cache / f'{venv_name}-py3.7') mock_build_env.assert_called_with(venv_py37, executable=Path('/usr/bin/python3.7'), flags={'always-copy': False, 'system-site-packages': False, 'no-pip': False, 'no-setuptools': False}, prompt='simple-project-py3.7') envs_file = TOMLFile((venv_cache / 'envs.toml')) assert envs_file.exists() envs: dict[(str, Any)] = envs_file.read() assert (envs[venv_name]['minor'] == '3.7') assert (envs[venv_name]['patch'] == '3.7.1') assert (tester.io.fetch_error() == f'''Creating virtualenv {venv_py37.name} in {venv_py37.parent} ''') assert (tester.io.fetch_output() == f'''Using virtualenv: {venv_py37} ''')
def upgrade_config(cfg: CN, to_version: Optional[int]=None) -> CN: cfg = cfg.clone() if (to_version is None): to_version = _C.VERSION assert (cfg.VERSION <= to_version), 'Cannot upgrade from v{} to v{}!'.format(cfg.VERSION, to_version) for k in range(cfg.VERSION, to_version): converter = globals()[('ConverterV' + str((k + 1)))] converter.upgrade(cfg) cfg.VERSION = (k + 1) return cfg
class TestInputInvoiceMessageContentBase(): title = 'invoice title' description = 'invoice description' payload = 'invoice payload' provider_token = 'provider token' currency = 'PTBCoin' prices = [LabeledPrice('label1', 42), LabeledPrice('label2', 314)] max_tip_amount = 420 suggested_tip_amounts = [314, 256] provider_data = 'provider data' photo_url = 'photo_url' photo_size = 314 photo_width = 420 photo_height = 256 need_name = True need_phone_number = True need_email = True need_shipping_address = True send_phone_number_to_provider = True send_email_to_provider = True is_flexible = True
class AMPClientFactory(protocol.ReconnectingClientFactory): initialDelay = 1 factor = 1.5 maxDelay = 1 noisy = False def __init__(self, server): self.server = server self.protocol = AMPServerClientProtocol self.maxDelay = 10 self.broadcasts = [] def startedConnecting(self, connector): pass def buildProtocol(self, addr): self.resetDelay() self.server.amp_protocol = AMPServerClientProtocol() self.server.amp_protocol.factory = self return self.server.amp_protocol def clientConnectionLost(self, connector, reason): logger.log_info('Server disconnected from the portal.') protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason) def clientConnectionFailed(self, connector, reason): logger.log_msg('Attempting to reconnect to Portal ...') protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
.requires_cython def test_CoeffReuse(): coeff1 = coefficient('cos(w * t * pi)', args={'w': 3.0}) coeff2 = coefficient('cos(w2*t * pi)', args={'w2': 1.2}) coeff3 = coefficient('cos( my_var * t*pi)', args={'my_var': (- 1.2)}) assert isinstance(coeff2, coeff1.__class__) assert isinstance(coeff3, coeff1.__class__)
class TestJob(TestCase): def test_create_job(self): job = Job(random_job, 'hi', arg2='there') self.assertIsInstance(job, Job) def test_create_job_from_message(self): message = Message(body=dumps({'callable': random_job, 'args': (), 'kwargs': {}})) job = Job.from_message(message) self.assertEqual(message.get_body(), job.message.get_body()) def test_run(self): job = Job(random_job, 'hi', arg2='there') job.run() self.assertTrue((job.run_time >= 1)) self.assertEqual(job.result, 'yo!') job = Job(bad_job) job.run() self.assertIsInstance(job.exception, ZeroDivisionError)
def _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if (not _ebcdic_to_ascii_map): emap = (0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15, 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31, 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7, 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26, 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33, 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94, 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63, 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34, 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201, 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208, 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237, 125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243, 92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255) import string _ebcdic_to_ascii_map = string.maketrans(''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map)
def run_one_process(rank, world_size, args, cfg):
    """Per-worker entry point: set up logging, replay, rollout and evaluation,
    then hand off to main_rl and tear everything down afterwards.

    Args (as used here):
        rank: worker index; offsets the random seed and selects the sim GPU.
        world_size: total worker count (not read directly in this body).
        args: CLI namespace (gpu_ids, sim_gpu_ids, seed, work_dir, ...).
        cfg: experiment config with env/replay/rollout/eval/agent sub-configs.
    """
    import numpy as np
    np.set_printoptions(3)
    # Offset the seed per rank so workers don't produce identical streams.
    set_random_seed((args.seed + rank))
    if (is_not_null(cfg.env_cfg) and (len(args.gpu_ids) > 0)):
        if (args.sim_gpu_ids is not None):
            assert (len(args.sim_gpu_ids) == len(args.gpu_ids)), 'Number of simulation gpus should be the same as the training gpus recently!'
        else:
            # Default: simulate on the same GPUs used for training.
            args.sim_gpu_ids = args.gpu_ids
        cfg.env_cfg.device = f'cuda:{args.sim_gpu_ids[rank]}'
    work_dir = args.work_dir
    logger_file = osp.join(work_dir, f'{args.timestamp}-{args.name_suffix}.log')
    logger = get_logger(name=None, log_file=logger_file, log_level=cfg.get('log_level', 'INFO'))
    logger.info(f'''Config:
{cfg.pretty_text}''')
    logger.info(f'Set random seed to {args.seed}')
    # Replay buffer is only needed for training runs.
    if (is_not_null(cfg.replay_cfg) and (not args.evaluation)):
        logger.info(f'Build replay buffer!')
        from pyrl.env import build_replay
        replay = build_replay(cfg.replay_cfg)
    else:
        replay = None
    # Rollout workers likewise only exist when training.
    if ((not args.evaluation) and is_not_null(cfg.rollout_cfg)):
        from pyrl.env import build_rollout
        logger.info(f'Build rollout!')
        rollout_cfg = cfg.rollout_cfg
        rollout_cfg['env_cfg'] = deepcopy(cfg.env_cfg)
        rollout = build_rollout(rollout_cfg)
    else:
        rollout = None
    # Only rank 0 runs evaluation; its env config defaults to / overlays the
    # training env config.
    if (is_not_null(cfg.eval_cfg) and (rank == 0)):
        from pyrl.env import build_evaluation
        logger.info(f'Build evaluation!')
        eval_cfg = cfg.eval_cfg
        if (eval_cfg.get('env_cfg', None) is None):
            eval_cfg['env_cfg'] = deepcopy(cfg.env_cfg)
        elif (eval_cfg['env_cfg'] is not None):
            # Overlay: start from the training env cfg, apply eval overrides.
            tmp = eval_cfg['env_cfg']
            eval_cfg['env_cfg'] = deepcopy(cfg.env_cfg)
            eval_cfg['env_cfg'].update(tmp)
        get_logger().info(f'Building evaluation: eval_cfg: {eval_cfg}')
        evaluator = build_evaluation(eval_cfg)
    else:
        evaluator = None
    # Determine observation/action shapes so placeholder sizes in the config
    # can be resolved before building the agent.
    (obs_shape, action_shape) = (None, None)
    if is_not_null(cfg.env_cfg):
        logger.info(f'Get obs shape!')
        from pyrl.env import get_env_info
        env_params = get_env_info(cfg.env_cfg)
        cfg.agent_cfg['env_params'] = env_params
        obs_shape = env_params['obs_shape']
        action_shape = env_params['action_shape']
        logger.info(f"State shape:{env_params['obs_shape']}, action shape:{env_params['action_shape']}")
    elif is_not_null(replay):
        # Fall back to inferring the shape from the first replay sample.
        obs_shape = None
        for obs_key in ['inputs', 'obs']:
            if (obs_key in replay.memory):
                # NOTE(review): `.take(0).shape.memory[obs_key]` looks garbled —
                # probably meant `.take(0)[obs_key].shape`; confirm upstream.
                obs_shape = replay.memory.take(0).shape.memory[obs_key]
                break
    if (is_not_null(obs_shape) or is_not_null(action_shape)):
        from pyrl.networks.utils import get_kwargs_from_shape, replace_placeholder_with_args
        replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape)
        cfg = replace_placeholder_with_args(cfg, **replaceable_kwargs)
    main_rl(rollout, evaluator, replay, args, cfg)
    # Explicit teardown so worker processes exit cleanly.
    if is_not_null(evaluator):
        evaluator.close()
        logger.info('Close evaluator object')
    if is_not_null(rollout):
        rollout.close()
        logger.info('Close rollout object')
    if is_not_null(replay):
        del replay
        logger.info('Delete replay buffer')
class QueueCommandManager(CommandManager): def __init__(self): super(QueueCommandManager, self).__init__() self.queue = Queue() self.sent = [] def enqueue(self, fn): self.queue.put(fn) def user_wait(self, duration): self.enqueue((lambda t: sleep((duration + int(PY3))))) def clear(self): self.queue = Queue() def user_next_command(self, tracer): try: self.queue.get_nowait()(tracer) except Empty: return def send(self, msg): self.sent.append(json.loads(msg)) user_stop = clear def start(self, tracer, auth_msg=''): pass
def grok_station_xml(data, tmin, tmax): stations = {} for (sta, sta_epo, cha, cha_epo) in xmlzip(data, ('Station', 'StationEpoch', 'Channel', 'Epoch')): (sta_beg, sta_end, cha_beg, cha_end) = [tdatetime(x) for x in (sta_epo.StartDate, sta_epo.EndDate, cha_epo.StartDate, cha_epo.EndDate)] if (not ((sta_beg <= tmin) and (tmax <= sta_end) and (cha_beg <= tmin) and (tmax <= cha_end))): continue nslc = tuple([str(x.strip()) for x in (sta.net_code, sta.sta_code, cha.loc_code, cha.chan_code)]) (lat, lon, ele, dep, azi, dip) = [float(cha_epo.attrs[x]) for x in 'Lat Lon Elevation Depth Azimuth Dip'.split()] nsl = nslc[:3] if (nsl not in stations): stations[nsl] = model.Station(nsl[0], nsl[1], nsl[2], lat, lon, ele, dep) stations[nsl].add_channel(model.station.Channel(nslc[(- 1)], azi, dip)) return list(stations.values())
class _RandomSplitterIterDataPipe(IterDataPipe):
    """Randomly assigns each of `total_length` samples to a weighted split.

    `weights` maps split keys to relative weights; they are normalized so the
    expected counts sum to `total_length`, and each draw decrements the drawn
    split's remaining budget.

    Fixes: `normalize_weights` is invoked as `self.normalize_weights(...)`
    throughout but was missing its `@staticmethod` decorator, so every call
    passed `self` as `weights` and raised TypeError; `__setstate__` did not
    restore `_lengths`/`remaining_length`, breaking instances after unpickling.
    """

    def __init__(self, source_datapipe: IterDataPipe, total_length: int, weights: Dict[T, Union[int, float]], seed):
        self.source_datapipe: IterDataPipe = source_datapipe
        self.total_length: int = total_length
        self.remaining_length: int = total_length
        self._seed = seed
        self.keys: List[T] = list(weights.keys())
        self.key_to_index: Dict[T, int] = {k: i for (i, k) in enumerate(self.keys)}
        # Weights scaled so they sum to total_length (expected split sizes).
        self.norm_weights: List[float] = self.normalize_weights([weights[k] for k in self.keys], total_length)
        self.weights: List[float] = self.norm_weights.copy()
        self._rng = random.Random(self._seed)
        self._lengths: List[int] = []

    def draw(self) -> T:
        """Sample one split key and debit its remaining weight."""
        selected_key = self._rng.choices(self.keys, self.weights)[0]
        index = self.key_to_index[selected_key]
        self.weights[index] -= 1
        self.remaining_length -= 1
        if (self.weights[index] < 0):
            # Over-drawn split: clamp and rescale so sums stay consistent.
            self.weights[index] = 0
            self.weights = self.normalize_weights(self.weights, self.remaining_length)
        return selected_key

    @staticmethod
    def normalize_weights(weights: List[float], total_length: int) -> List[float]:
        """Scale `weights` proportionally so they sum to `total_length`."""
        total_weight = sum(weights)
        return [((float(w) * total_length) / total_weight) for w in weights]

    def reset(self) -> None:
        """Restore the initial RNG state and per-split budgets."""
        self._rng = random.Random(self._seed)
        self.weights = self.norm_weights.copy()
        self.remaining_length = self.total_length

    def override_seed(self, seed):
        """Set a new seed (takes effect on the next reset); returns self."""
        self._seed = seed
        return self

    def __getstate__(self):
        state = (self.source_datapipe, self.total_length, self._seed, self.norm_weights, self.keys, self.key_to_index, self.weights, self._rng.getstate())
        if (IterDataPipe.getstate_hook is not None):
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (self.source_datapipe, self.total_length, self._seed, self.norm_weights, self.keys, self.key_to_index, self.weights, rng_state) = state
        self._rng = random.Random()
        self._rng.setstate(rng_state)
        # Fields not carried in the pickled state must be reconstructed:
        # the weights invariantly sum to the number of undrawn samples.
        self._lengths = []
        self.remaining_length = round(sum(self.weights))

    def get_length(self, target: T) -> int:
        """Exact length of split `target`, only known for integer weights."""
        if (not self._lengths):
            if (all((w.is_integer() for w in self.norm_weights)) and (sum(self.norm_weights) == self.total_length)):
                self._lengths = [int(w) for w in self.norm_weights]
            else:
                raise TypeError('Lengths of the split cannot be known in advance. Please supply integer `weights` that sum up to `total_length`.\nAlternatively, use `datapipe.set_length(LENGTH)` to manually set the desired length.')
        index = self.key_to_index[target]
        return self._lengths[index]
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the polymorphic BenefitFeature base
    table and the LogoPlacement concrete feature."""

    dependencies = [('contenttypes', '0002_remove_content_type_name'), ('sponsors', '0029_auto__2015')]

    operations = [
        # Base table for polymorphic benefit features; concrete features point
        # back to it via a parent_link OneToOne.
        migrations.CreateModel(name='BenefitFeature', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'))], options={'verbose_name': 'Benefit Feature', 'verbose_name_plural': 'Benefit Features'}),
        migrations.AlterModelOptions(name='logoplacementconfiguration', options={'base_manager_name': 'objects', 'verbose_name': 'Logo Placement Configuration', 'verbose_name_plural': 'Logo Placement Configurations'}),
        # Concrete feature: where (publisher site + page area) a sponsor logo goes.
        migrations.CreateModel(name='LogoPlacement', fields=[('benefitfeature_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='sponsors.BenefitFeature')), ('publisher', models.CharField(choices=[('psf', 'Foundation'), ('pycon', 'Pycon'), ('pypi', 'Pypi'), ('core', 'Core Dev')], help_text='On which site should the logo be displayed?', max_length=30, verbose_name='Publisher')), ('logo_place', models.CharField(choices=[('sidebar', 'Sidebar'), ('sponsors', 'Sponsors Page'), ('jobs', 'Jobs'), ('blogpost', 'Blog'), ('footer', 'Footer'), ('docs', 'Docs'), ('download', 'Download Page'), ('devguide', 'Dev Guide')], help_text='Where the logo should be placed?', max_length=30, verbose_name='Logo Placement'))], options={'verbose_name': 'Logo Placement', 'verbose_name_plural': 'Logo Placement', 'abstract': False, 'base_manager_name': 'objects'}, bases=('sponsors.benefitfeature', models.Model)),
        # django-polymorphic bookkeeping: the concrete ContentType of each row.
        migrations.AddField(model_name='benefitfeature', name='polymorphic_ctype', field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_sponsors.benefitfeature_set+', to='contenttypes.ContentType')),
        migrations.AddField(model_name='benefitfeature', name='sponsor_benefit', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sponsors.SponsorBenefit')),
    ]
class AmountDialog(Factory.Popup): show_max = BooleanProperty(False) app = App.get_running_app() def __init__(self, show_max, amount, cb): Factory.Popup.__init__(self) self.show_max = show_max self.callback = cb if amount: self.ids.kb.amount = amount def update_amount(self, c): kb = self.ids.kb amount = (kb.fiat_amount if kb.is_fiat else kb.amount) if (c == '<'): amount = amount[:(- 1)] elif ((c == '.') and (amount in ['0', ''])): amount = '0.' elif (amount == '0'): amount = c else: try: Decimal((amount + c)) amount += c except: pass if ((not kb.is_fiat) and ('.' in amount)): p = amount.find('.') amount = amount.replace('.', '') amount = ((amount[:p] + '.') + amount[p:(p + self.app.decimal_point())]) if kb.is_fiat: kb.fiat_amount = amount else: kb.amount = amount
class FactorizedInteraction(nn.Module): def __init__(self, input_dim, output_dim, bias=True, residual_type='sum'): super(FactorizedInteraction, self).__init__() self.residual_type = residual_type if (residual_type == 'sum'): output_dim = (output_dim * 2) else: assert ((output_dim % 2) == 0), 'output_dim should be divisible by 2.' self.linear = nn.Linear(input_dim, output_dim, bias=bias) def forward(self, x): h = self.linear(x) (h2, h1) = torch.chunk(h, chunks=2, dim=(- 1)) if (self.residual_type == 'concat'): h = torch.cat([h2, (h1 * h2)], dim=(- 1)) elif (self.residual_type == 'sum'): h = (h2 + (h1 * h2)) return h
class RateDistortionLoss(nn.Module): def __init__(self, lmbda=0.01, metrics='mse'): super().__init__() self.mse = nn.MSELoss() self.lmbda = lmbda self.metrics = metrics def forward(self, output, target): (N, _, H, W) = target.size() out = {} num_pixels = ((N * H) * W) out['bpp_loss'] = sum(((torch.log(likelihoods).sum() / ((- math.log(2)) * num_pixels)) for likelihoods in output['likelihoods'].values())) if (self.metrics == 'mse'): out['mse_loss'] = self.mse(output['x_hat'], target) out['ms_ssim_loss'] = None out['loss'] = (((self.lmbda * (255 ** 2)) * out['mse_loss']) + out['bpp_loss']) elif (self.metrics == 'ms-ssim'): out['mse_loss'] = None out['ms_ssim_loss'] = (1 - ms_ssim(output['x_hat'], target, data_range=1.0)) out['loss'] = ((self.lmbda * out['ms_ssim_loss']) + out['bpp_loss']) return out
class Effect6733(BaseEffect): type = ('active', 'gang') def handler(fit, module, context, projectionRange, **kwargs): for x in range(1, 5): if module.getModifiedChargeAttr('warfareBuff{}ID'.format(x)): value = module.getModifiedItemAttr('warfareBuff{}Value'.format(x)) id = module.getModifiedChargeAttr('warfareBuff{}ID'.format(x)) if id: fit.addCommandBonus(id, value, module, kwargs['effect'])
class ModelArguments(): model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."}) model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
class Effect8362(BaseEffect): runTime = 'early' type = 'passive' def handler(fit, ship, context, projectionRange, **kwargs): fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Warp Disrupt Field Generator')), 'signatureRadiusBonus', ship.getModifiedItemAttr('eliteBonusHeavyInterdictors3'), **kwargs)
class FeesEstimateRequest(MWSDataType):
    """Request payload for a single MWS fee estimate.

    Wraps the identifying fields of a product together with a
    ``PriceToEstimateFees`` model and serializes them to request parameters.
    """

    def __init__(self, marketplace_id: MarketplaceEnumOrStr, id_type: str, id_value: str, price_to_estimate_fees: PriceToEstimateFees, is_amazon_fulfilled: bool, identifier: str):
        # Validate the nested model up front before storing anything.
        if not isinstance(price_to_estimate_fees, PriceToEstimateFees):
            raise ValueError('price_to_estimate_fees must be a PriceToEstimateFees model instance')
        self.marketplace_id = marketplace_id
        self.id_type = id_type
        self.id_value = id_value
        self.identifier = identifier
        self.is_amazon_fulfilled = is_amazon_fulfilled
        self.price_to_estimate_fees = price_to_estimate_fees

    def params_dict(self) -> dict:
        """Flatten this request into MWS query parameters, including the
        nested price parameters under the 'PriceToEstimateFees' prefix."""
        base = {
            'MarketplaceId': self.marketplace_id,
            'IdType': self.id_type,
            'IdValue': self.id_value,
            'Identifier': self.identifier,
            'IsAmazonFulfilled': self.is_amazon_fulfilled,
        }
        price_params = self.price_to_estimate_fees.to_params(prefix='PriceToEstimateFees')
        return {**base, **price_params}
def get_model(args, DATASET_CONFIG):
    """Build the GroupFree detector (domain-adaptation variant) plus criterion.

    Args:
        args: parsed command-line namespace (architecture hyperparameters).
        DATASET_CONFIG: dataset description (class/heading/size statistics).

    Returns:
        (model, criterion) tuple; criterion is the DA loss function.
    """
    # Per-point input features: optional RGB (3 channels) + optional height (1).
    color_channels = int(args.use_color) * 3
    num_input_channel = color_channels + 1 if args.use_height else color_channels

    model = GroupFreeDetector_DA(
        num_class=DATASET_CONFIG.num_class,
        num_heading_bin=DATASET_CONFIG.num_heading_bin,
        num_size_cluster=DATASET_CONFIG.num_size_cluster,
        mean_size_arr=DATASET_CONFIG.mean_size_arr,
        input_feature_dim=num_input_channel,
        width=args.width,
        bn_momentum=args.bn_momentum,
        sync_bn=bool(args.syncbn),
        num_proposal=args.num_target,
        sampling=args.sampling,
        dropout=args.transformer_dropout,
        activation=args.transformer_activation,
        nhead=args.nhead,
        num_decoder_layers=args.num_decoder_layers,
        dim_feedforward=args.dim_feedforward,
        self_position_embedding=args.self_position_embedding,
        cross_position_embedding=args.cross_position_embedding,
    )
    criterion = get_loss_DA
    return model, criterion
class SummaryTracker(object):
    """Tracks the summarized object population of the running process and
    reports differences between measurements (pympler-style tracking).

    A baseline summary is taken at construction time; ``diff``/``print_diff``
    compare the current population against it, or against explicitly
    supplied summaries.
    """

    def __init__(self, ignore_self=True):
        # Baseline summary of all objects currently known to the GC.
        self.s0 = summary.summarize(muppy.get_objects())
        # Summaries stored via store_summary(); when ignore_self is set,
        # the memory they occupy is excluded from later measurements.
        self.summaries = {}
        self.ignore_self = ignore_self

    def create_summary(self):
        """Return a summary of the current object population.

        When ``ignore_self`` is set, objects that are only alive because this
        tracker holds them (the stored summaries and their contents) are
        subtracted from the result so the tracker does not measure itself.
        """
        if (not self.ignore_self):
            res = summary.summarize(muppy.get_objects())
        else:
            # Record every object reachable from self.summaries along with how
            # many times this bookkeeping itself references it.
            all_of_them = []
            ref_counter = {}

            def store_info(o):
                all_of_them.append(o)
                if (id(o) in ref_counter):
                    ref_counter[id(o)] += 1
                else:
                    ref_counter[id(o)] = 1
            store_info(self.summaries)
            for (k, v) in self.summaries.items():
                store_info(k)
                summary._traverse(v, store_info)
            res = summary.summarize(muppy.get_objects())
            # NOTE(review): the referrer-count thresholds (== 3 and + 2) encode
            # exactly how many extra references the bookkeeping above adds;
            # confirm against the upstream pympler implementation before
            # changing anything here.
            for _id in ref_counter:
                if (len(gc.get_referrers(_id)) == 3):
                    summary._subtract(res, _id)
            for o in all_of_them:
                if (len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2)):
                    summary._subtract(res, o)
        return res

    def diff(self, summary1=None, summary2=None):
        """Return a swept summary difference.

        No args: diff between the last measurement and now (the internal
        baseline is advanced). Only ``summary1``: diff between it and a fresh
        measurement. Both: diff between the two given summaries.

        Raises:
            ValueError: if only ``summary2`` is provided.
        """
        res = None
        if (summary2 is None):
            self.s1 = self.create_summary()
            if (summary1 is None):
                res = summary.get_diff(self.s0, self.s1)
            else:
                res = summary.get_diff(summary1, self.s1)
            # Advance the baseline to the latest measurement.
            self.s0 = self.s1
        elif (summary1 is not None):
            res = summary.get_diff(summary1, summary2)
        else:
            raise ValueError('You cannot provide summary2 without summary1.')
        return summary._sweep(res)

    def print_diff(self, summary1=None, summary2=None):
        """Print the diff (see ``diff``) to stdout."""
        summary.print_(self.diff(summary1=summary1, summary2=summary2))

    def format_diff(self, summary1=None, summary2=None):
        """Return the diff (see ``diff``) as formatted lines."""
        return summary.format_(self.diff(summary1=summary1, summary2=summary2))

    def store_summary(self, key):
        """Take a measurement now and remember it under ``key``."""
        self.summaries[key] = self.create_summary()
def main():
    """Entry point for masked-image-modeling (SimMIM-style) pretraining.

    Parses arguments, loads the dataset, builds config / feature extractor /
    model, wires up masking transforms, and runs HF ``Trainer`` train/eval.

    NOTE(review): this function reads ``model_args.config_name_or_path``,
    ``config_overrides``, ``image_size`` etc. — attributes not declared by
    the ModelArguments class visible earlier in this file; presumably it
    pairs with a different ModelArguments definition — confirm.
    """
    # Arguments may come from a single .json file or from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()

    # Logging setup: mirror the verbosity chosen by TrainingArguments.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')

    # Resume support: look for an existing checkpoint in output_dir.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Dataset; optionally carve a validation split out of train.
    ds = load_dataset(data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir)
    data_args.train_val_split = (None if ('validation' in ds.keys()) else data_args.train_val_split)
    if (isinstance(data_args.train_val_split, float) and (data_args.train_val_split > 0.0)):
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Model config: explicit config path > model path > fresh config by type.
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if (model_args.config_overrides is not None):
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    # Force the SimMIM decoder and make the image/patch geometry explicit.
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'
    model_args.image_size = (model_args.image_size if (model_args.image_size is not None) else config.image_size)
    model_args.patch_size = (model_args.patch_size if (model_args.patch_size is not None) else config.patch_size)
    model_args.encoder_stride = (model_args.encoder_stride if (model_args.encoder_stride is not None) else config.encoder_stride)
    config.update({'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride})

    # Feature extractor: explicit name > model path > fresh one by model type.
    if model_args.feature_extractor_name:
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.feature_extractor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        FEATURE_EXTRACTOR_TYPES = {conf.model_type: feature_extractor_class for (conf, feature_extractor_class) in FEATURE_EXTRACTOR_MAPPING.items()}
        feature_extractor = FEATURE_EXTRACTOR_TYPES[model_args.model_type]()

    # Model: pretrained checkpoint or randomly initialized from config.
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    # Resolve which dataset column holds the image.
    if (data_args.image_column_name is not None):
        image_column_name = data_args.image_column_name
    elif ('image' in column_names):
        image_column_name = 'image'
    elif ('img' in column_names):
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # Augmentation pipeline + random patch-mask generator (SimMIM objective).
    transforms = Compose([Lambda((lambda img: (img.convert('RGB') if (img.mode != 'RGB') else img))), RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=((3.0 / 4.0), (4.0 / 3.0))), RandomHorizontalFlip(), ToTensor(), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)])
    mask_generator = MaskGenerator(input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio)

    def preprocess_images(examples):
        # Adds 'pixel_values' (augmented tensor) and 'mask' (patch mask).
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if ('train' not in ds):
            raise ValueError('--do_train requires a train dataset')
        if (data_args.max_train_samples is not None):
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        ds['train'].set_transform(preprocess_images)
    if training_args.do_eval:
        if ('validation' not in ds):
            raise ValueError('--do_eval requires a validation dataset')
        if (data_args.max_eval_samples is not None):
            ds['validation'] = ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
        ds['validation'].set_transform(preprocess_images)

    trainer = Trainer(model=model, args=training_args, train_dataset=(ds['train'] if training_args.do_train else None), eval_dataset=(ds['validation'] if training_args.do_eval else None), tokenizer=feature_extractor, data_collator=collate_fn)

    # Training (with optional resume), then evaluation.
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Model card / hub upload metadata.
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling']}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def drop_path(x, drop_prob: float=0.0, training: bool=False):
    """Stochastic depth: randomly zero entire samples of a residual branch.

    Each sample in the batch is independently kept with probability
    ``1 - drop_prob``; kept samples are rescaled by ``1 / keep_prob`` so the
    expected value is unchanged. A no-op at eval time or when drop_prob is 0.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if keep_prob > 0.0:
        mask.div_(keep_prob)
    return x * mask
def backup_output(output: Path) -> None:
    """Copy *output* (a directory under ``env.PROJECT_DIR``) into the backup
    and snapshot locations named by the TMP_OUTPUT_PATH / SNAPSHOT_PATH
    environment variables, then periodically dump a nirvana snapshot.

    No-op when the environment variables are unset (both must be set
    together) or when *output* lies outside the project tree.
    """
    backup_dir = os.environ.get('TMP_OUTPUT_PATH')
    snapshot_dir = os.environ.get('SNAPSHOT_PATH')
    if backup_dir is None:
        # The two variables are expected to be set together (or neither).
        assert snapshot_dir is None
        return
    assert snapshot_dir is not None

    try:
        relative_output_dir = output.relative_to(env.PROJECT_DIR)
    except ValueError:
        # Output lives outside the project tree; nothing to back up.
        return

    for dir_ in [backup_dir, snapshot_dir]:
        # BUG FIX: dir_ is a plain str from os.environ; `str / Path` raises
        # TypeError, so wrap it in Path before joining.
        new_output = Path(dir_) / relative_output_dir
        prev_backup_output = new_output.with_name(new_output.name + '_prev')
        new_output.parent.mkdir(exist_ok=True, parents=True)
        if new_output.exists():
            # Keep the previous copy around until the new one is written.
            new_output.rename(prev_backup_output)
        shutil.copytree(output, new_output)
        # Copy the sibling config file alongside the output, if present.
        if output.with_suffix('.toml').exists():
            shutil.copyfile(output.with_suffix('.toml'), new_output.with_suffix('.toml'))
        if prev_backup_output.exists():
            shutil.rmtree(prev_backup_output)

    # Dump a platform snapshot at most once every 10 minutes.
    global _LAST_SNAPSHOT_TIME
    if _LAST_SNAPSHOT_TIME is None or time.time() - _LAST_SNAPSHOT_TIME > 10 * 60:
        import nirvana_dl.snapshot  # local import: only present in the target env
        nirvana_dl.snapshot.dump_snapshot()
        _LAST_SNAPSHOT_TIME = time.time()
        print('The snapshot was saved!')
()  # NOTE(review): likely the residue of a stripped decorator (e.g. @task()) — confirm against the original source.


def pycache_clean(context):
    """Remove every __pycache__ directory below TASK_ROOT_STR."""
    with context.cd(TASK_ROOT_STR):
        # Collect targets first, then delete, so the walk is not mutated mid-iteration.
        dirs = set()
        for (root, dirnames, _) in os.walk(os.curdir):
            if ('__pycache__' in dirnames):
                dirs.add(os.path.join(root, '__pycache__'))
        print('Removing __pycache__ directories')
        rmrf(dirs, verbose=False)
class PreOCIModel(SuperuserDataInterface):
    """Superuser data interface backed by the legacy ("pre-OCI") database
    model package; translates DB entities into API datatypes."""

    def get_repository_build(self, uuid):
        """Look up a build by uuid and return it as a RepositoryBuild datatype.

        Raises:
            InvalidRepositoryBuildException: if the uuid is unknown.
        """
        try:
            build = model.build.get_repository_build(uuid)
        except model.InvalidRepositoryBuildException as e:
            raise InvalidRepositoryBuildException(str(e))
        repo_namespace = build.repository.namespace_user.username
        repo_name = build.repository.name
        # Permissions are evaluated for the currently authenticated caller.
        can_read = ReadRepositoryPermission(repo_namespace, repo_name).can()
        can_write = ModifyRepositoryPermission(repo_namespace, repo_name).can()
        can_admin = AdministerRepositoryPermission(repo_namespace, repo_name).can()
        job_config = get_job_config(build.job_config)
        (phase, status, error) = _get_build_status(build)
        url = ''
        if (build.resource_key is not None):
            url = userfiles.get_file_url(build.resource_key, get_request_ip(), requires_cors=True)
        return RepositoryBuild(build.uuid, build.logs_archived, repo_namespace, repo_name, can_write, can_read, _create_user(build.pull_robot), build.resource_key, BuildTrigger(build.trigger, _create_user(build.trigger.pull_robot), can_read, can_admin, True), build.display_name, build.started, job_config, phase, status, error, url)

    def delete_service_key(self, kid):
        """Delete a service key; raises ServiceKeyDoesNotExist when absent."""
        try:
            key = model.service_keys.delete_service_key(kid)
        except model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist
        return _create_key(key)

    def update_service_key(self, kid, name=None, metadata=None):
        model.service_keys.update_service_key(kid, name, metadata)

    def set_key_expiration(self, kid, expiration_date):
        model.service_keys.set_key_expiration(kid, expiration_date)

    def get_service_key(self, kid, service=None, alive_only=True, approved_only=True):
        """Fetch a service key; raises ServiceKeyDoesNotExist when absent."""
        try:
            key = model.service_keys.get_service_key(kid, approved_only=approved_only, alive_only=alive_only)
            return _create_key(key)
        except model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist

    def approve_service_key(self, kid, approver, approval_type, notes=''):
        """Approve a pending service key, mapping model errors to API errors."""
        try:
            key = model.service_keys.approve_service_key(kid, approval_type, approver=approver, notes=notes)
            return _create_key(key)
        except model.ServiceKeyDoesNotExist:
            raise ServiceKeyDoesNotExist
        except model.ServiceKeyAlreadyApproved:
            raise ServiceKeyAlreadyApproved

    def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, rotation_duration=None):
        """Create a new service key; returns (private_key, kid).

        NOTE(review): the ``kid`` and ``rotation_duration`` parameters are
        accepted but not forwarded to the model call — confirm intentional.
        """
        (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, metadata=metadata, name=name)
        return (private_key, key.kid)

    def list_all_service_keys(self):
        keys = model.service_keys.list_all_keys()
        return [_create_key(key) for key in keys]

    def change_organization_name(self, old_org_name, new_org_name):
        """Rename an org (no-op when new_org_name is None); returns the org."""
        org = model.organization.get_organization(old_org_name)
        if (new_org_name is not None):
            org = model.user.change_username(org.id, new_org_name)
        quotas = _get_namespace_quotas(org)
        return Organization(org.username, org.email, quotas)

    def mark_organization_for_deletion(self, name):
        org = model.organization.get_organization(name)
        model.user.mark_namespace_for_deletion(org, all_queues, namespace_gc_queue, force=True)

    def take_ownership(self, namespace, authed_user):
        """Give ``authed_user`` control of ``namespace``.

        Returns:
            (entity_id, was_user): ``was_user`` is True when the namespace was
            a plain user (which gets converted to an organization);
            (None, False) when the namespace does not exist.
        """
        entity = model.user.get_user_or_org(namespace)
        if (entity is None):
            return (None, False)
        was_user = (not entity.organization)
        if entity.organization:
            model.organization.add_user_as_admin(authed_user, entity)
        else:
            model.organization.convert_user_to_organization(entity, authed_user)
        return (entity.id, was_user)

    def update_enabled(self, username, enabled):
        user = model.user.get_nonrobot_user(username)
        model.user.update_enabled(user, bool(enabled))

    def update_email(self, username, email, auto_verify):
        user = model.user.get_nonrobot_user(username)
        model.user.update_email(user, email, auto_verify)

    def change_password(self, username, password):
        user = model.user.get_nonrobot_user(username)
        model.user.change_password(user, password)

    def mark_user_for_deletion(self, username):
        user = model.user.get_nonrobot_user(username)
        model.user.mark_namespace_for_deletion(user, all_queues, namespace_gc_queue, force=True)

    def create_reset_password_email_code(self, email):
        code = model.user.create_reset_password_email_code(email)
        return code

    def get_nonrobot_user(self, username):
        user = model.user.get_nonrobot_user(username)
        if (user is None):
            return None
        return _create_user(user)

    def create_install_user(self, username, password, email):
        """Create the initial (install-time) user.

        Returns:
            (user, confirmation_code): the code is empty unless mailing
            (email confirmation) is enabled.
        """
        prompts = model.user.get_default_user_prompts(features)
        user = model.user.create_user(username, password, email, auto_verify=(not features.MAILING), email_required=features.MAILING, prompts=prompts)
        return_user = _create_user(user)
        if features.MAILING:
            confirmation_code = model.user.create_confirm_email_code(user)
            return (return_user, confirmation_code)
        return (return_user, '')

    def get_active_users(self, disabled=True):
        users = model.user.get_active_users(disabled=disabled)
        return [_create_user(user) for user in users]

    def get_active_users_paginated(self, disabled=True, limit=50, page_token=None):
        (users, next_page_token) = model.modelutil.paginate(model.user.get_active_users(disabled=disabled), database.User, page_token=page_token, limit=limit)
        return ([_create_user(user) for user in users], next_page_token)

    def get_organizations(self):
        return [Organization(org.username, org.email, _get_namespace_quotas(org)) for org in model.organization.get_organizations()]

    def get_organizations_paginated(self, limit=50, page_token=None):
        (orgs, next_page_token) = model.modelutil.paginate(model.organization.get_organizations(), database.User, page_token=page_token, limit=limit)
        return ([Organization(org.username, org.email, _get_namespace_quotas(org)) for org in orgs], next_page_token)
def parse_gstreamer_taglist(tags):
    """Flatten a GStreamer TagList-like mapping into a plain str->value dict.

    'extended-comment' entries of the form "KEY=VALUE" are split and merged
    under KEY; repeated string values are newline-joined with duplicates
    skipped; Gst.DateTime values become ISO-8601 strings; numbers are kept
    as-is; bytes values are decoded.
    """
    merged = {}
    for key in tags.keys():
        value = tags[key]
        if (key == 'extended-comment'):
            # May be a single "K=V" string or a list of them.
            if (not isinstance(value, list)):
                value = [value]
            for val in value:
                if (not isinstance(val, str)):
                    continue
                split = val.split('=', 1)
                sub_key = split[0]
                val = split[(- 1)]
                if (sub_key in merged):
                    sub_val = merged[sub_key]
                    if (not isinstance(sub_val, str)):
                        continue
                    # Append only when this exact value is not already present.
                    if (val not in sub_val.split('\n')):
                        merged[sub_key] += ('\n' + val)
                else:
                    merged[sub_key] = val
        elif isinstance(value, Gst.DateTime):
            value = value.to_iso8601_string()
            merged[key] = value
        else:
            # Numbers pass through untouched. NOTE: `int | float` as an
            # isinstance() target requires Python >= 3.10.
            if isinstance(value, (int | float)):
                merged[key] = value
                continue
            if isinstance(value, bytes):
                value = decode(value)
            if (not isinstance(value, str)):
                value = str(value)
            # Repeated tags accumulate as newline-separated strings.
            if (key in merged):
                merged[key] += ('\n' + value)
            else:
                merged[key] = value
    return merged
class GdbLaunch(sublime_plugin.WindowCommand):
    """Sublime Text command that starts a gdb (MI) session in the background,
    optionally after letting the user pick one of several configured
    executables, and wires its stdout/stderr into the plugin's views."""

    def run(self):
        global exec_settings
        s = self.window.active_view().settings()
        exec_choices = s.get('sublimegdb_executables')
        # No per-executable configuration: launch with empty settings.
        if ((exec_choices is None) or (type(exec_choices) != dict)):
            global gdb_threads
            exec_settings = {}
            t = threading.Thread(target=self.launch)
            t.start()
            gdb_threads.append(t)
            return

        def on_choose(index):
            # Quick-panel callback; index -1 means the panel was cancelled.
            global exec_settings
            global gdb_threads
            if (index == (- 1)):
                return
            exec_name = list(exec_choices)[index]
            exec_settings = exec_choices[exec_name]
            t = threading.Thread(target=self.launch)
            t.start()
            gdb_threads.append(t)
        self.window.show_quick_panel(list(exec_choices), on_choose)

    def launch(self):
        """Worker: spawn gdb (and an optional gdb server), set up the debugger
        layout and I/O threads, then configure the MI session."""
        global gdb_process
        global gdb_server_process
        global gdb_threads
        global gdb_run_status
        global gdb_bkp_window
        global gdb_bkp_view
        global gdb_bkp_layout
        global gdb_shutting_down
        global DEBUG
        global DEBUG_FILE
        view = self.window.active_view()
        DEBUG = get_setting('debug', False, view)
        DEBUG_FILE = expand_path(get_setting('debug_file', 'stdout', view), self.window)
        if DEBUG:
            log_debug(('Will write debug info to file: %s' % DEBUG_FILE))
        # Only start when no gdb is currently running.
        if ((gdb_process is None) or (gdb_process.poll() is not None)):
            commandline = get_setting('commandline', view=view)
            if isinstance(commandline, list):
                commandline = ' '.join(commandline)
            commandline = expand_path(commandline, self.window)
            path = expand_path(get_setting('workingdir', '/tmp', view), self.window)
            arguments = expand_path(get_setting('arguments', ''), self.window)
            log_debug(('Running: %s\n' % commandline))
            log_debug(('In directory: %s\n' % path))
            # Unconfigured plugin: open default + user settings side by side.
            if ((commandline == 'notset') or (path == 'notset')):
                sublime.error_message('You have not configured the plugin correctly, the default configuration file and your user configuration file will open in a new window')
                sublime.run_command('new_window')
                wnd = sublime.active_window()
                wnd.set_layout({'cols': [0.0, 0.5, 1.0], 'rows': [0, 1.0], 'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]})
                v = wnd.open_file(('%s/User/SublimeGDB.sublime-settings' % sublime.packages_path()))
                v2 = wnd.open_file(('%s/SublimeGDB/SublimeGDB.sublime-settings' % sublime.packages_path()))
                wnd.set_view_index(v2, 1, 0)
                return
            if (not os.path.exists(path)):
                sublime.error_message(('The directory given does not exist: %s' % path))
                return
            # Optional extra environment on top of the current process env.
            gdb_env = get_setting('env', 'notset')
            if (gdb_env == 'notset'):
                gdb_env = None
            else:
                env_copy = os.environ.copy()
                env_copy.update(gdb_env)
                gdb_env = env_copy
            # Optional gdb server process started before gdb itself.
            gdb_server_cmd = get_setting('server_commandline', 'notset')
            gdb_server_dir = get_setting('server_workingdir', 'notset')
            if ((gdb_server_cmd != 'notset') and (gdb_server_dir != 'notset')):
                gdb_server_cmd = expand_path(gdb_server_cmd, self.window)
                gdb_server_dir = expand_path(gdb_server_dir, self.window)
                gdb_server_shell = get_setting('server_shell', False)
                log_debug(('gdb_server_cmd: %s' % gdb_server_cmd))
                log_debug(('gdb_server_dir: %s' % gdb_server_dir))
                log_debug(('gdb_server_dir: %s' % gdb_server_shell))
                gdb_server_process = subprocess.Popen(gdb_server_cmd, shell=gdb_server_shell, cwd=gdb_server_dir, env=gdb_env)
            gdb_process = subprocess.Popen(commandline, shell=True, cwd=path, env=gdb_env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            log_debug(('Process: %s\n' % gdb_process))
            # Remember the current window layout so it can be restored later.
            gdb_bkp_window = sublime.active_window()
            gdb_bkp_layout = gdb_bkp_window.get_layout()
            gdb_bkp_view = gdb_bkp_window.active_view()
            gdb_bkp_window.set_layout(get_setting('layout', {'cols': [0.0, 0.5, 1.0], 'rows': [0.0, 0.75, 1.0], 'cells': [[0, 0, 2, 1], [0, 1, 1, 2], [1, 1, 2, 2]]}))
            for view in gdb_views:
                if (view.is_closed() and view.open_at_start()):
                    view.open()
                view.clear()
            gdb_shutting_down = False
            # Pump gdb's stdout/stderr on background threads.
            t = threading.Thread(target=gdboutput, args=(gdb_process.stdout,))
            t.start()
            gdb_threads.append(t)
            t = threading.Thread(target=gdboutput, args=(gdb_process.stderr,))
            t.start()
            gdb_threads.append(t)
            try:
                # NOTE(review): the pty path is deliberately disabled by the
                # raise below (always falls through to the tempfile pipe) —
                # confirm whether this was a temporary workaround.
                raise Exception('Nope')
                (pty, tty) = os.openpty()
                name = os.ttyname(tty)
            except:
                (pipe, name) = tempfile.mkstemp()
                (pty, tty) = (pipe, None)
            log_debug(('pty: %s, tty: %s, name: %s' % (pty, tty, name)))
            t = threading.Thread(target=programio, args=(pty, tty))
            t.start()
            gdb_threads.append(t)
            # Verify gdb is speaking the MI protocol before configuring it.
            try:
                run_cmd('-gdb-show interpreter', True, timeout=get_setting('gdb_timeout', 20))
            except:
                sublime.error_message('It seems you\'re not running gdb with the "mi" interpreter. Please add\n"--interpreter=mi" to your gdb command line')
                gdb_process.stdin.write('quit\n')
                return
            run_cmd(('-inferior-tty-set %s' % name), True)
            run_cmd('-enable-pretty-printing')
            run_cmd('-gdb-set mi-async on')
            run_cmd('-gdb-set pagination off')
            dis_asm_flavor = get_setting('disassembly_flavor', 'att', view)
            if (dis_asm_flavor == 'intel'):
                run_cmd('-gdb-set disassembly-flavor intel')
            else:
                run_cmd('-gdb-set disassembly-flavor att')
            attach_cmd = get_setting('attach_cmd', 'notset')
            if (attach_cmd != 'notset'):
                run_cmd(attach_cmd, block=True, timeout=get_setting('gdb_timeout', 20))
            gdb_breakpoint_view.sync_breakpoints()
            if get_setting('run_after_init', True):
                gdb_run_status = 'running'
                if arguments:
                    run_cmd(('-exec-arguments ' + arguments))
                run_cmd(get_setting('exec_cmd', '-exec-run'), True)
            else:
                gdb_run_status = 'stopped'
            # NOTE(review): pretty-printing is also enabled unconditionally
            # above; this second, setting-gated call looks redundant — confirm.
            if get_setting('enable_pretty_printing', True):
                run_cmd('-enable-pretty-printing')
            show_input()
        else:
            sublime.status_message('GDB is already running!')

    def is_enabled(self):
        # Command is only available while no session is active.
        return (not is_running())

    def is_visible(self):
        return (not is_running())
class HDDGraph(_Graph):
    """Graph widget plotting used or free space of one mounted partition."""

    fixed_upper_bound = True
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('path', '/', 'Partition mount point.'),
        ('space_type', 'used', 'free/used'),
    ]

    def __init__(self, **config):
        _Graph.__init__(self, **config)
        self.add_defaults(HDDGraph.defaults)
        stats = statvfs(self.path)
        # Graph ceiling: total filesystem size in bytes.
        self.maxvalue = stats.f_blocks * stats.f_frsize
        self.fulfill(self._get_values())

    def _get_values(self):
        """Return the currently used (or available) byte count."""
        stats = statvfs(self.path)
        if self.space_type == 'used':
            blocks = stats.f_blocks - stats.f_bfree
        else:
            blocks = stats.f_bavail
        return blocks * stats.f_frsize

    def update_graph(self):
        self.push(self._get_values())
def test_deprecated_alias_method(recwarn_always):
    """The deprecated alias still works and its warning names the replacement."""
    obj = Alias()
    assert obj.old_hotness_method() == 'new hotness method'
    warning = recwarn_always.pop(TrioAsyncioDeprecationWarning)
    message = warning.message.args[0]
    for fragment in ('test_deprecate.Alias.old_hotness_method is deprecated',
                     'test_deprecate.Alias.new_hotness_method instead'):
        assert fragment in message
class LazyEncodingPats(object):
    """Callable that lazily compiles encoding patterns, caching one tuple per
    flavour (binary vs unicode) on the instance."""

    def __call__(self, binary=False):
        cache_attr = 'binary_pats' if binary else 'unicode_pats'
        cached = getattr(self, cache_attr, None)
        if cached is None:
            # First request for this flavour: compile once and memoize.
            cached = tuple(compile_pats(binary))
            setattr(self, cache_attr, cached)
        yield from cached
def report_long_words(st, locn, toks):
    """Pyparsing parse action: report the location of any matched word longer
    than three characters, with a caret marking its column."""
    word = toks[0]
    if len(word) <= 3:
        return
    line_no = pp.lineno(locn, st)
    col_no = pp.col(locn, st)
    print(f'Found {word!r} on line {line_no} at column {col_no}')
    print('The full line of text was:')
    print(f'{pp.line(locn, st)!r}')
    # Caret aligned under the matched column (offset by the repr's quote).
    print(f" {'^':>{col_no}}")
    print()
class DecoderOptions(BaseOptions):
    """Training-time options for the decoder: extends BaseOptions with
    display/checkpointing, optimisation, loss-weighting, data-augmentation
    and network-shape flags. Sets ``isTrain`` to True."""

    def initialize(self):
        BaseOptions.initialize(self)
        # --- display / checkpointing ---
        self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--load_path', type=str, default='')
        self.parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # --- optimisation / schedule ---
        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # --- loss weighting ---
        self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
        self.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.parser.add_argument('--identity', type=float, default=0.5, help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set optidentity = 0.1')
        self.parser.add_argument('--alpha', type=float, default=0, help='weight for GAN loss of samples without pixel loss')
        self.parser.add_argument('--momentum', type=float, default=0, help='momentum for RMSprop optimiser')
        self.parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay for RMSprop optimiser')
        self.parser.add_argument('--lambda_feature_loss', type=float, default=0, help='weight for feature loss')
        # --- data loading / augmentation ---
        self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        self.parser.add_argument('--crop_type', type=str, default='random_crop', help='random crop or center crop')
        self.parser.add_argument('--name_landmarks_list', type=str, help='name of landmarks list for A and B')
        # --- network shape (hourglass / boundary detection) ---
        self.parser.add_argument('--num_stacks', type=int, default=2, help='num of stacks of hourglass network')
        self.parser.add_argument('--num_blocks', type=int, default=1, help='uumber of residual modules at each location in the hourglass')
        self.parser.add_argument('--sigma', type=float, default=1, help='sigma for gaussian map')
        self.parser.add_argument('--fineSize_F1', type=int, default=256, help='fine size of F1 for network')
        self.parser.add_argument('--fineSize_F2', type=int, default=256, help='fine size of F2 for network')
        self.parser.add_argument('--fineSize_Boundary', type=int, default=64, help='fine size of Boundary for network')
        self.parser.add_argument('--nc_F1', type=int, default=3, help='# of channels of F1')
        self.parser.add_argument('--nc_F2', type=int, default=3, help='# of channels of F2')
        self.parser.add_argument('--nc_Boundary', type=int, default=15, help='# of channels of Boundary')
        self.parser.add_argument('--feature_loss', action='store_true', help='need feature loss for training')
        self.parser.add_argument('--lambda_pix_loss', type=float, default=100.0, help='weight for pixel loss')
        self.parser.add_argument('--input_type_D', type=str, default='type_0', help='input type of D')
        self.parser.add_argument('--feature_loss_type', type=str, default='relu1_2', help='feature loss type')
        self.parser.add_argument('--which_boundary_detection', type=str, default='v0', help='which version of boundary detection model')
        # These options are only used during training.
        self.isTrain = True
class MemoryDockerCollector(MemoryCgroupCollector):
    """Cgroup memory collector that rewrites docker container ids into their
    human-readable names inside the published metric paths."""

    def collect(self):
        if docker is None:
            self.log.error('Unable to import docker')
            return
        # Map container id -> first name (leading '/' stripped).
        self.containers = {
            container['Id']: container['Names'][0][1:]
            for container in docker.Client().containers(all=True)
            if container['Names'] is not None
        }
        return super(MemoryDockerCollector, self).collect()

    def publish(self, metric_name, value, metric_type):
        for container_id, container_name in self.containers.items():
            metric_name = metric_name.replace(
                'docker.' + container_id + '.',
                'docker.' + container_name + '.')
        return super(MemoryDockerCollector, self).publish(metric_name, value, metric_type)
def test_message_ack_timing_keeper_edge_cases():
    """A fresh keeper reports nothing; finalizing an untracked message and
    adding an unfinalized one likewise produce no report entries."""
    keeper = MessageAckTimingKeeper()
    assert keeper.generate_report() == []

    # Finalizing a message that was never added must be ignored.
    keeper.finalize_message(Processed(MessageID(999), make_signature()))
    assert keeper.generate_report() == []

    # A message that was added but never finalized must not be reported.
    keeper.add_message(RevealSecret(MessageID(1), make_signature(), make_secret()))
    assert keeper.generate_report() == []
def parse_json_file(filepath, metric):
    """Load one rate-distortion results file.

    Returns a dict with 'name', 'xs' (bpp values) and 'ys' (the requested
    metric). MS-SSIM values are converted to decibels (-10*log10(1-v)).

    Raises:
        ValueError: if the file lacks results/bpp or the requested metric.
    """
    filepath = Path(filepath)
    fallback_name = filepath.name.split('.')[0]
    with filepath.open('r') as fh:
        try:
            data = json.load(fh)
        except json.decoder.JSONDecodeError:
            print(f'Error reading file "{filepath}"')
            raise
    if 'results' not in data or 'bpp' not in data['results']:
        raise ValueError(f'Invalid file "{filepath}"')
    if metric not in data['results']:
        raise ValueError(f"""Error: metric "{metric}" not available. Available metrics: {', '.join(data['results'].keys())}""")
    if metric == 'ms-ssim':
        # Express MS-SSIM in dB for plotting.
        values = np.array(data['results'][metric])
        data['results'][metric] = -10 * np.log10(1 - values)
    return {
        'name': data.get('name', fallback_name),
        'xs': data['results']['bpp'],
        'ys': data['results'][metric],
    }
def cdf_generator(N=(10 ** 6)):
    """Monte-Carlo estimate of the CDF of the amplitude factor Theta.

    Draws ``N`` random orientations (polarisation angle psi, azimuth phi,
    cos-colatitude, cos-inclination), evaluates the antenna-pattern
    combination Theta for each, and returns a cubic interpolant of the
    empirical CDF over the sampled range.

    Parameters
    ----------
    N : int
        Number of Monte-Carlo samples (default ``10**6``).

    Returns
    -------
    (Theta_CDF, min_CDF, max_CDF)
        ``Theta_CDF`` maps a Theta value inside ``[min_CDF, max_CDF]`` to
        its empirical cumulative probability in [0, 1]; the bounds are the
        smallest/largest sampled Theta (queries outside raise in interp1d).
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # int subclasses while still rejecting floats; same AssertionError
    # and message as before for invalid input.
    assert isinstance(N, int), 'N should be an int.'
    pi = np.pi
    Theta_dist = np.empty(N)
    for i in range(N):
        # Uniform draws: psi in [0, pi), phi in [0, 2*pi), costh and
        # cosiota in [0, 1).  Draw order is preserved for seeded runs.
        psi = random() * pi
        phi = random() * 2 * pi
        costh = random()
        cosiota = random()
        # Hoist the shared sub-expressions of the antenna patterns.
        two_psi = 2 * psi
        two_phi = 2 * phi
        half_term = 0.5 * (1 + costh ** 2)
        # Antenna pattern functions F+ and Fx.
        Fplus = (half_term * np.cos(two_phi) * np.cos(two_psi)
                 - costh * np.sin(two_phi) * np.sin(two_psi))
        Fcross = (half_term * np.cos(two_phi) * np.sin(two_psi)
                  + costh * np.sin(two_phi) * np.cos(two_psi))
        Theta_dist[i] = 0.5 * np.sqrt(
            Fplus ** 2 * (1 + cosiota ** 2) ** 2
            + 4 * Fcross ** 2 * cosiota ** 2)
    # Empirical CDF: the i-th smallest sample maps to probability i/(N-1).
    Theta_dist.sort()
    Theta_CDF = interp1d(Theta_dist, np.linspace(0, 1, N), kind='cubic')
    min_CDF = float(Theta_dist[0])
    max_CDF = float(Theta_dist[-1])
    return (Theta_CDF, min_CDF, max_CDF)
# NOTE(review): this looks like it was meant to be the decorator
# '@_vcs_handler(...)' (versioneer-style handler registration); as written
# the call's result is discarded and the function below is never passed to
# it -- confirm against the original source before relying on registration.
_vcs_handler('git', 'pieces_from_vcs')
def git_pieces_from_vcs(tag_prefix: str, root: str, verbose: bool, runner: Callable=run_command) -> Dict[(str, Any)]:
    """Collect version "pieces" for *root* from git metadata.

    Runs ``git describe`` / ``git rev-parse`` (through *runner*) and returns
    a dict with keys ``long``, ``short``, ``branch``, ``dirty``, ``error``,
    ``date`` and either ``closest-tag``/``distance`` parsed from the
    describe output, or ``closest-tag=None`` with a commit-count distance.

    Raises:
        NotThisMethod: if *root* is not a git checkout or a required git
            command fails.
    """
    GITS = ['git']
    if (sys.platform == 'win32'):
        # On Windows try both the cmd shim and the exe.
        GITS = ['git.cmd', 'git.exe']
    # GIT_DIR in the environment would redirect git away from *root*;
    # strip it for every subprocess call made below.
    env = os.environ.copy()
    env.pop('GIT_DIR', None)
    runner = functools.partial(runner, env=env)
    # Cheap sanity check that *root* is actually under git control.
    (_, rc) = runner(GITS, ['rev-parse', '--git-dir'], cwd=root, hide_stderr=(not verbose))
    if (rc != 0):
        if verbose:
            print(('Directory %s not under git control' % root))
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # Describe relative to the closest tag matching the prefix; output is
    # 'TAG-DISTANCE-gHEX[-dirty]' or a bare hash when no tag matches.
    (describe_out, rc) = runner(GITS, ['describe', '--tags', '--dirty', '--always', '--long', '--match', f'{tag_prefix}[[:digit:]]*'], cwd=root)
    if (describe_out is None):
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    (full_out, rc) = runner(GITS, ['rev-parse', 'HEAD'], cwd=root)
    if (full_out is None):
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces: Dict[(str, Any)] = {}
    pieces['long'] = full_out
    pieces['short'] = full_out[:7]  # abbreviated commit id
    pieces['error'] = None
    (branch_name, rc) = runner(GITS, ['rev-parse', '--abbrev-ref', 'HEAD'], cwd=root)
    if ((rc != 0) or (branch_name is None)):
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if (branch_name == 'HEAD'):
        # Detached HEAD: fall back to a branch that contains this commit.
        (branches, rc) = runner(GITS, ['branch', '--contains'], cwd=root)
        if ((rc != 0) or (branches is None)):
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split('\n')
        # The first line may be '(HEAD detached at ...)' -- drop it.
        if ('(' in branches[0]):
            branches.pop(0)
        # Strip the two-character '* ' / '  ' listing prefix.
        branches = [branch[2:] for branch in branches]
        if ('master' in branches):
            branch_name = 'master'
        elif (not branches):
            branch_name = None
        else:
            # Arbitrary pick: there is no obvious way to rank the rest.
            branch_name = branches[0]
    pieces['branch'] = branch_name
    git_describe = describe_out
    # A trailing '-dirty' marks uncommitted changes; strip before parsing.
    dirty = git_describe.endswith('-dirty')
    pieces['dirty'] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex('-dirty')]
    if ('-' in git_describe):
        # TAG-DISTANCE-gHEX form.
        mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)
        if (not mo):
            # Unparsable output: record the problem instead of raising.
            pieces['error'] = ("unable to parse git-describe output: '%s'" % describe_out)
            return pieces
        full_tag = mo.group(1)
        if (not full_tag.startswith(tag_prefix)):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print((fmt % (full_tag, tag_prefix)))
            pieces['error'] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
            return pieces
        pieces['closest-tag'] = full_tag[len(tag_prefix):]
        pieces['distance'] = int(mo.group(2))  # commits since the tag
        pieces['short'] = mo.group(3)
    else:
        # No tag found: describe returned a bare hash, so the distance is
        # taken as the number of commits listed from HEAD.
        pieces['closest-tag'] = None
        (out, rc) = runner(GITS, ['rev-list', 'HEAD', '--left-right'], cwd=root)
        pieces['distance'] = len(out.split())
    # Commit date: keep only the last line of the output (earlier lines can
    # presumably carry signature info -- confirm) and normalise
    # 'YYYY-MM-DD HH:MM:SS +ZZZZ' towards an ISO-8601-like form.
    date = runner(GITS, ['show', '-s', '--format=%ci', 'HEAD'], cwd=root)[0].strip()
    date = date.splitlines()[(- 1)]
    pieces['date'] = date.strip().replace(' ', 'T', 1).replace(' ', '', 1)
    return pieces
def test(args):
    """Evaluate a pretrained PointConT classifier on the test split.

    Loads ``model.pth`` from the working directory, runs inference over the
    chosen dataset's test loader, and logs overall and class-balanced
    accuracy.  When ``args.flops_profiler`` is set, also logs GFLOPs, GMACs
    and parameter count (in millions) per sample.
    """
    logger = logging.getLogger(__name__)
    DATA_PATH = hydra.utils.to_absolute_path(args.dataset_dir)
    # Build the test loader for the requested benchmark.
    if (args.dataset == 'ModelNet40'):
        test_loader = DataLoader(ModelNet40(DATA_PATH, partition='test', num_points=args.num_points), num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
    elif (args.dataset == 'ScanObjectNN'):
        test_loader = DataLoader(ScanObjectNN(DATA_PATH, partition='test', num_points=args.num_points), num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
    else:
        raise NotImplementedError
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    model = PointConT_cls(args).cuda()
    checkpoint = torch.load('model.pth')
    model.load_state_dict(checkpoint['model_state_dict'])
    logger.info('Start Testing ... ')
    model = model.eval()
    test_true = []
    test_pred = []
    # Inference only: disable autograd to cut memory and speed up the loop.
    with torch.no_grad():
        for (data, label) in tqdm(test_loader):
            (data, label) = (data.cuda(), label.cuda().squeeze())
            batch_size = data.size()[0]
            logits = model(data)
            # Predicted class = argmax over the logit dimension.
            preds = logits.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    test_acc_avg = metrics.balanced_accuracy_score(test_true, test_pred)
    logger.info(('test acc: %.6f' % test_acc))
    logger.info(('test avg acc: %.6f' % test_acc_avg))
    if args.flops_profiler:
        # Profile on a random tensor shaped like the last batch.
        # NOTE(review): `data`/`batch_size` refer to the final (possibly
        # smaller) batch of the loop above -- confirm that is intended.
        sample_input = [torch.randn_like(data)]
        (flops, macs, params) = profile_model(model, sample_input)
        logger.info('GFLOPs GMACs Params.(M)')
        # BUG FIX: the per-sample scale factor was '.0' (i.e. 0.0), which
        # raised ZeroDivisionError; GFLOPs/GMACs need a 1e9 divisor to
        # match the header above.
        logger.info(f'{(flops / (float(batch_size) * 1e9)): .2f} {(macs / (float(batch_size) * 1e9)): .2f} {(params / 1000000.0): .3f}')
# NOTE(review): fixture base class for Telegram Message entity-parsing tests.
# Several literals below appear to have been mangled during extraction:
# every 'url' value is truncated right after its opening quote, both
# User(...) calls are missing their first (id) argument, and `test_text`
# has lost its closing quote (the trailing URL that the 'url' entities at
# offsets 60/64 should cover seems to have been stripped).  Restore these
# values from the upstream test suite before executing -- as written this
# block does not parse.
class TestMessageBase(): id_ = 1 from_user = User(2, 'testuser', False) date = datetime.utcnow() chat = Chat(3, 'private') test_entities = [{'length': 4, 'offset': 10, 'type': 'bold'}, {'length': 3, 'offset': 16, 'type': 'italic'}, {'length': 3, 'offset': 20, 'type': 'italic'}, {'length': 4, 'offset': 25, 'type': 'code'}, {'length': 5, 'offset': 31, 'type': 'text_link', 'url': ' {'length': 12, 'offset': 38, 'type': 'text_mention', 'user': User(, 'mentioned user', False)}, {'length': 3, 'offset': 55, 'type': 'pre', 'language': 'python'}, {'length': 21, 'offset': 60, 'type': 'url'}] test_text = 'Test for <bold, ita_lic, code, links, text-mention and pre. test_entities_v2 = [{'length': 4, 'offset': 0, 'type': 'underline'}, {'length': 4, 'offset': 10, 'type': 'bold'}, {'length': 7, 'offset': 16, 'type': 'italic'}, {'length': 6, 'offset': 25, 'type': 'code'}, {'length': 5, 'offset': 33, 'type': 'text_link', 'url': ' {'length': 12, 'offset': 40, 'type': 'text_mention', 'user': User(, 'mentioned user', False)}, {'length': 5, 'offset': 57, 'type': 'pre'}, {'length': 17, 'offset': 64, 'type': 'url'}, {'length': 41, 'offset': 86, 'type': 'italic'}, {'length': 29, 'offset': 91, 'type': 'bold'}, {'length': 9, 'offset': 101, 'type': 'strikethrough'}, {'length': 10, 'offset': 129, 'type': 'pre', 'language': 'python'}, {'length': 7, 'offset': 141, 'type': 'spoiler'}, {'length': 2, 'offset': 150, 'type': 'custom_emoji', 'custom_emoji_id': '1'}] test_text_v2 = 'Test for <bold, ita_lic, \\`code, links, text-mention and `\\pre. and bold nested in strk>trgh nested in italic. Python pre. Spoiled. .' 
# NOTE(review): Message fixtures built from the (garbled) texts/entities
# above; both text+entities and caption+caption_entities are populated so
# the same parsing is exercised for messages and captions.
test_message = Message(message_id=1, from_user=None, date=None, chat=None, text=test_text, entities=[MessageEntity(**e) for e in test_entities], caption=test_text, caption_entities=[MessageEntity(**e) for e in test_entities]) test_message_v2 = Message(message_id=1, from_user=None, date=None, chat=None, text=test_text_v2, entities=[MessageEntity(**e) for e in test_entities_v2], caption=test_text_v2, caption_entities=[MessageEntity(**e) for e in test_entities_v2])
def main():
    """Entry point: load config, optionally train a SiamFC tracker on
    GOT-10k, then evaluate it on OTB-2015 and (optionally) log to wandb."""
    # ---- configuration -------------------------------------------------
    args = parse_args()
    cfg = Config.fromfile(args.config)
    cfg.merge_from_dict(default_cfg)
    # Command-line --options override values loaded from the config file.
    if (args.options is not None):
        cfg.merge_from_dict(args.options)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.gpus = args.gpus
    # Work dir precedence: CLI flag, then config, then ./work_dirs/<config-stem>.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    cfg.suffix = args.suffix
    cfg.checkpoint = args.checkpoint
    # Stays None unless a WandbLoggerHook initialises it below; the
    # evaluation section uses this to decide whether to log to wandb.
    wandb = None
    if (args.pretrained is not None):
        weight_path = args.pretrained
        assert osp.exists(weight_path)
        cfg.model.backbone.pretrained = weight_path
        # NOTE(review): this loop uses `weight_path`, which is only bound in
        # the pretrained branch, so it is assumed to live inside it -- the
        # original indentation was lost; confirm.
        for h in cfg.log_config.hooks:
            if ((h.type == 'WandbLoggerHook') and (not args.disable_wandb)):
                import wandb
                init_kwargs = h.init_kwargs.to_dict()
                mmcv.mkdir_or_exist(f'wandb/{os.path.basename(weight_path)}')
                init_kwargs.update(dict(name=os.path.basename(weight_path), resume=False, dir=f'wandb/{os.path.basename(weight_path)}', tags=[*h.init_kwargs.tags, 'siamfc'], config=cfg.to_dict()))
                wandb.init(**init_kwargs)
    mmcv.mkdir_or_exist(osp.join('./wandb', osp.splitext(osp.basename(args.config))[0]))
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # ---- logging -------------------------------------------------------
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'sf-{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    logger.info(f'Config: {cfg.text}')
    logger.info(f'Config.pretty_text: {cfg.pretty_text}')
    if (args.seed is not None):
        logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
        cfg.seed = args.seed
    # Route every print() through the logger so console output also lands
    # in the log file.  NOTE(review): joins positional args with ',' and
    # assumes they are all strings -- non-str args would raise.
    def print_log(*args):
        logger.info(','.join(args))
    builtins.print = print_log
    # ---- training ------------------------------------------------------
    tracker = TrackerSiamFC(cfg, logger)
    # Train only when no checkpoint was supplied and there is something to
    # learn (unfrozen backbone stages, or an extra conv head).
    if ((args.checkpoint is None) and ((cfg.model.backbone.frozen_stages < 4) or cfg.extra_conv)):
        train_seqs = GOT10k('data/GOT-10k', subset='train', return_meta=True)
        tracker.train_over(train_seqs)
    # ---- evaluation on OTB-2015 ----------------------------------------
    with torch.no_grad():
        tracker.net.eval()
        e = ExperimentOTB('data/otb', version=2015, result_dir=osp.join(cfg.work_dir, cfg.suffix, 'results'), report_dir=osp.join(cfg.work_dir, cfg.suffix, 'reports'))
        e.run(tracker)
        performance = e.report([tracker.name])
        overall = performance[tracker.name]['overall']
        # Pop the curves so `overall` keeps only scalar metrics for logging.
        success_curve = overall.pop('success_curve')
        precision_curve = overall.pop('precision_curve')
        # Report scores as percentages rounded to two decimals.
        success_score = (overall['success_score'] * 100)
        success_score = np.round(success_score, 2)
        precision_score = (overall['precision_score'] * 100)
        precision_score = np.round(precision_score, 2)
        success_rate = (overall['success_rate'] * 100)
        success_rate = np.round(success_rate, 2)
        speed_fps = overall['speed_fps']
        speed_fps = np.round(speed_fps, 2)
        # 'copypaste' line: precision,success -- convenient for spreadsheets.
        logger.info(f'copypaste: {precision_score},{success_score}')
        logger.info(f'success_score: {success_score}')
        logger.info(f'precision_score: {precision_score}')
        logger.info(f'success_rate: {success_rate}')
        logger.info(f'speed_fps: {speed_fps}')
        # ---- wandb reporting -------------------------------------------
        if (wandb is not None):
            wandb.log(overall)
            # Success plot: success rate vs overlap threshold in [0, 1].
            data = [[x, y] for (x, y) in zip(np.linspace(0, 1, len(success_curve)), success_curve)]
            table = wandb.Table(data=data, columns=['Overlap threshold', 'Success rate'])
            wandb.log({'Success': wandb.plot.line(table, 'Overlap threshold', 'Success rate', title='Success plots of OPE')})
            # Precision plot: precision vs location-error threshold.
            data = [[x, y] for (x, y) in zip(np.linspace(0, 1, len(precision_curve)), precision_curve)]
            table = wandb.Table(data=data, columns=['Location error threshold', 'Precision'])
            wandb.log({'Precision': wandb.plot.line(table, 'Location error threshold', 'Precision', title='Precision plots of OPE')})
            wandb.join()