class BertDictionary(MaskedLMDictionary):
    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>',
                 cls='<cls>', sep='<sep>'):
        super().__init__(pad, eos, unk, mask)
        self.cls_word = cls
        self.sep_word = sep
        self.cls_index = self.add_symbol(cls)
        self.sep_index = self.add_symbol(sep)
        self.nspecial = len(self.symbols)

    def cls(self):
        return self.cls_index

    def sep(self):
        return self.sep_index
class WeightedIntegerVectors_all(DisjointUnionEnumeratedSets):
    def __init__(self, weight):
        self._weights = weight
        from sage.sets.family import Family
        from sage.sets.non_negative_integers import NonNegativeIntegers
        from functools import partial
        F = Family(NonNegativeIntegers(),
                   partial(WeightedIntegerVectors, weight=weight))
        cat = (SetsWithGrading(), InfiniteEnumeratedSets())
        DisjointUnionEnumeratedSets.__init__(self, F, facade=True,
                                             keepkey=False, category=cat)

    def _repr_(self):
        return 'Integer vectors weighted by %s' % list(self._weights)

    def __contains__(self, x):
        return (isinstance(x, (list, IntegerVector, Permutation))
                and len(x) == len(self._weights)
                and all(i in ZZ and i >= 0 for i in x))

    def subset(self, size=None):
        if size is None:
            return self
        return self._family[size]

    def grading(self, x):
        return sum(exp * deg for exp, deg in zip(x, self._weights))
class StatsFileFramerateMismatch(Exception):
    def __init__(self, base_timecode_fps, stats_file_fps,
                 message='Framerate differs between stats file and base timecode.'):
        super(StatsFileFramerateMismatch, self).__init__(message)
        self.base_timecode_fps = base_timecode_fps
        self.stats_file_fps = stats_file_fps
def ground(statement_path, cpnet_vocab_path, pattern_path, output_path,
           num_processes=1, debug=False):
    global PATTERN_PATH, CPNET_VOCAB
    if PATTERN_PATH is None:
        PATTERN_PATH = pattern_path
        CPNET_VOCAB = load_cpnet_vocab(cpnet_vocab_path)
    sents = []
    answers = []
    with open(statement_path, 'r') as fin:
        lines = [line for line in fin]

    if debug:
        lines = lines[192:195]
        print(len(lines))
    for line in lines:
        if line == '':
            continue
        j = json.loads(line)
        for statement in j['statements']:
            sents.append(statement['statement'])
        for answer in j['question']['choices']:
            ans = answer['text']
            try:
                assert all(i != '_' for i in ans)
            except Exception:
                print(ans)
            answers.append(ans)

    res = match_mentioned_concepts(sents, answers, num_processes)
    res = prune(res, cpnet_vocab_path)

    with open(output_path, 'w') as fout:
        for dic in res:
            fout.write(json.dumps(dic) + '\n')

    print(f'grounded concepts saved to {output_path}')
    print()
@wrap_experiment
def vpg_pendulum(ctxt=None, seed=1):
    set_seed(seed)
    env = GarageEnv(env_name='InvertedDoublePendulum-v2')
    runner = LocalRunner(ctxt)

    policy = GaussianMLPPolicy(env.spec,
                               hidden_sizes=[64, 64],
                               hidden_nonlinearity=torch.tanh,
                               output_nonlinearity=None)

    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32),
                                              hidden_nonlinearity=torch.tanh,
                                              output_nonlinearity=None)

    algo = VPG(env_spec=env.spec,
               policy=policy,
               value_function=value_function,
               max_path_length=100,
               discount=0.99,
               center_adv=False)

    runner.setup(algo, env)
    runner.train(n_epochs=100, batch_size=10000)
def export_onnx_model(model, inputs):
    assert isinstance(model, torch.nn.Module)

    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(model, inputs, f,
                              operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())

    all_passes = onnx.optimizer.get_available_passes()
    passes = ['fuse_bn_into_conv']
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    return onnx_model
@pytest.fixture(scope='module')
def dev_file_with_trees(tmp_path_factory):
    dev_set = DATASET_WITH_TREES * 2
    dev_filename = tmp_path_factory.mktemp('data') / 'dev_trees.json'
    with open(dev_filename, 'w', encoding='utf-8') as fout:
        json.dump(dev_set, fout, ensure_ascii=False)
    return dev_filename
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
    output_size: _size_3_t

    def forward(self, input: Tensor) -> Tensor:
        return F.adaptive_avg_pool3d(input, self.output_size)
def get_qas(text):
    out = subprocess.check_output(['from_question_generation/get_qnas', text])
    questions = [line.split('\t') for line in str(out, 'utf-8').split('\n')]
    factoid_qas = [{'question': e[0], 'answer': e[1], 'score': e[2]}
                   for e in questions if len(e) == 3]
    return factoid_qas
class Edge(SageObject):
    def __init__(self, p, label, rep, origin, target, links=None,
                 opposite=None, determinant=None, valuation=None):
        if links is None:
            links = []
        if determinant is None:
            determinant = rep.determinant()
        if valuation is None:
            valuation = determinant.valuation(p)
        self.p = p
        self.label = label
        self.rep = rep
        self.rep.set_immutable()
        self.origin = origin
        self.target = target
        self.links = links
        self.opposite = opposite
        self.determinant = determinant
        self.valuation = valuation
        self.parity = valuation % 2

    def _repr_(self):
        return 'Edge of Bruhat-Tits tree for p = %s' % self.p

    def __eq__(self, other):
        if self.p != other.p:
            return False
        if self.label != other.label:
            return False
        if self.rep != other.rep:
            return False
        if self.origin != other.origin:
            return False
        if self.target != other.target:
            return False
        if self.links != other.links:
            return False
        if self.opposite != other.opposite:
            return False
        if self.determinant != other.determinant:
            return False
        if self.valuation != other.valuation:
            return False
        if self.parity != other.parity:
            return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
def test_extract_nodes():
    modela = ModelA()
    modelb = ModelB()
    modela.ref_field = modelb
    modela.ref_field2 = 'user_set_name'
    model_list = []
    io._extract_nodes(modela, model_list)
    assert len(model_list) == 2
    assert modela in model_list
    assert modelb in model_list
def manually_copy_vissl_head(from_state_dict, to_state_dict,
                             keys: List[Tuple[str, str]]):
    for (from_key, to_key) in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}')
    return to_state_dict
def stack_fn(x):
    x = stack1(x, 64, 3, name='conv2')
    x = stack1(x, 128, 4, name='conv3')
    x = stack1(x, 256, 6, name='conv4')
    return stack1(x, 512, 3, name='conv5')
def get_pattern(query):
    pattern = '^'
    for res in query:
        assert res in known_abbrev, \
            '# Fatal error: character %s not known. Use AUCG/NYRSWKMBDHV' % res
        if res in rna:
            pattern += res
        elif res == 'N':
            pattern += '[AUCGT]'
        elif res == 'Y':
            pattern += '[UCT]'
        elif res == 'R':
            pattern += '[AG]'
        elif res == 'S':
            pattern += '[GC]'
        elif res == 'W':
            pattern += '[AUT]'
        elif res == 'K':
            pattern += '[GUT]'
        elif res == 'M':
            pattern += '[AC]'
        elif res == 'B':
            pattern += '[UCGT]'
        elif res == 'D':
            pattern += '[AUGT]'
        elif res == 'H':
            pattern += '[AUCT]'
        elif res == 'V':
            pattern += '[ACG]'
    pattern += '$'
    return pattern
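# Usage sketch for get_pattern. `rna` and `known_abbrev` are module globals
# that are not shown above; plausible definitions are assumed here.
import re

rna = set('AUCG')
known_abbrev = set('AUCGNYRSWKMBDHV')

pattern = get_pattern('AUNY')   # '^AU[AUCGT][UCT]$'
assert re.match(pattern, 'AUGC')
assert not re.match(pattern, 'AUGA')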
def get_df_info(df: pd.DataFrame):
    print(f'Total rows {len(df)}, unique users: {df.user_id.nunique()}, '
          f'unique items: {df.item_id.nunique()}')
def create_feature_columns() -> Tuple[list, list, list]:
    first_order_feature_columns, second_order_feature_columns, label_feature_columns = [], [], []

    userid = fc.categorical_column_with_vocabulary_file(
        'userid', os.path.join(FLAGS.vocabulary_dir, 'userid.txt'))
    feedid = fc.categorical_column_with_vocabulary_file(
        'feedid', os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
    device = fc.categorical_column_with_vocabulary_file(
        'device', os.path.join(FLAGS.vocabulary_dir, 'device.txt'))
    authorid = fc.categorical_column_with_vocabulary_file(
        'authorid', os.path.join(FLAGS.vocabulary_dir, 'authorid.txt'))
    bgm_song_id = fc.categorical_column_with_vocabulary_file(
        'bgm_song_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_song_id.txt'))
    bgm_singer_id = fc.categorical_column_with_vocabulary_file(
        'bgm_singer_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_singer_id.txt'))

    userid_one_hot = fc.indicator_column(userid)
    feedid_one_hot = fc.indicator_column(feedid)
    device_one_hot = fc.indicator_column(device)
    authorid_one_hot = fc.indicator_column(authorid)
    bgm_song_id_one_hot = fc.indicator_column(bgm_song_id)
    bgm_singer_id_one_hot = fc.indicator_column(bgm_singer_id)
    first_order_feature_columns += [userid_one_hot, feedid_one_hot,
                                    device_one_hot, authorid_one_hot,
                                    bgm_song_id_one_hot, bgm_singer_id_one_hot]

    userid_emb = fc.embedding_column(userid, FLAGS.embedding_dim)
    feedid_emb = fc.embedding_column(feedid, FLAGS.embedding_dim)
    device_emb = fc.embedding_column(device, FLAGS.embedding_dim)
    authorid_emb = fc.embedding_column(authorid, FLAGS.embedding_dim)
    bgm_song_id_emb = fc.embedding_column(bgm_song_id, FLAGS.embedding_dim)
    bgm_singer_id_emb = fc.embedding_column(bgm_singer_id, FLAGS.embedding_dim)
    second_order_feature_columns += [userid_emb, feedid_emb, device_emb,
                                     authorid_emb, bgm_song_id_emb,
                                     bgm_singer_id_emb]

    read_comment = fc.numeric_column('read_comment', default_value=0.0)
    label_feature_columns += [read_comment]

    return first_order_feature_columns, second_order_feature_columns, label_feature_columns
def infer(info, input_data):

    class tmp():
        pass
    args = tmp
    tmp.outdir = ''
    tmp.result_outdir = ''

    class ForwardConfig():
        pass
    config = ForwardConfig
    config.executors = info.executors.values()
    config.networks = []
    for e in config.executors:
        if e.network.name in info.networks.keys():
            config.networks.append(info.networks[e.network.name])
        else:
            logger.critical('Network {} is not found.'.format(e.network.name))
            return False

    normalize = True
    for d in info.datasets.values():
        normalize = d.normalize

    input_file_index = 0
    inputs = []
    for e in config.executors:
        for (v, d) in e.dataset_assign.items():
            data = input_data[input_file_index].reshape(v.variable_instance.d.shape)
            inputs.append((d, data))
            input_file_index += 1

    data = []
    variables = []
    for (v, d) in inputs:
        variables.append(v)
        data.append(d)
    return _forward(tmp, 0, config, data, variables, False)
def generate(template: Template, **kwargs) -> Callable:
    if hasattr(dsp.settings, 'inspect'):
        inspector = dsp.settings.inspect
        _generate = inspector.inspect_func(dsp.predict._generate)
        return _generate(template, **kwargs)
    else:
        return dsp.predict._generate(template, **kwargs)
class T5TokenizerFast(metaclass=DummyObject):
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def get_format_strings(kv_pairs):
    log_strings = []
    for (key, value) in kv_pairs:
        fmt = get_print_format(value)
        format_string = '{}: {:' + fmt + '}'
        log_strings.append(format_string.format(key, value))
    return log_strings
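# Usage sketch for get_format_strings. The real get_print_format helper is not
# shown above, so a minimal stand-in that returns a format spec is assumed.
def get_print_format(value):
    return '.3f' if isinstance(value, float) else 'd'

print(get_format_strings([('loss', 0.1234), ('step', 7)]))
# ['loss: 0.123', 'step: 7']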
class Empty(LayoutBuilder):
    def __init__(self):
        self._init(None)

    def __repr__(self):
        return 'ak.numba.lb.Empty(parameters=None)'

    def numbatype(self):
        import numba
        return ak._connect.numba.layoutbuilder.EmptyType(
            numba.types.StringLiteral(None))

    def __len__(self):
        return 0

    def form(self):
        return ak.forms.EmptyForm()

    def clear(self):
        pass

    def is_valid(self, error: str):
        return True

    def snapshot(self) -> ak.contents.Content:
        return ak.contents.EmptyArray()
def tukeylambda_kurtosis(lam):
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    threshold = 0.055

    low_mask = lam < -0.25
    negqrtr_mask = lam == -0.25
    small_mask = np.abs(lam) < threshold
    reg_mask = ~((low_mask | negqrtr_mask) | small_mask)

    small = lam[small_mask]
    reg = lam[reg_mask]

    k = np.empty_like(lam)
    k[low_mask] = np.nan
    k[negqrtr_mask] = np.inf
    if small.size > 0:
        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
    if reg.size > 0:
        numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1)
                 + 3 * beta(2 * reg + 1, 2 * reg + 1))
        denom = 2 * (1.0 / (2 * reg + 1) - beta(reg + 1, reg + 1)) ** 2
        k[reg_mask] = numer / denom - 3

    k.shape = shp
    return k
class Config():
    def __init__(self) -> None:
        self.val_measures = {
            'Emax': {'CoCA': 0.783, 'CoSOD3k': 0.874, 'CoSal2015': 0.892},
            'Smeasure': {'CoCA': 0.71, 'CoSOD3k': 0.81, 'CoSal2015': 0.838},
            'Fmax': {'CoCA': 0.598, 'CoSOD3k': 0.805, 'CoSal2015': 0.856},
        }
        self.validation = True
class MSVDQADataModule(BaseDataModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def dataset_cls(self):
        return MSVDQADataset

    def dataset_name(self):
        return 'msvdqa'

    def setup(self, stage):
        super().setup(stage)
        self.answer2id = self.train_dataset.ans_lab_dict
        sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
        self.num_class = max(self.answer2id.values()) + 1
        self.id2answer = defaultdict(lambda: 'unknown')
        for (k, v) in sorted_a2i:
            self.id2answer[v] = k
class Point(survey.BaseRx):
    def __init__(self, locations, components='gz', **kwargs):
        super(Point, self).__init__(locations, **kwargs)
        if isinstance(components, str):
            components = [components]
        for component in components:
            validate_string('component', component,
                            ['gx', 'gy', 'gz', 'gxx', 'gxy', 'gxz',
                             'gyy', 'gyz', 'gzz', 'guv'])
        self.components = components

    def nD(self):
        return self.locations.shape[0] * len(self.components)
class CudaBuffer():
    def __init__(self, id, target=GL_RENDERBUFFER,
                 flags=pycuda.gl.graphics_map_flags.NONE):
        self.cuda_buffer = pycuda.gl.RegisteredImage(id, target, flags)
        self.cuda_buffer_map = self.cuda_buffer.map()

    def copy_to_tensor(self, tensor):
        (h, w, c) = tensor.shape
        copy_op = pycuda.driver.Memcpy2D()
        copy_op.set_src_array(self.cuda_buffer_map.array(0, 0))
        copy_op.set_dst_device(tensor.data_ptr())
        copy_op.width_in_bytes = w * c * tensor.element_size()
        copy_op.src_pitch = copy_op.width_in_bytes
        copy_op.dst_pitch = copy_op.width_in_bytes
        copy_op.height = h
        copy_op(aligned=False)

    def copy_from_tensor(self, tensor):
        (h, w, c) = tensor.shape
        copy_op = pycuda.driver.Memcpy2D()
        copy_op.set_src_device(tensor.data_ptr())
        copy_op.set_dst_array(self.cuda_buffer_map.array(0, 0))
        copy_op.width_in_bytes = w * c * tensor.element_size()
        copy_op.src_pitch = copy_op.width_in_bytes
        copy_op.dst_pitch = copy_op.width_in_bytes
        copy_op.height = h
        copy_op(aligned=False)

    def destroy(self):
        self.cuda_buffer_map.unmap()
        self.cuda_buffer.unregister()
def concatenate_images(ic):
    all_images = [image[np.newaxis, ...] for image in ic]
    try:
        array_cat = np.concatenate(all_images)
    except ValueError:
        raise ValueError('Image dimensions must agree.')
    return array_cat
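# Usage sketch: concatenate_images stacks equally shaped images along a new
# leading axis and raises ValueError when the shapes disagree.
import numpy as np

ic = [np.zeros((2, 2)), np.ones((2, 2))]
assert concatenate_images(ic).shape == (2, 2, 2)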
class TestRoot():
    def test_tol_parameter(self):
        def func(z):
            (x, y) = z
            return np.array([x ** 3 - 1, y ** 3 - 1])

        def dfunc(z):
            (x, y) = z
            return np.array([[3 * x ** 2, 0], [0, 3 * y ** 2]])

        for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
                       'diagbroyden', 'krylov']:
            if method in ('linearmixing', 'excitingmixing'):
                continue
            if method in ('hybr', 'lm'):
                jac = dfunc
            else:
                jac = None
            sol1 = root(func, [1.1, 1.1], jac=jac, tol=0.0001, method=method)
            sol2 = root(func, [1.1, 1.1], jac=jac, tol=0.5, method=method)
            msg = f'{method}: {func(sol1.x)} vs. {func(sol2.x)}'
            assert_(sol1.success, msg)
            assert_(sol2.success, msg)
            assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(), msg)

    def test_tol_norm(self):
        def norm(x):
            return abs(x[0])

        for method in ['excitingmixing', 'diagbroyden', 'linearmixing',
                       'anderson', 'broyden1', 'broyden2', 'krylov']:
            root(np.zeros_like, np.zeros(2), method=method,
                 options={'tol_norm': norm})

    def test_minimize_scalar_coerce_args_param(self):
        def func(z, f=1):
            (x, y) = z
            return np.array([x ** 3 - 1, y ** 3 - f])

        root(func, [1.1, 1.1], args=1.5)

    def test_f_size(self):
        class fun():
            def __init__(self):
                self.count = 0

            def __call__(self, x):
                self.count += 1
                if not self.count % 5:
                    ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
                else:
                    ret = [x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
                           0.5 * (x[1] - x[0]) ** 3 + x[1]]
                return ret

        F = fun()
        with assert_raises(ValueError):
            root(F, [0.1, 0.0], method='lm')

    def test_gh_10370(self):
        def fun(x, ignored):
            return [3 * x[0] - 0.25 * x[1] ** 2 + 10,
                    0.1 * x[0] ** 2 + 5 * x[1] - 2]

        def grad(x, ignored):
            return [[3, 0.5 * x[1]], [0.2 * x[0], 5]]

        def fun_grad(x, ignored):
            return fun(x, ignored), grad(x, ignored)

        x0 = np.zeros(2)
        ref = root(fun, x0, args=(1,), method='krylov')
        message = 'Method krylov does not use the jacobian'
        with assert_warns(RuntimeWarning, match=message):
            res1 = root(fun, x0, args=(1,), method='krylov', jac=grad)
        with assert_warns(RuntimeWarning, match=message):
            res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True)
        assert_equal(res1.x, ref.x)
        assert_equal(res2.x, ref.x)
        assert res1.success is res2.success is ref.success is True

    @pytest.mark.parametrize('method', ['hybr', 'lm', 'broyden1', 'broyden2',
                                        'anderson', 'linearmixing', 'diagbroyden',
                                        'excitingmixing', 'krylov', 'df-sane'])
    def test_method_in_result(self, method):
        def func(x):
            return x - 1

        res = root(func, x0=[1], method=method)
        assert res.method == method
@ex.named_config
def task_finetune_ind_itc_irtr_activitynet_randaug():
    exp_name = 'finetune_itc_irtr_activitynet_randaug'
    datasets = ['activitynet']
    train_transform_keys = ['pixelbert_randaug']
    loss_names = _loss_names({'ind_itc': 1})
    batch_size = 1024
    max_epoch = 200
    max_steps = None
    warmup_steps = 0.1
    retrieval_views = 3
    get_recall_metric = False
    get_itc_recall_metric = False
    get_ind_recall_metric = True
    draw_false_text = 10
    learning_rate = 0.0003
def test_spinner_initializes_with_default_values():
    with Spinner() as spinner:
        assert spinner.message == 'Loading...'
        assert spinner.delay == 0.1
class DataLoader(torch.utils.data.DataLoader):
    def __init__(self, vocab_json, kb_pt, question_pt, batch_size, training=False):
        vocab = load_vocab(vocab_json)
        inputs = []
        with open(question_pt, 'rb') as f:
            for _ in range(3):
                inputs.append(pickle.load(f))
        with open(kb_pt, 'rb') as f:
            self.node_descs = torch.LongTensor(pickle.load(f))
            self.triples = torch.LongTensor(pickle.load(f))
        dataset = Dataset(inputs)
        super().__init__(dataset, batch_size=batch_size, shuffle=training,
                         collate_fn=collate)
        self.vocab = vocab
class AlbertForMultipleChoice(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def consolidate_edges(sdfg: SDFG, starting_scope=None) -> int:
    from dace.sdfg.propagation import propagate_memlets_scope
    total_consolidated = 0
    for state in sdfg.states():
        if starting_scope and starting_scope.entry not in state.nodes():
            continue

        queue = [starting_scope] if starting_scope else state.scope_leaves()
        next_queue = []
        while len(queue) > 0:
            for scope in queue:
                propagate_entry, propagate_exit = False, False

                consolidated = consolidate_edges_scope(state, scope.entry)
                total_consolidated += consolidated
                if consolidated > 0:
                    propagate_entry = True

                consolidated = consolidate_edges_scope(state, scope.exit)
                total_consolidated += consolidated
                if consolidated > 0:
                    propagate_exit = True

                propagate_memlets_scope(sdfg, state, scope, propagate_entry,
                                        propagate_exit)

                if scope.parent is not None:
                    next_queue.append(scope.parent)
            queue = next_queue
            next_queue = []

        if starting_scope is not None:
            break
    return total_consolidated
def require_torch_tpu(test_case):
    if not _torch_tpu_available:
        return unittest.skip('test requires PyTorch TPU')(test_case)
    else:
        return test_case
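# Usage sketch: require_torch_tpu is applied as a test decorator; the module
# is assumed to define _torch_tpu_available elsewhere.
@require_torch_tpu
def test_runs_only_on_tpu():
    pass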
def test_preprocess():
    root_path = 'tests/data/dataset_sample'
    output_path = '/tmp/preprocessed_npzs'
    os.makedirs(output_path, exist_ok=True)

    PW3D_ROOT = osp.join(root_path, 'pw3d')
    cfg = dict(type='Pw3dConverter', modes=['train', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(PW3D_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'pw3d_test.npz'))
    assert osp.exists(osp.join(output_path, 'pw3d_train.npz'))

    H36M_ROOT = osp.join(root_path, 'h36m')
    cfg = dict(type='H36mConverter', modes=['train', 'valid'], protocol=1,
               mosh_dir='tests/data/dataset_sample/h36m_mosh')
    data_converter = build_data_converter(cfg)
    data_converter.convert(H36M_ROOT, output_path)
    cfg = dict(type='H36mConverter', modes=['valid'], protocol=2)
    data_converter = build_data_converter(cfg)
    data_converter.convert(H36M_ROOT, output_path)
    cfg = dict(type='H36mConverter', modes=['train'], protocol=1)
    data_converter = build_data_converter(cfg)
    data_converter.convert(H36M_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'h36m_train.npz'))
    assert osp.exists(osp.join(output_path, 'h36m_mosh_train.npz'))
    assert osp.exists(osp.join(output_path, 'h36m_valid_protocol1.npz'))
    assert osp.exists(osp.join(output_path, 'h36m_valid_protocol2.npz'))

    COCO_ROOT = osp.join(root_path, 'coco')
    cfg = dict(type='CocoConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(COCO_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'coco_2014_train.npz'))

    MPI_INF_3DHP_ROOT = osp.join(root_path, 'mpi_inf_3dhp')
    cfg = dict(type='MpiInf3dhpConverter', modes=['train', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(MPI_INF_3DHP_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'mpi_inf_3dhp_test.npz'))
    assert osp.exists(osp.join(output_path, 'mpi_inf_3dhp_train.npz'))

    MPII_ROOT = osp.join(root_path, 'mpii')
    cfg = dict(type='MpiiConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(MPII_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'mpii_train.npz'))

    PENN_ACTION_ROOT = osp.join(root_path, 'Penn_Action')
    cfg = dict(type='PennActionConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(PENN_ACTION_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'penn_action_train.npz'))

    AGORA_ROOT = osp.join(root_path, 'agora')
    cfg = dict(type='AgoraConverter', modes=['train', 'validation'], fit='smplx')
    data_converter = build_data_converter(cfg)
    data_converter.convert(AGORA_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'agora_train_smplx.npz'))
    assert osp.exists(osp.join(output_path, 'agora_validation_smplx.npz'))

    LSP_ORIGINAL_ROOT = osp.join(root_path, 'lsp_dataset_original')
    cfg = dict(type='LspConverter', modes=['train'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(LSP_ORIGINAL_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'lsp_train.npz'))

    LSP_ROOT = osp.join(root_path, 'lsp_dataset')
    cfg = dict(type='LspConverter', modes=['test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(LSP_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'lsp_test.npz'))

    HR_LSPET_ROOT = osp.join(root_path, 'hr-lspet')
    cfg = dict(type='LspExtendedConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(HR_LSPET_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'lspet_train.npz'))

    UP3D_ROOT = osp.join(root_path, 'up-3d')
    cfg = dict(type='Up3dConverter', modes=['trainval', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(UP3D_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'up3d_trainval.npz'))
    assert osp.exists(osp.join(output_path, 'up3d_test.npz'))

    COCO_WHOLEBODY_ROOT = osp.join(root_path, 'coco_wholebody')
    cfg = dict(type='CocoWholebodyConverter', modes=['train', 'val'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(COCO_WHOLEBODY_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'coco_wholebody_train.npz'))
    assert osp.exists(osp.join(output_path, 'coco_wholebody_val.npz'))

    AMASS_ROOT = osp.join(root_path, 'AMASS_file')
    cfg = dict(type='AmassConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(AMASS_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'amass.npz'))

    POSETRACK_ROOT = osp.join(root_path, 'PoseTrack/data')
    cfg = dict(type='PosetrackConverter', modes=['train', 'val'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(POSETRACK_ROOT, output_path)
    assert osp.exists(osp.join(output_path, 'posetrack_train.npz'))
    assert osp.exists(osp.join(output_path, 'posetrack_val.npz'))

    EFT_ROOT = os.path.join(root_path, 'eft')
    cfg = dict(type='EftConverter', modes=['coco_all', 'coco_part', 'mpii', 'lspet'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(EFT_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/eft_coco_all.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/eft_coco_part.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/eft_mpii.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/eft_lspet.npz')

    CROWDPOSE_ROOT = os.path.join(root_path, 'Crowdpose')
    cfg = dict(type='CrowdposeConverter', modes=['train', 'val', 'test', 'trainval'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(CROWDPOSE_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/crowdpose_val.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/crowdpose_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/crowdpose_test.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/crowdpose_trainval.npz')

    SURREAL_ROOT = os.path.join(root_path, 'SURREAL/cmu')
    cfg = dict(type='SurrealConverter', modes=['train', 'val', 'test'], run=0)
    data_converter = build_data_converter(cfg)
    data_converter.convert(SURREAL_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/surreal_val_run0.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/surreal_train_run0.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/surreal_test_run0.npz')

    HYBRIK_ROOT = os.path.join(root_path, 'hybrik_data')
    cfg = dict(type='Pw3dHybrIKConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(HYBRIK_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_pw3d_test.npz')

    cfg = dict(type='H36mHybrIKConverter', modes=['train', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(HYBRIK_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_h36m_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_h36m_valid_protocol2.npz')

    cfg = dict(type='MpiInf3dhpHybrIKConverter', modes=['train', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(HYBRIK_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_mpi_inf_3dhp_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_mpi_inf_3dhp_test.npz')

    COCO_2017_ROOT = os.path.join(root_path, 'coco_2017')
    cfg = dict(type='CocoHybrIKConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(COCO_2017_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/hybrik_coco_2017_train.npz')

    VIBE_ROOT = os.path.join(root_path, 'vibe_data')
    cfg = dict(type='InstaVibeConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(VIBE_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/insta_variety.npz')

    cfg = dict(type='VibeConverter', modes=['mpi_inf_3dhp', 'pw3d'],
               pretrained_ckpt=None)
    data_converter = build_data_converter(cfg)
    data_converter.convert(VIBE_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/vibe_mpi_inf_3dhp_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/vibe_pw3d_test.npz')

    SPIN_ROOT = os.path.join(root_path, 'spin_data')
    cfg = dict(type='SpinConverter',
               modes=['coco_2014', 'lsp', 'mpii', 'mpi_inf_3dhp', 'lspet'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(SPIN_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/spin_coco_2014_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/spin_lsp_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/spin_mpi_inf_3dhp_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/spin_mpii_train.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/spin_lspet_train.npz')

    GTA_HUMAN_ROOT = os.path.join(root_path, 'gta_human_data')
    cfg = dict(type='GTAHumanConverter')
    data_converter = build_data_converter(cfg)
    data_converter.convert(GTA_HUMAN_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/gta_human.npz')

    HUMMAN_ROOT = os.path.join(root_path, 'humman')
    cfg = dict(type='HuMManConverter', modes=['train', 'test'])
    data_converter = build_data_converter(cfg)
    data_converter.convert(HUMMAN_ROOT, output_path)
    assert os.path.exists('/tmp/preprocessed_npzs/humman_train_kinect_ds10_smpl.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/humman_test_kinect_ds10_smpl.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/humman_train_iphone_ds10_smpl.npz')
    assert os.path.exists('/tmp/preprocessed_npzs/humman_test_iphone_ds10_smpl.npz')
def DM_55_7_1():
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    G = AdditiveCyclic(55)
    M = [[1, 7, 14, 19, 28, 33, 40, 46, 50],
         [2, 13, 25, 38, 52, 12, 20, 32, 45],
         [39, 6, 8, 26, 24, 51, 11, 34, 37],
         [54, 48, 41, 36, 27, 22, 15, 9, 5],
         [53, 42, 30, 17, 3, 43, 35, 23, 10],
         [16, 49, 47, 29, 31, 4, 44, 21, 18]]
    Mb = [[0, 0, 0, 0, 0, 0, 0]]
    for R in zip(*M):
        R = list(R)
        for c in range(6):
            Mb.append(cyclic_shift(R, c) + [0])
    return (G, Mb)
class TAtomicPredicate(object):
    thisown = _swig_property(lambda x: x.this.own(),
                             lambda x, v: x.this.own(v),
                             doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TAtomicPredicate_swiginit(self, _snap.new_TAtomicPredicate(*args))
    __swig_destroy__ = _snap.delete_TAtomicPredicate
class SPADEDataset(BaseDataset):
    def __init__(self, opt):
        super(SPADEDataset, self).__init__(opt)
        self.initialize(opt)

    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--no_pairing_check', action='store_true',
                            help='If specified, skip sanity check of correct label-image file pairing')
        parser.add_argument('--no_instance', action='store_true',
                            help='if specified, do *not* add instance map as input')
        parser.add_argument('--contain_dontcare_label', action='store_true',
                            help='if the label map contains dontcare label (dontcare=255)')
        return parser

    def paths_match(self, path1, path2):
        raise NotImplementedError

    def __getitem__(self, index):
        label_path = self.label_paths[index]
        if not self.opt.load_in_memory or self.label_cache.get(index) is None:
            label = Image.open(label_path)
            if self.opt.load_in_memory:
                self.label_cache[index] = label
        else:
            label = self.label_cache[index]
        params = get_params(self.opt, label.size)
        transform_label = get_transform(self.opt, params, method=Image.NEAREST,
                                        normalized=False)
        label_tensor = transform_label(label) * 255.0
        label_tensor[label_tensor == 255] = self.opt.input_nc

        image_path = self.image_paths[index]
        assert self.paths_match(label_path, image_path), \
            "The label_path %s and image_path %s don't match." % (label_path, image_path)
        if not self.opt.load_in_memory or self.image_cache.get(index) is None:
            image = Image.open(image_path)
            if self.opt.load_in_memory:
                self.image_cache[index] = image
        else:
            image = self.image_cache[index]
        image = image.convert('RGB')
        transform_image = get_transform(self.opt, params)
        image_tensor = transform_image(image)

        if self.opt.no_instance:
            instance_tensor = 0
        else:
            instance_path = self.instance_paths[index]
            if not self.opt.load_in_memory or self.instance_cache.get(index) is None:
                instance = Image.open(instance_path)
                if self.opt.load_in_memory:
                    self.instance_cache[index] = instance
            else:
                instance = self.instance_cache[index]
            if instance.mode == 'L':
                instance_tensor = transform_label(instance) * 255
                instance_tensor = instance_tensor.long()
            else:
                instance_tensor = transform_label(instance)

        input_dict = {'label': label_tensor,
                      'instance': instance_tensor,
                      'image': image_tensor,
                      'path': image_path}
        self.postprocess(input_dict)
        return input_dict

    def initialize(self, opt):
        self.opt = opt
        (label_paths, image_paths, instance_paths) = self.get_paths(opt)
        util.natural_sort(label_paths)
        util.natural_sort(image_paths)
        if not opt.no_instance:
            util.natural_sort(instance_paths)
        if opt.max_dataset_size > 0:
            label_paths = label_paths[:opt.max_dataset_size]
            image_paths = image_paths[:opt.max_dataset_size]
            instance_paths = instance_paths[:opt.max_dataset_size]
        if not opt.no_pairing_check:
            for (path1, path2) in zip(label_paths, image_paths):
                assert self.paths_match(path1, path2), \
                    ('The label-image pair (%s, %s) do not look like the right pair '
                     'because the filenames are quite different. Are you sure about '
                     'the pairing? Please see data/pix2pix_dataset.py to see what is '
                     'going on, and use --no_pairing_check to bypass this.'
                     % (path1, path2))
        self.label_paths = label_paths
        self.image_paths = image_paths
        self.instance_paths = instance_paths
        size = len(self.label_paths)
        self.dataset_size = size
        self.label_cache = {}
        self.image_cache = {}
        self.instance_cache = {}

    def postprocess(self, input_dict):
        return input_dict

    def __len__(self):
        if self.opt.max_dataset_size == -1:
            return self.dataset_size
        else:
            return self.opt.max_dataset_size

    def get_paths(self, opt):
        raise NotImplementedError
class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = (ACT2FN[config.hidden_act]
                                 if isinstance(config.hidden_act, str)
                                 else config.hidden_act)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
def multiset_permutation_next_lex(l):
    i = len(l) - 2
    while i >= 0 and l[i] >= l[i + 1]:
        i -= 1
    if i <= -1:
        return 0
    j = len(l) - 1
    while l[j] <= l[i]:
        j -= 1
    l[i], l[j] = l[j], l[i]
    l[i + 1:] = l[:i:-1]
    return 1
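# Usage sketch: starting from the sorted list, repeated calls enumerate every
# distinct permutation of the multiset in lexicographic order.
l = [1, 1, 2]
perms = [list(l)]
while multiset_permutation_next_lex(l):
    perms.append(list(l))
assert perms == [[1, 1, 2], [1, 2, 1], [2, 1, 1]]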
class SymmetricFunctionAlgebra_orthotriang(sfa.SymmetricFunctionAlgebra_generic):

    class Element(sfa.SymmetricFunctionAlgebra_generic.Element):
        pass

    def __init__(self, Sym, base, scalar, prefix, basis_name, leading_coeff=None):
        self._sym = Sym
        self._sf_base = base
        self._scalar = scalar
        self._leading_coeff = leading_coeff
        sfa.SymmetricFunctionAlgebra_generic.__init__(self, Sym, prefix=prefix,
                                                      basis_name=basis_name)
        self._self_to_base_cache = {}
        self._base_to_self_cache = {}
        self.register_coercion(SetMorphism(Hom(base, self), self._base_to_self))
        base.register_coercion(SetMorphism(Hom(self, base), self._self_to_base))

    def _base_to_self(self, x):
        return self._from_cache(x, self._base_cache, self._base_to_self_cache)

    def _self_to_base(self, x):
        return self._sf_base._from_cache(x, self._base_cache,
                                         self._self_to_base_cache)

    def _base_cache(self, n):
        if n in self._self_to_base_cache:
            return
        else:
            self._self_to_base_cache[n] = {}
        self._gram_schmidt(n, self._sf_base, self._scalar,
                           self._self_to_base_cache,
                           leading_coeff=self._leading_coeff,
                           upper_triangular=True)
        self._invert_morphism(n, self.base_ring(), self._self_to_base_cache,
                              self._base_to_self_cache,
                              to_other_function=self._to_base)

    def _to_base(self, part):
        f = lambda mu: self._self_to_base_cache[part].get(mu, 0)
        return f

    def product(self, left, right):
        return self(self._sf_base(left) * self._sf_base(right))
def plot_alignment(alignment, labels, filename=None):
    num_labels = len(labels)
    num_frames = len(alignment)
    ts = range(num_frames)
    if isinstance(alignment[0], list):
        assert len(alignment[0]) == num_labels
        ss = numpy.array(alignment).transpose()
        assert ss.shape == (num_labels, num_frames)
    else:
        assert max(alignment) == num_labels
        ss = [[0.0] * num_frames for _ in range(num_labels)]
        for (t, a) in enumerate(alignment):
            ss[a - 1][t] = 1.0
    fig = plt.figure(frameon=False, figsize=(5, 0.8))
    extra_height = 0.2
    ax = fig.add_axes([0, extra_height, 1, 1.0 - 2 * extra_height])
    ax.axis('off')
    assert isinstance(fig, plt.Figure)
    assert isinstance(ax, plt.Axes)
    for (i, s) in enumerate(ss):
        kwargs = {}
        if i == len(ss) - 1:
            kwargs.update(dict(color='lightgray', linestyle='dotted'))
        ax.plot(ts, s, **kwargs)
        if i < len(ss) - 1:
            xmax = numpy.argmax(s)
            ymax = s[xmax]
            xmax2 = xmax
            while s[xmax2] == ymax:
                xmax2 += 1
            xmax = ((xmax2 - 1) + xmax) / 2.0
            ax.annotate(labels[i], xy=(xmax, ymax), ha='center', fontsize=16)
    if filename:
        filename = '%s/%s' % (target_dir, filename)
        print('save figure:', filename)
        fig.savefig(filename)
    else:
        plt.show()
@pytest.mark.parametrize('curr_arch', supported_archs_offline_cache)
def test_closing_offline_cache(curr_arch):
    for (kernel, args, get_res) in simple_kernels_to_test:
        _test_closing_offline_cache_for_a_kernel(curr_arch=curr_arch,
                                                 kernel=kernel,
                                                 args=args,
                                                 result=get_res(*args))
class ResNetV1(nn.Module):
    def __init__(self, initial_filters, block, layers, input_channels=1):
        self.inplanes = initial_filters
        self.num_layers = len(layers)
        super(ResNetV1, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, initial_filters, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(initial_filters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        for i in range(self.num_layers):
            num_filters = initial_filters * pow(2, i)
            num_stride = 1 if i == 0 else 2
            setattr(self, 'layer{0}'.format(i + 1),
                    self._make_layer(block, num_filters, layers[i],
                                     stride=num_stride))
        self.num_filter_last_seq = initial_filters * pow(2, self.num_layers - 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        for i in range(self.num_layers):
            x = getattr(self, 'layer{0}'.format(i + 1))(x)
        return x
class MyTestClass():
    classvalue = 2

    def __init__(self, n=5) -> None:
        self.n = n

    def method_jit(self, A):
        return A + self.n

    def method(self, A: dace.float64[20]):
        return A + self.n

    def __call__(self, A: dace.float64[20]):
        return A * self.n

    def other_method_caller(self, A: dace.float64[20]):
        return (self.method(A) + 2) + self(A)

    @staticmethod
    def static(A: dace.float64[20]):
        return A + A

    @staticmethod
    def static_withclass(A: dace.float64[20]):
        return A + MyTestClass.classvalue

    @classmethod
    def clsmethod(cls, A):
        return A + cls.classvalue
def test_case99():
    url = brokerIp + '/ngsi-ld/v1/entityOperations/upsert'
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/ld+json',
        'Link': '<{{link}}>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"',
    }
    r = requests.post(url, data=json.dumps(ld_data.subdata99), headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 404
# (class decorator truncated in the source)
class Image():

    @staticmethod
    def serialize(image):
        import cv2
        return cv2.imencode('.png', image)

    @staticmethod
    def deserialize(encoded_image):
        import cv2
        return cv2.imdecode(np.frombuffer(encoded_image, dtype=np.dtype(np.uint8)),
                            cv2.IMREAD_COLOR)
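# Usage sketch: cv2.imencode returns a (success_flag, buffer) pair, so the
# buffer must be unpacked before round-tripping through deserialize.
import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)
ok, buf = Image.serialize(img)
restored = Image.deserialize(buf.tobytes())
assert ok and restored.shape == img.shape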
def load_old_G():
    with open(paths_config.stylegan2_ada_shhq, 'rb') as f:
        old_G = pickle.load(f)['G_ema'].to(global_config.device).eval()
        old_G = old_G.float()
    return old_G
def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor,
                       dim: int = 0) -> nn.Linear:
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0],
                          bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer
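# Usage sketch: keep only output units 0 and 2 of a 4-unit linear layer.
import torch
from torch import nn

layer = nn.Linear(3, 4)
index = torch.tensor([0, 2], dtype=torch.long)
pruned = prune_linear_layer(layer, index, dim=0)
assert pruned.weight.shape == (2, 3) and pruned.bias.shape == (2,)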
def train(train_loader, dev_loader, model, args):
    if args.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif args.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    elif args.optimizer == 'ASGD':
        optimizer = optim.ASGD(model.parameters(), lr=args.lr)

    if args.continue_from:
        print("=> loading checkpoint from '{}'".format(args.continue_from))
        assert os.path.isfile(args.continue_from), \
            "=> no checkpoint found at '{}'".format(args.continue_from)
        checkpoint = torch.load(args.continue_from)
        start_epoch = checkpoint['epoch']
        start_iter = checkpoint.get('iter', None)
        best_acc = checkpoint.get('best_acc', None)
        if start_iter is None:
            start_epoch += 1
            start_iter = 1
        else:
            start_iter += 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        start_epoch = 1
        start_iter = 1
        best_acc = None

    if args.dynamic_lr and args.optimizer != 'Adam':
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=args.milestones,
                                                   gamma=args.decay_factor,
                                                   last_epoch=-1)
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()
    model.train()

    for epoch in range(start_epoch, args.epochs + 1):
        if args.dynamic_lr and args.optimizer != 'Adam':
            scheduler.step()
        for (i_batch, data) in enumerate(train_loader, start=start_iter):
            (inputs, target) = data
            target.sub_(1)
            if args.cuda:
                (inputs, target) = (inputs.cuda(), target.cuda())
            inputs = Variable(inputs)
            target = Variable(target)
            logit = model(inputs)
            loss = F.nll_loss(logit, target)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), args.max_norm)
            optimizer.step()
            if args.cuda:
                torch.cuda.synchronize()
            if args.verbose:
                print('\nTargets, Predicates')
                print(torch.cat((target.unsqueeze(1),
                                 torch.unsqueeze(torch.max(logit, 1)[1].view(target.size()).data, 1)), 1))
                print('\nLogit')
                print(logit)
            if i_batch % args.log_interval == 0:
                corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
                accuracy = 100.0 * corrects / args.batch_size
                print('Epoch[{}] Batch[{}] - loss: {:.6f} lr: {:.5f} acc: {:.3f}% ({}/{})'.format(
                    epoch, i_batch, loss.data,
                    optimizer.state_dict()['param_groups'][0]['lr'],
                    accuracy, corrects, args.batch_size))
            if i_batch % args.val_interval == 0:
                (val_loss, val_acc) = eval(dev_loader, model, epoch, i_batch,
                                           optimizer, args)
            i_batch += 1
        if args.checkpoint and epoch % args.save_interval == 0:
            file_path = '%s/CharCNN_epoch_%d.pth.tar' % (args.save_folder, epoch)
            print('\r=> saving checkpoint model to %s' % file_path)
            save_checkpoint(model, {'epoch': epoch,
                                    'optimizer': optimizer.state_dict(),
                                    'best_acc': best_acc}, file_path)
        (val_loss, val_acc) = eval(dev_loader, model, epoch, i_batch,
                                   optimizer, args)
        if best_acc is None or val_acc > best_acc:
            file_path = '%s/CharCNN_best.pth.tar' % args.save_folder
            print('\r=> found better validated model, saving to %s' % file_path)
            save_checkpoint(model, {'epoch': epoch,
                                    'optimizer': optimizer.state_dict(),
                                    'best_acc': best_acc}, file_path)
            best_acc = val_acc
        print('\n')
class MaxSoftmaxModel(ModelTemplate):
    def __init__(self, base_model, use_softmax=False):
        super(ModelTemplate, self).__init__()
        self.base_model = base_model
        self.use_softmax = use_softmax

    def forward(self, imgs):
        closed_set_preds = self.base_model(imgs)
        if self.use_softmax:
            closed_set_preds = torch.nn.Softmax(dim=-1)(closed_set_preds)
        open_set_preds = -closed_set_preds.max(dim=-1)[0]
        return (closed_set_preds, open_set_preds)
def test_keras_predictor_raises_on_sample_call() -> None:
    model = _DummyKerasPredictor()
    with pytest.raises(NotImplementedError):
        model.sample(empty_dataset([1], [1]).query_points, 1)
def test_suite_chop():
    trunc = pp.ExceptionTruncation()
    chromosome = MagicMock()
    suite = MagicMock(test_case_chromosomes=[chromosome, chromosome])
    trunc.visit_test_suite_chromosome(suite)
    chromosome.accept.assert_has_calls([call(trunc), call(trunc)])
def get_dataset_base_dacs(include_diffusion_data, source, target, evalScale):
    if not include_diffusion_data:
        if evalScale:
            dacs_dataset_base = f'_base_/datasets/uda_{source}_to_{target}_maskrcnn_panoptic_evalScale_{evalScale}.py'
        else:
            dacs_dataset_base = f'_base_/datasets/uda_{source}_to_{target}_maskrcnn_panoptic.py'
    else:
        dacs_dataset_base = f'_base_/datasets/uda_{source}_to_{target}_maskrcnn_panoptic_diffusion.py'
    return dacs_dataset_base
@dataclass
class DataCollatorCTCWithPadding():
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(input_features,
                                   padding=self.padding,
                                   max_length=self.max_length,
                                   pad_to_multiple_of=self.pad_to_multiple_of,
                                   return_tensors='pt')
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(label_features,
                                              padding=self.padding,
                                              max_length=self.max_length_labels,
                                              pad_to_multiple_of=self.pad_to_multiple_of_labels,
                                              return_tensors='pt')
        # Replace padding with -100 so these positions are ignored by the CTC loss.
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch['labels'] = labels
        return batch
def _instrument(sdfg: dace.SDFG, instr: dace.DataInstrumentationType,
                ignore: Optional[str] = None):
    for (node, _) in sdfg.all_nodes_recursive():
        if isinstance(node, nodes.AccessNode):
            if ignore and ignore in node.data:
                node.instrument = dace.DataInstrumentationType.No_Instrumentation
            else:
                node.instrument = instr
class ReLU6(Module):
    def __init__(self, inplace=False):
        super(ReLU6, self).__init__()
        self.inplace = inplace

    def updateOutput(self, input):
        self._backend.HardTanh_updateOutput(self._backend.library_state, input,
                                            self.output, 0, 6, self.inplace)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self._backend.HardTanh_updateGradInput(self._backend.library_state,
                                               input, gradOutput, self.gradInput,
                                               0, 6, self.inplace)
        return self.gradInput
def remove_node_between_two_nodes(graph: Graph, node_to_remove: BaseNode,
                                  first_node: BaseNode, last_node: BaseNode):
    e_attr = graph.get_edge_data(first_node, node_to_remove)
    assert len(list(e_attr.values())) == 1
    e_attr = list(e_attr.values())[0]
    graph.add_edge(first_node, last_node, **e_attr)
    graph.remove_edge(first_node, node_to_remove)
    graph.remove_edge(node_to_remove, last_node)
    graph.remove_node(node_to_remove)
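# Usage sketch, assuming the Graph type behaves like networkx.MultiDiGraph
# (which provides the get_edge_data/add_edge/remove_edge/remove_node calls
# used above).
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('first', 'mid', sink_index=0)
g.add_edge('mid', 'last')
remove_node_between_two_nodes(g, 'mid', 'first', 'last')
assert list(g.edges(data=True)) == [('first', 'last', {'sink_index': 0})]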
class SawyerDoorOpenV1Policy(Policy):

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        return {'hand_pos': obs[:3],
                'door_pos': obs[3:6],
                'unused_info': obs[6:]}

    def get_action(self, obs):
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'],
                                   to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = 1.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        pos_door = o_d['door_pos']
        pos_door[0] -= 0.05
        if np.linalg.norm(pos_curr[:2] - pos_door[:2]) > 0.08:
            return pos_door + np.array([0.0, 0.075, 0.2])
        elif abs(pos_curr[2] - pos_door[2]) > 0.04:
            return pos_door + np.array([0.0, 0.075, 0.0])
        else:
            return pos_door
def tlarray(A: dace.int32[128]):
    tmp = dace.ndarray([128], dace.int32,
                       storage=dace.StorageType.CPU_ThreadLocal)
    for i in dace.map[0:128]:
        with dace.tasklet:
            t = omp_get_thread_num()
            t >> tmp[i]
    for i in dace.map[0:128]:
        with dace.tasklet:
            t << tmp[i]
            o >> A[i]
            o = t
def test_all_labels():
    for label in LABELS:
        assert decode(b'', label) == ('', lookup(label))
        assert encode('', label) == b''
        for repeat in [0, 1, 12]:
            (output, _) = iter_decode([b''] * repeat, label)
            assert list(output) == []
            assert list(iter_encode([''] * repeat, label)) == []
        decoder = IncrementalDecoder(label)
        assert decoder.decode(b'') == ''
        assert decoder.decode(b'', final=True) == ''
        encoder = IncrementalEncoder(label)
        assert encoder.encode('') == b''
        assert encoder.encode('', final=True) == b''
    for name in set(LABELS.values()):
        assert lookup(name).name == name
def download_url_to_file(url, dst, hash_prefix=None, progress=True):
    file_size = None
    req = Request(url, headers={'User-Agent': 'torch.hub'})
    u = urlopen(req)
    meta = u.info()
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders('Content-Length')
    else:
        content_length = meta.get_all('Content-Length')
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if hash_prefix is not None:
            sha256 = hashlib.sha256()
        with tqdm(total=file_size, disable=not progress, unit='B',
                  unit_scale=True, unit_divisor=1024) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                if hash_prefix is not None:
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if hash_prefix is not None:
            digest = sha256.hexdigest()
            if digest[:len(hash_prefix)] != hash_prefix:
                raise RuntimeError('invalid hash value (expected "{}", got "{}")'.format(
                    hash_prefix, digest))
        shutil.move(f.name, dst)
    finally:
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def discriminator_loss(disc_real_output, disc_generated_output):
    real_loss = loss(tf.ones_like(disc_real_output), disc_real_output)
    generated_loss = loss(tf.zeros_like(disc_generated_output),
                          disc_generated_output)
    total_disc_loss = real_loss + generated_loss
    return total_disc_loss
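# The `loss` object used above is a module-level global that is not shown in
# the source; a common choice for this discriminator loss pattern (an
# assumption here) is binary cross-entropy on logits:
import tensorflow as tf

loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)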
class CfgNode(dict):
    IMMUTABLE = '__immutable__'
    DEPRECATED_KEYS = '__deprecated_keys__'
    RENAMED_KEYS = '__renamed_keys__'
    NEW_ALLOWED = '__new_allowed__'

    def __init__(self, init_dict: Optional[dict] = None,
                 key_list: Optional[list] = None,
                 new_allowed: Optional[bool] = False):
        init_dict = {} if init_dict is None else init_dict
        key_list = [] if key_list is None else key_list
        init_dict = self._create_config_tree_from_dict(init_dict, key_list)
        super(CfgNode, self).__init__(init_dict)
        self.__dict__[CfgNode.IMMUTABLE] = False
        self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
        self.__dict__[CfgNode.RENAMED_KEYS] = {}
        self.__dict__[CfgNode.NEW_ALLOWED] = new_allowed

    @classmethod
    def _create_config_tree_from_dict(cls, init_dict: dict, key_list: list):
        d = copy.deepcopy(init_dict)
        for (k, v) in d.items():
            if isinstance(v, dict):
                d[k] = cls(v, key_list=key_list + [k])
            else:
                _assert_with_logging(
                    _valid_type(v, allow_cfg_node=False),
                    'Key {} with value {} is not a valid type; valid types: {}'.format(
                        '.'.join(key_list + [k]), type(v), _VALID_TYPES))
        return d

    def __getattr__(self, name: str):
        if name in self:
            return self[name]
        else:
            raise AttributeError(name)

    def __setattr__(self, name: str, value):
        if self.is_frozen():
            raise AttributeError(
                'Attempted to set {} to {}, but CfgNode is immutable'.format(name, value))
        _assert_with_logging(
            name not in self.__dict__,
            'Invalid attempt to modify internal CfgNode state: {}'.format(name))
        _assert_with_logging(
            _valid_type(value, allow_cfg_node=True),
            'Invalid type {} for key {}; valid types = {}'.format(
                type(value), name, _VALID_TYPES))
        self[name] = value

    def __str__(self):
        def _indent(s_, num_spaces):
            s = s_.split('\n')
            if len(s) == 1:
                return s_
            first = s.pop(0)
            s = [(num_spaces * ' ') + line for line in s]
            s = '\n'.join(s)
            s = first + '\n' + s
            return s

        r = ''
        s = []
        for (k, v) in sorted(self.items()):
            separator = '\n' if isinstance(v, CfgNode) else ' '
            attr_str = '{}:{}{}'.format(str(k), separator, str(v))
            attr_str = _indent(attr_str, 2)
            s.append(attr_str)
        r += '\n'.join(s)
        return r

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               super(CfgNode, self).__repr__())

    def dump(self, **kwargs):
        def _convert_to_dict(cfg_node, key_list):
            if not isinstance(cfg_node, CfgNode):
                _assert_with_logging(
                    _valid_type(cfg_node),
                    'Key {} with value {} is not a valid type; valid types: {}'.format(
                        '.'.join(key_list), type(cfg_node), _VALID_TYPES))
                return cfg_node
            else:
                cfg_dict = dict(cfg_node)
                for (k, v) in cfg_dict.items():
                    cfg_dict[k] = _convert_to_dict(v, key_list + [k])
                return cfg_dict

        self_as_dict = _convert_to_dict(self, [])
        return yaml.safe_dump(self_as_dict, **kwargs)

    def merge_from_file(self, cfg_filename: str):
        with open(cfg_filename, 'r') as f:
            cfg = self.load_cfg(f)
        self.merge_from_other_cfg(cfg)

    def merge_from_other_cfg(self, cfg_other):
        _merge_a_into_b(cfg_other, self, self, [])

    def merge_from_list(self, cfg_list: list):
        _assert_with_logging(
            len(cfg_list) % 2 == 0,
            'Override list has odd lengths: {}; it must be a list of pairs'.format(cfg_list))
        root = self
        for (full_key, v) in zip(cfg_list[0::2], cfg_list[1::2]):
            if root.key_is_deprecated(full_key):
                continue
            if root.key_is_renamed(full_key):
                root.raise_key_rename_error(full_key)
            key_list = full_key.split('.')
            d = self
            for subkey in key_list[:-1]:
                _assert_with_logging(subkey in d, 'Non-existent key: {}'.format(full_key))
                d = d[subkey]
            subkey = key_list[-1]
            _assert_with_logging(subkey in d, 'Non-existent key: {}'.format(full_key))
            value = self._decode_cfg_value(v)
            value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
            d[subkey] = value

    def freeze(self):
        self._immutable(True)

    def defrost(self):
        self._immutable(False)

    def is_frozen(self):
        return self.__dict__[CfgNode.IMMUTABLE]

    def _immutable(self, is_immutable: bool):
        self.__dict__[CfgNode.IMMUTABLE] = is_immutable
        for v in self.__dict__.values():
            if isinstance(v, CfgNode):
                v._immutable(is_immutable)
        for v in self.values():
            if isinstance(v, CfgNode):
                v._immutable(is_immutable)

    def clone(self):
        return copy.deepcopy(self)

    def register_deprecated_key(self, key: str):
        _assert_with_logging(
            key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
            'key {} is already registered as a deprecated key'.format(key))
        self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)

    def register_renamed_key(self, old_name: str, new_name: str,
                             message: Optional[str] = None):
        _assert_with_logging(
            old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
            'key {} is already registered as a renamed cfg key'.format(old_name))
        value = new_name
        if message:
            value = (new_name, message)
        self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value

    def key_is_deprecated(self, full_key: str):
        if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
            logger.warning('deprecated config key (ignoring): {}'.format(full_key))
            return True
        return False

    def key_is_renamed(self, full_key: str):
        return full_key in self.__dict__[CfgNode.RENAMED_KEYS]

    def raise_key_rename_error(self, full_key: str):
        new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
        if isinstance(new_key, tuple):
            msg = ' Note: ' + new_key[1]
            new_key = new_key[0]
        else:
            msg = ''
        raise KeyError('Key {} was renamed to {}; please update your config.{}'.format(
            full_key, new_key, msg))

    def is_new_allowed(self):
        return self.__dict__[CfgNode.NEW_ALLOWED]

    @classmethod
    def load_cfg(cls, cfg_file_obj_or_str):
        _assert_with_logging(
            isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)),
            'Expected first argument to be of type {} or {}, but got {}'.format(
                _FILE_TYPES, str, type(cfg_file_obj_or_str)))
        if isinstance(cfg_file_obj_or_str, str):
            return cls._load_cfg_from_yaml_str(cfg_file_obj_or_str)
        elif isinstance(cfg_file_obj_or_str, _FILE_TYPES):
            return cls._load_cfg_from_file(cfg_file_obj_or_str)
        else:
            raise NotImplementedError("Impossible to reach here (unless there's a bug)")

    @classmethod
    def _load_cfg_from_file(cls, file_obj):
        (_, file_ext) = os.path.splitext(file_obj.name)
        if file_ext in _YAML_EXTS:
            return cls._load_cfg_from_yaml_str(file_obj.read())
        elif file_ext in _PY_EXTS:
            return cls._load_cfg_py_source(file_obj.name)
        else:
            raise Exception(
                'Attempt to load from an unsupported filetype {}; only {} supported'.format(
                    file_ext, _YAML_EXTS.union(_PY_EXTS)))

    @classmethod
    def _load_cfg_from_yaml_str(cls, str_obj):
        cfg_as_dict = yaml.safe_load(str_obj)
        return cls(cfg_as_dict)

    @classmethod
    def _load_cfg_py_source(cls, filename):
        module = _load_module_from_file('yacs.config.override', filename)
        _assert_with_logging(
            hasattr(module, 'cfg'),
            "Python module from file {} must export a 'cfg' attribute".format(filename))
        VALID_ATTR_TYPES = {dict, CfgNode}
        _assert_with_logging(
            type(module.cfg) in VALID_ATTR_TYPES,
            "Import module 'cfg' attribute must be in {} but is {}".format(
                VALID_ATTR_TYPES, type(module.cfg)))
        return cls(module.cfg)

    @classmethod
    def _decode_cfg_value(cls, value):
        if isinstance(value, dict):
            return cls(value)
        if not isinstance(value, str):
            return value
        try:
            value = literal_eval(value)
        except ValueError:
            pass
        except SyntaxError:
            pass
        return value
def _get_disc_decomp():
    from torch._decomp import get_decompositions
    aten = torch.ops.aten
    decompositions_dict = get_decompositions([
        aten.var_mean,
        aten._adaptive_avg_pool2d_backward,
        aten.addcmul,
        aten.avg_pool2d_backward,
        aten.binary_cross_entropy_with_logits,
        aten.gelu,
        aten.gelu_backward,
        aten.glu_backward,
        aten.grid_sampler_2d,
        aten.hardsigmoid,
        aten.hardsigmoid_backward,
        aten.hardswish,
        aten.hardswish_backward,
        aten.hardtanh,
        aten.hardtanh_backward,
        aten.logsumexp.default,
        aten.max_pool2d_with_indices_backward,
        aten.mse_loss,
        aten.mse_loss_backward,
        aten.mv,
        aten.narrow,
        aten.native_batch_norm,
        aten.native_batch_norm_backward,
        aten.native_dropout_backward,
        aten.native_group_norm,
        aten.native_group_norm_backward,
        aten.native_layer_norm,
        aten.native_layer_norm_backward,
        aten.std_mean.correction,
        aten._softmax,
        aten._softmax_backward_data,
        aten.stack,
        aten.t,
        aten.tanh_backward,
        aten.threshold_backward,
        aten.transpose.int,
        aten.tril.default,
        aten.upsample_bilinear2d.vec,
        aten.upsample_nearest2d_backward,
        aten._unsafe_view,
    ])
    return decompositions_dict
def test_option_unknown_2_parm():
    text = 'option[unknown, parameters={"foo": "bar"}]'
    parsedtype = ak.types.from_datashape(text, highlevel=False)
    assert isinstance(parsedtype, ak.types.OptionType)
    assert str(parsedtype) == text
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
class VGGM_conv5_body(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 96, (7, 7), (2, 2))
        self.relu1 = nn.ReLU(True)
        self.norm1 = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
        self.pool1 = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        self.conv2 = nn.Conv2d(96, 256, (5, 5), (2, 2), (1, 1))
        self.relu2 = nn.ReLU(True)
        self.norm2 = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)
        self.pool2 = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        self.conv3 = nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1))
        self.relu3 = nn.ReLU(True)
        self.conv4 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.relu4 = nn.ReLU(True)
        self.conv5 = nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1))
        self.relu5 = nn.ReLU(True)
        self.spatial_scale = 1.0 / 16.0
        self.dim_out = 512
        self._init_modules()

    def _init_modules(self):
        freeze_params(self.conv1)

    def detectron_weight_mapping(self):
        return vgg_detectron_weight_mapping(self)

    def forward(self, x):
        for m in self.children():
            x = m(x)
        return x
class HyperbolicGeodesicUHP(HyperbolicGeodesic):

    def reflection_involution(self):
        x, y = (real(k.coordinates()) for k in self.ideal_endpoints())
        if x == infinity:
            M = matrix([[1, -2 * y], [0, -1]])
        elif y == infinity:
            M = matrix([[1, -2 * x], [0, -1]])
        else:
            M = matrix([[(x + y) / (y - x), -2 * x * y / (y - x)],
                        [2 / (y - x), -(x + y) / (y - x)]])
        return self._model.get_isometry(M)

    def plot(self, boundary=True, **options):
        opts = {'axes': False, 'aspect_ratio': 1}
        opts.update(self.graphics_options())
        opts.update(options)
        end_1, end_2 = (CC(k.coordinates()) for k in self.endpoints())
        bd_1, bd_2 = (CC(k.coordinates()) for k in self.ideal_endpoints())
        if (abs(real(end_1) - real(end_2)) < EPSILON) or (CC(infinity) in [end_1, end_2]):
            # The geodesic is (part of) a vertical line.
            if end_1 == CC(infinity):
                end_1 = (real(end_2), imag(end_2) + 10)
                end_2 = (real(end_2), imag(end_2))
            elif end_2 == CC(infinity):
                end_2 = (real(end_1), imag(end_1) + 10)
                end_1 = (real(end_1), imag(end_1))
            else:
                end_1 = (real(end_1), imag(end_1))
                end_2 = (real(end_2), imag(end_2))
            pic = bezier_path([[end_1, end_2]], **opts)
            if boundary:
                cent = min(bd_1, bd_2)
                bd_dict = {'bd_min': cent - 3, 'bd_max': cent + 3}
                bd_pic = self._model.get_background_graphic(**bd_dict)
                pic += bd_pic
            return pic
        else:
            # The geodesic is an arc of a semicircle centered on the real axis.
            center = (bd_1 + bd_2) / 2
            radius = abs(bd_1 - bd_2) / 2
            theta1 = CC(end_1 - center).arg()
            theta2 = CC(end_2 - center).arg()
            if abs(theta1 - theta2) < EPSILON:
                theta2 += pi
            pic = arc((real(center), imag(center)), radius,
                      sector=(theta1, theta2), **opts)
            if boundary:
                shadow_1, shadow_2 = (real(k) for k in [end_1, end_2])
                midpoint = (shadow_1 + shadow_2) / 2
                length = abs(shadow_1 - shadow_2)
                bd_dict = {'bd_min': midpoint - length, 'bd_max': midpoint + length}
                bd_pic = self._model.get_background_graphic(**bd_dict)
                pic += bd_pic
            return pic

    def ideal_endpoints(self):
        start = self._start.coordinates()
        end = self._end.coordinates()
        x1, x2 = (real(k) for k in [start, end])
        y1, y2 = (imag(k) for k in [start, end])
        M = self._model
        if CC(start).is_infinity():
            return [M.get_point(start), M.get_point(x2)]
        if CC(end).is_infinity():
            return [M.get_point(x1), M.get_point(end)]
        if x1 == x2:  # vertical geodesic
            return [M.get_point(x1), M.get_point(infinity)]
        # Center and radius of the semicircle through start and end.
        c = ((x1 + x2) * (x2 - x1) + (y1 + y2) * (y2 - y1)) / (2 * (x2 - x1))
        r = sqrt((c - x1) ** 2 + y1 ** 2)
        return [M.get_point(c - r), M.get_point(c + r)]

    def common_perpendicular(self, other):
        if other._model is not self._model:
            other = other.to_model(self._model)
        A = self.reflection_involution()
        B = other.reflection_involution()
        C = A * B
        if C.classification() != 'hyperbolic':
            raise ValueError('geodesics intersect; no common perpendicular exists')
        return C.fixed_point_set()

    def intersection(self, other):
        UHP = self.model()
        if other.model() != UHP:
            other = other.to_model(UHP)
        i_start_1, i_end_1 = sorted(self.ideal_endpoints(), key=str)
        i_start_2, i_end_2 = sorted(other.ideal_endpoints(), key=str)
        start_1, end_1 = (CC(x.coordinates()) for x in self.endpoints())
        start_2, end_2 = (CC(x.coordinates()) for x in other.endpoints())
        # Orient each segment left-to-right (bottom-to-top for vertical lines).
        if start_1.real() > end_1.real():
            start_1, end_1 = end_1, start_1
        elif start_1.real() == end_1.real():
            if start_1.imag() > end_1.imag():
                start_1, end_1 = end_1, start_1
        if start_2.real() > end_2.real():
            start_2, end_2 = end_2, start_2
        elif start_2.real() == end_2.real():
            if start_2.imag() > end_2.imag():
                start_2, end_2 = end_2, start_2
        if (i_start_1 == i_start_2) and (i_end_1 == i_end_2):
            # Both segments lie on the same complete geodesic.
            if (start_1 == start_2) and (end_1 == end_2):
                return self
            if (start_1.real() == end_1.real()) or end_1.real().is_infinity():
                # Vertical geodesic: order the segments by imaginary part.
                if start_2.imag() < start_1.imag():
                    start_1, start_2 = start_2, start_1
                    end_1, end_2 = end_2, end_1
                if end_1 == start_2:
                    return [UHP.get_point(end_1)]
                elif end_1.real().is_infinity() and end_2.real().is_infinity():
                    return UHP.get_geodesic(start_2, end_2)
                elif end_1.imag() < start_2.imag():
                    return []
                else:
                    return UHP.get_geodesic(start_2, end_1)
            else:
                if start_2.real() < start_1.real():
                    start_1, start_2 = start_2, start_1
                    end_1, end_2 = end_2, end_1
                if end_1 == start_2:
                    return [UHP.get_point(end_1)]
                elif end_1.real() < start_2.real():
                    return []
                else:
                    return UHP.get_geodesic(start_2, end_1)
        else:
            if start_2.real() < start_1.real():
                start_1, start_2 = start_2, start_1
                end_1, end_2 = end_2, end_1
            if self.is_asymptotically_parallel(other):
                # Geodesics can only meet at a shared (ideal) endpoint.
                if start_1 == start_2:
                    return [UHP.get_point(start_1)]
                elif (end_1 == start_2) or (end_1 == end_2):
                    return [UHP.get_point(end_1)]
                else:
                    return []
            else:
                A = self.reflection_involution()
                B = other.reflection_involution()
                C = A * B
                if C.classification() in ['hyperbolic', 'parabolic']:
                    return []
                elif end_1 == start_2:
                    return [UHP.get_point(end_1)]
                else:
                    P = CC(C.fixed_point_set()[0].coordinates())
                    if (start_1.real() <= P.real() <= end_1.real()) and \
                       (start_2.real() <= P.real() <= end_2.real()):
                        return C.fixed_point_set()
                    else:
                        return []

    def perpendicular_bisector(self):
        if self.length() == infinity:
            raise ValueError('the length must be finite')
        start = self._start.coordinates()
        end = self._end.coordinates()
        if (real(start - end) > EPSILON) or \
           ((abs(real(start - end)) < EPSILON) and (imag(start - end) > 0)):
            start, end = end, start
        S = self.complete()._to_std_geod(start)
        d = self._model._dist_points(start, end) / 2
        T1 = matrix([[exp(d / 2), 0], [0, exp(-d / 2)]])
        s2 = sqrt(2) / 2
        T2 = matrix([[s2, -s2], [s2, s2]])
        isom_mtrx = S.inverse() * (T1 * T2) * S
        # If the matrix is approximately real, replace it by its real part.
        if (isom_mtrx - isom_mtrx.conjugate()).norm() < 5 * EPSILON:
            isom_mtrx = (isom_mtrx + isom_mtrx.conjugate()) / 2
        H = self._model._Isometry(self._model, isom_mtrx, check=False)
        return self._model.get_geodesic(H(self._start), H(self._end))

    def midpoint(self):
        from sage.matrix.matrix_symbolic_dense import Matrix_symbolic_dense
        if self.length() == infinity:
            raise ValueError('the length must be finite')
        start = self._start.coordinates()
        end = self._end.coordinates()
        d = self._model._dist_points(start, end) / 2
        if (real(start - end) > EPSILON) or \
           ((abs(real(start - end)) < EPSILON) and (imag(start - end) > 0)):
            start, end = end, start
        S = self.complete()._to_std_geod(start)
        if isinstance(S, Matrix_symbolic_dense):
            S = S.simplify_full().simplify_full()
        S_1 = S.inverse()
        T = matrix([[exp(d), 0], [0, 1]])
        M = S_1 * T * S
        P_3 = moebius_transform(M, start)
        return self._model.get_point(P_3)

    def angle(self, other):
        if self.is_parallel(other):
            raise ValueError('geodesics do not intersect')
        if other._model is not self._model:
            other = other.to_model(self._model)
        a1, a2 = self.start().coordinates(), self.end().coordinates()
        b1, b2 = other.start().coordinates(), other.end().coordinates()
        if (abs(a2 - a1) < EPSILON) or (abs(b2 - b1) < EPSILON):
            raise ValueError('intersecting geodesic is a point')
        p1, p2 = (p.coordinates() for p in self.ideal_endpoints())
        q1, q2 = (p.coordinates() for p in other.ideal_endpoints())
        # Geodesics sharing both ideal endpoints make a zero angle.
        if (infinity in [p1, p2]) and (infinity in [q1, q2]):
            return 0
        v = (abs(p1 - q1) < EPSILON) and (abs(p2 - q2) < EPSILON)
        w = (abs(p1 - q2) < EPSILON) and (abs(p2 - q1) < EPSILON)
        if v or w:
            return 0
        if infinity in [q1, q2]:
            p1, p2, q1, q2 = q1, q2, p1, p2
        if p1 == infinity:
            p1, p2 = p2, p1
        if p2 == infinity:
            # Translate so the finite endpoint sits at the origin.
            q1 = q1 - p1
            q2 = q2 - p1
            p1 = 0
        if p2 != infinity:
            # Map self onto the standard geodesic, then measure the angle there.
            t = HyperbolicGeodesicUHP._crossratio_matrix(p1, (p1 + p2) / 2, p2)
            q1, q2 = (moebius_transform(t, q) for q in [q1, q2])
        return arccos(abs(q1 + q2) / abs(q2 - q1))

    @staticmethod
    def _get_B(a):
        from sage.structure.element import Element
        from sage.symbolic.expression import Expression
        from sage.rings.complex_double import CDF
        if isinstance(a, (int, float, complex)):
            a = CDF(a)
        if isinstance(a, Expression):
            P = SR
            zero = SR.zero()
            one = SR.one()
            I = SR('I')
        elif isinstance(a, Element):
            P = a.parent()
            zero = P.zero()
            one = P.one()
            I = P.gen()
            # The generator must be a square root of -1.
            if I.is_one() or (I * I).is_one() or not (-I * I).is_one():
                raise ValueError('invalid number')
        else:
            raise ValueError('not a complex number')
        return matrix(P, 2, [one, zero, zero, -I])

    def _to_std_geod(self, p):
        s, e = (k.coordinates() for k in self.complete().endpoints())
        B = HyperbolicGeodesicUHP._get_B(p)
        # outmat sends the geodesic through s, p, e to the imaginary axis.
        outmat = B * HyperbolicGeodesicUHP._crossratio_matrix(s, p, e)
        outmat = outmat / outmat.det().sqrt()
        if (outmat - outmat.conjugate()).norm(1) < 10 ** -9:
            # Round an approximately real matrix to its real part.
            outmat = (outmat + outmat.conjugate()) / 2
        return outmat

    @staticmethod
    def _crossratio_matrix(p0, p1, p2):
        if p0 == infinity:
            return matrix([[0, -(p1 - p2)], [-1, p2]])
        elif p1 == infinity:
            return matrix([[1, -p0], [1, -p2]])
        elif p2 == infinity:
            return matrix([[1, -p0], [0, p1 - p0]])
        return matrix([[p1 - p2, (p1 - p2) * (-p0)],
                       [p1 - p0, (p1 - p0) * (-p2)]])
def cook_test(test, ref_len_counts, eff=None, n=4):
    reflen, refmaxcounts = ref_len_counts
    testlen, counts = precook(test, n, True)
    result = {}
    # Pick the effective reference length.
    if eff == 'closest':
        result['reflen'] = min((abs(l - testlen), l) for l in reflen)[1]
    else:
        result['reflen'] = reflen
    result['testlen'] = testlen
    # Total candidate n-grams of each order 1..n.
    result['guess'] = [max(0, testlen - k + 1) for k in range(1, n + 1)]
    result['correct'] = [0] * n
    # Clip each n-gram count by its maximum count across the references.
    for ngram, count in counts.items():
        result['correct'][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
    return result
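# Self-contained sketch of the clipped n-gram counting behind the 'correct'
# field above; precook (defined elsewhere) is assumed to return
# (length, Counter-of-ngrams) pairs like the helper below.
from collections import Counter

def _ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

ref = 'the cat sat on the mat'.split()
hyp = 'the the the cat'.split()
# Each hypothesis n-gram count is clipped by its maximum reference count.
clipped = sum(min(c, _ngrams(ref, 1)[g]) for g, c in _ngrams(hyp, 1).items())
assert clipped == 3  # 'the' clipped from 3 to 2, plus one 'cat'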
class DeiTModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_mha_exhaustive():
    Bs = [1, 2, 4, 8]
    SNs = [512, 1024, 2048]
    SMs = [512, 1024, 2048]
    Hs = [16, 20, 24, 32]
    Ps = [64, 96, 128, 192, 384]
    compiled_sdfg = create_attn_forward_and_compile(iters=True)
    for SM in SMs:
        for B, SN, H, P in itertools.product(Bs, SNs, Hs, Ps):
            mha(B, SN, SM, H, P, compiled_sdfg.sdfg, compiled_sdfg)
def _remove_stopwords(text: Any, stopwords: Optional[Set[str]] = None) -> Any:
    if pd.isna(text):
        return text
    stopwords = english_stopwords if not stopwords else stopwords
    return ' '.join(word for word in str(text).split()
                    if word.lower() not in stopwords)
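# Hypothetical usage over a pandas column; english_stopwords is assumed to be
# a set defined alongside the function, so a stand-in is shown here.
import pandas as pd

english_stopwords = {'the', 'a', 'is'}
s = pd.Series(['The cat is here', None, 'a dog'])
print(s.map(_remove_stopwords).tolist())  # ['cat here', None, 'dog']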
@test_utils.test()
def test_func_no_return():
    with pytest.raises(ti.TaichiCompilationError,
                       match='Function has a return type but does not have a return statement'):

        @ti.func
        def bar() -> ti.i32:
            pass

        @ti.kernel
        def foo() -> ti.i32:
            return bar()

        foo()
def bool_(string):
    if string == 'True':
        return True
    elif string == 'False':
        return False
    else:
        raise Exception('Cannot cast %r to bool value.' % string)
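# bool_ is handy as an argparse type, since the builtin bool('False') is
# truthy; the flag name here is illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--shuffle', type=bool_, default=False)
assert parser.parse_args(['--shuffle', 'False']).shuffle is False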
def test_multiple_and_proofs(params):
    p1, p2, secrets_dict = params
    and_proof = AndProofStmt(p1, p2, p2, p1, p1, p1, p2)
    prover = and_proof.get_prover(secrets_dict)
    verifier = and_proof.get_verifier()
    assert verify(verifier, prover)
class TraceMethodCallMeta(type):

    def __init__(self, name, bases, namespace):
        # Wrap every plain function in the class body with the tracing decorator.
        for func_name, func in namespace.items():
            if inspect.isfunction(func):
                setattr(self, func_name, print_on_call_decorator(func))
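# Minimal sketch of exercising the metaclass, assuming it shares a module with
# a print_on_call_decorator like this stand-in.
import functools

def print_on_call_decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

class Traced(metaclass=TraceMethodCallMeta):
    def greet(self):
        return 'hi'

Traced().greet()  # prints "calling greet", then returns 'hi'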
def create_feature_columns() -> Tuple[list, list, list]:
    category_feature_columns, dense_feature_columns = [], []
    label_feature_columns = []
    # Dense features: play time plus 7-day user/item/cross behavior sums.
    videoplayseconds = fc.numeric_column('videoplayseconds', default_value=0.0)
    u_read_comment_7d_sum = fc.numeric_column('u_read_comment_7d_sum', default_value=0.0)
    u_like_7d_sum = fc.numeric_column('u_like_7d_sum', default_value=0.0)
    u_click_avatar_7d_sum = fc.numeric_column('u_click_avatar_7d_sum', default_value=0.0)
    u_forward_7d_sum = fc.numeric_column('u_forward_7d_sum', default_value=0.0)
    u_comment_7d_sum = fc.numeric_column('u_comment_7d_sum', default_value=0.0)
    u_follow_7d_sum = fc.numeric_column('u_follow_7d_sum', default_value=0.0)
    u_favorite_7d_sum = fc.numeric_column('u_favorite_7d_sum', default_value=0.0)
    i_read_comment_7d_sum = fc.numeric_column('i_read_comment_7d_sum', default_value=0.0)
    i_like_7d_sum = fc.numeric_column('i_like_7d_sum', default_value=0.0)
    i_click_avatar_7d_sum = fc.numeric_column('i_click_avatar_7d_sum', default_value=0.0)
    i_forward_7d_sum = fc.numeric_column('i_forward_7d_sum', default_value=0.0)
    i_comment_7d_sum = fc.numeric_column('i_comment_7d_sum', default_value=0.0)
    i_follow_7d_sum = fc.numeric_column('i_follow_7d_sum', default_value=0.0)
    i_favorite_7d_sum = fc.numeric_column('i_favorite_7d_sum', default_value=0.0)
    c_user_author_read_comment_7d_sum = fc.numeric_column('c_user_author_read_comment_7d_sum', default_value=0.0)
    dense_feature_columns += [videoplayseconds, u_read_comment_7d_sum, u_like_7d_sum,
                              u_click_avatar_7d_sum, u_forward_7d_sum, u_comment_7d_sum,
                              u_follow_7d_sum, u_favorite_7d_sum, i_read_comment_7d_sum,
                              i_like_7d_sum, i_click_avatar_7d_sum, i_forward_7d_sum,
                              i_comment_7d_sum, i_follow_7d_sum, i_favorite_7d_sum,
                              c_user_author_read_comment_7d_sum]
    # Categorical features backed by vocabulary files, each mapped to an embedding.
    userid = fc.categorical_column_with_vocabulary_file('userid', os.path.join(FLAGS.vocabulary_dir, 'userid.txt'))
    feedid = fc.categorical_column_with_vocabulary_file('feedid', os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
    device = fc.categorical_column_with_vocabulary_file('device', os.path.join(FLAGS.vocabulary_dir, 'device.txt'))
    authorid = fc.categorical_column_with_vocabulary_file('authorid', os.path.join(FLAGS.vocabulary_dir, 'authorid.txt'))
    bgm_song_id = fc.categorical_column_with_vocabulary_file('bgm_song_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_song_id.txt'))
    bgm_singer_id = fc.categorical_column_with_vocabulary_file('bgm_singer_id', os.path.join(FLAGS.vocabulary_dir, 'bgm_singer_id.txt'))
    manual_tag_list = fc.categorical_column_with_vocabulary_file('manual_tag_list', os.path.join(FLAGS.vocabulary_dir, 'manual_tag_id.txt'))
    userid_emb = fc.embedding_column(userid, FLAGS.embedding_dim)
    feedid_emb = fc.embedding_column(feedid, FLAGS.embedding_dim)
    device_emb = fc.embedding_column(device, FLAGS.embedding_dim)
    authorid_emb = fc.embedding_column(authorid, FLAGS.embedding_dim)
    bgm_song_id_emb = fc.embedding_column(bgm_song_id, FLAGS.embedding_dim)
    bgm_singer_id_emb = fc.embedding_column(bgm_singer_id, FLAGS.embedding_dim)
    manual_tag_id_emb = fc.embedding_column(manual_tag_list, FLAGS.embedding_dim, combiner='mean')
    category_feature_columns += [userid_emb, feedid_emb, device_emb, authorid_emb,
                                 bgm_song_id_emb, bgm_singer_id_emb, manual_tag_id_emb]
    # Label column.
    read_comment = fc.numeric_column('read_comment', default_value=0.0)
    label_feature_columns += [read_comment]
    return (dense_feature_columns, category_feature_columns, label_feature_columns)
def get_oss_binary_file(test_name: str, test_type: TestType) -> str:
    assert test_type in {TestType.CPP, TestType.PY}
    binary_folder = get_oss_binary_folder(test_type)
    binary_file = os.path.join(binary_folder, test_name)
    if test_type == TestType.PY:
        binary_file = 'python ' + binary_file
    return binary_file
def _masked_coo(A, mask):
    row = A.row[mask]
    col = A.col[mask]
    data = A.data[mask]
    return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
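# Example: keep only the strictly upper-triangular entries of a COO matrix.
import numpy as np
from scipy.sparse import coo_matrix

A = coo_matrix(np.array([[1, 2], [3, 4]]))
upper = _masked_coo(A, A.row < A.col)
print(upper.toarray())  # [[0 2], [0 0]]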
class LimitDataTransformer(BaseEstimator, TransformerMixin):

    def __init__(self, num_imp):
        self.num_imp = num_imp

    def fit(self, X, *args):
        return self

    def transform(self, df):
        df = df[df['impression'] > self.num_imp]
        return df
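# Example: fit_transform comes from TransformerMixin, so one call drops all
# rows at or below the impression threshold.
import pandas as pd

df = pd.DataFrame({'impression': [5, 50, 500], 'clicks': [1, 3, 9]})
print(LimitDataTransformer(num_imp=10).fit_transform(df))  # keeps 50 and 500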
@pytest.mark.usefixtures('spark', 'columns')
@pytest.fixture()
def simple_dataframe(spark, columns):
    data = [(1, 2, 19842), (1, 4, 19844), (1, 3, 19843), (1, 5, 19845),
            (1, 6, 19846), (1, 7, 19847), (2, 1, 19841), (2, 2, 19842),
            (2, 3, 19843), (2, 4, 19844), (3, 10, 19844), (4, 11, 19843),
            (4, 12, 19845), (1, 1, 19841)]
    return spark.createDataFrame(data, schema=columns)
def filter_depcc_svo(depcc_svo_fpath, output_fpath, common_svo):
    with gzip.open(depcc_svo_fpath, 'rt', encoding='utf-8') as dep_in, \
         gzip.open(output_fpath, 'wt', encoding='utf-8') as dep_out:
        svo_num = 0
        common_svo_num = 0
        for i, line in enumerate(dep_in):
            try:
                line = line.replace('_', '\t')
                if i % 1000000 == 0:
                    print(i / 1000000, 'million of svo triples')
                v_pos, s_pos, o_pos, freq = line.split('\t')
                v = extract_lemma(v_pos)
                s = extract_lemma(s_pos)
                o = extract_lemma(o_pos)
                svo_num += 1
                if (s, v, o) in common_svo:
                    dep_out.write(line)
                    common_svo_num += 1
            except KeyboardInterrupt:
                break
            except Exception:
                if verbose:
                    print('Bad line:', line)
    print('Number of frames svo triples:', svo_num)
    print('Number of common frame + dep svo triples:', common_svo_num)
    print('Output:', output_fpath)
def _serialize_swagger2(definitions: DefinitionList) -> Generator[Callable | None, None, None]:
    for definition in definitions:
        name = definition['name']
        collection_format = definition.get('collectionFormat', 'csv')
        type_ = definition.get('type')
        if definition['in'] == 'header':
            yield to_string(name)
        if type_ in ('array', 'object'):
            # Swagger 2 collectionFormat selects the delimiter for serialized values.
            if collection_format == 'csv':
                yield delimited(name, delimiter=',')
            if collection_format == 'ssv':
                yield delimited(name, delimiter=' ')
            if collection_format == 'tsv':
                yield delimited(name, delimiter='\t')
            if collection_format == 'pipes':
                yield delimited(name, delimiter='|')
class MagmaGBLogPrettyPrinter:
    cmd_inpt = re.compile(r'^>>>$')
    app_inpt = re.compile(r'^Append\(~_sage_, 0\);$')
    deg_curr = re.compile(r'^Basis length\: (\d+), queue length\: (\d+), step degree\: (\d+), num pairs\: (\d+)$')
    pol_curr = re.compile(r'^Number of pair polynomials\: (\d+), at (\d+) column\(s\), .*')

    def __init__(self, verbosity=1, style='magma'):
        self.verbosity = verbosity
        if style not in ['sage', 'magma']:
            raise ValueError('style must be sage or magma')
        self.style = style
        self.curr_deg = 0
        self.curr_npairs = 0
        self.max_deg = 0
        self.storage = ''
        self.sync = None

    def write(self, s):
        verbosity, style = self.verbosity, self.style
        if isinstance(s, bytes):
            if isinstance(sys.stdout.encoding, str):
                s = s.decode(sys.stdout.encoding)
            else:
                s = s.decode('UTF-8')
        if self.storage:
            s = self.storage + s
            self.storage = ''
        for line in s.splitlines():
            match = re.match(MagmaGBLogPrettyPrinter.cmd_inpt, line)
            if match:
                self.sync = 1
                continue
            if self.sync:
                if self.sync == 1:
                    self.sync = line
                    continue
                else:
                    if line == '':
                        continue
                    self.sync = None
                    continue
            if re.match(MagmaGBLogPrettyPrinter.app_inpt, line):
                continue
            if re.match(MagmaGBLogPrettyPrinter.deg_curr, line):
                match = re.match(MagmaGBLogPrettyPrinter.deg_curr, line)
                nbasis, npairs, deg, npairs_deg = map(int, match.groups())
                self.curr_deg = deg
                self.curr_npairs = npairs
            if re.match(MagmaGBLogPrettyPrinter.pol_curr, line):
                match = re.match(MagmaGBLogPrettyPrinter.pol_curr, line)
                pol_curr, col_curr = map(int, match.groups())
                if pol_curr != 0:
                    if self.max_deg < self.curr_deg:
                        self.max_deg = self.curr_deg
                    if (style == 'sage') and (verbosity >= 1):
                        print('Leading term degree: %2d. Critical pairs: %d.'
                              % (self.curr_deg, self.curr_npairs))
                elif (style == 'sage') and (verbosity >= 1):
                    print('Leading term degree: %2d. Critical pairs: %d '
                          '(all pairs of current degree eliminated by criteria).'
                          % (self.curr_deg, self.curr_npairs))
            if (style == 'magma') and (verbosity >= 1):
                print(line)

    def flush(self):
        import sys
        sys.stdout.flush()
@pytest.fixture(scope='module', params=(scipy.io._mmio, fmm), autouse=True)
def implementations(request):
    global mminfo
    global mmread
    global mmwrite
    mminfo = request.param.mminfo
    mmread = request.param.mmread
    mmwrite = request.param.mmwrite
def test_contingency_table():
    im_true = np.array([1, 2, 3, 4])
    im_test = np.array([1, 1, 8, 8])
    table1 = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25],
                       [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.25]])
    sparse_table2 = contingency_table(im_true, im_test, normalize=True)
    table2 = sparse_table2.toarray()
    assert_array_equal(table1, table2)
class AbsFrameModel(nn.Module):

    def input_size(self) -> int:
        raise NotImplementedError

    def output_size(self) -> int:
        raise NotImplementedError

    def forward(self, x: torch.FloatTensor,
                x_len: torch.LongTensor) -> Tuple[torch.FloatTensor, torch.LongTensor]:
        raise NotImplementedError
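# A toy concrete subclass of the interface above; names are illustrative.
import torch
import torch.nn as nn

class LinearFrameModel(AbsFrameModel):
    def __init__(self, idim: int, odim: int):
        super().__init__()
        self.proj = nn.Linear(idim, odim)
        self._idim, self._odim = idim, odim

    def input_size(self) -> int:
        return self._idim

    def output_size(self) -> int:
        return self._odim

    def forward(self, x, x_len):
        # Frame-wise projection leaves the per-utterance lengths unchanged.
        return self.proj(x), x_len

m = LinearFrameModel(80, 256)
y, y_len = m(torch.randn(2, 100, 80), torch.tensor([100, 73]))
assert y.shape == (2, 100, 256)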
class SlipperyJointsHopper(RoboschoolXMLModifierMixin, ModifiableRoboschoolHopper):

    def __init__(self):
        self.friction = 0.2
        # Patch the geom friction in the MuJoCo XML before the base init.
        with self.modify_xml('hopper.xml') as tree:
            for elem in tree.iterfind('default/geom'):
                elem.set('friction', str(self.friction) + ' .1 .1')
        RoboschoolForwardWalkerMujocoXML.__init__(self, self.model_xml, 'torso',
                                                  action_dim=3, obs_dim=15, power=0.75)

    @property
    def parameters(self):
        parameters = super(SlipperyJointsHopper, self).parameters
        parameters.update({'friction': self.friction})
        return parameters
class CmodReLU(Module):
    __constants__ = ['inplace']
    inplace: bool

    def __init__(self, threshold: Optional[float] = None, inplace: bool = False):
        super(CmodReLU, self).__init__()
        self.inplace = inplace
        if not isinstance(threshold, float):
            # Fall back to a small random learnable threshold when none is given.
            threshold = Parameter(torch.rand(1) * 0.25)
        self.threshold = threshold

    def forward(self, input: Tensor) -> Tensor:
        return cF.cmodrelu(input, threshold=self.threshold, inplace=self.inplace)

    def extra_repr(self) -> str:
        inplace_str = 'inplace=True' if self.inplace else ''
        return inplace_str
def calculate_metric(data, opt):
    opt = deepcopy(opt)
    metric_type = opt.pop('type')
    metric = METRIC_REGISTRY.get(metric_type)(**data, **opt)
    return metric
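# Minimal sketch of the registry convention this helper assumes: 'type' picks
# a callable out of METRIC_REGISTRY, and the remaining opt keys plus the data
# dict become its keyword arguments. The registry and metric below are
# illustrative stand-ins, assumed to live in the same module as the function.
class _DemoRegistry(dict):
    def get(self, key):
        return self[key]

METRIC_REGISTRY = _DemoRegistry(psnr=lambda img, img2, crop_border=0: 42.0)

score = calculate_metric(data={'img': None, 'img2': None},
                         opt={'type': 'psnr', 'crop_border': 4})
assert score == 42.0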
def print_results(df, min_threshold):
    precision = sum(df['true_positive']) / (sum(df['true_positive']) + sum(df['false_positive']))
    recall = sum(df['true_positive']) / (sum(df['true_positive']) + sum(df['false_negative']))
    f1_score = (2 * precision * recall) / (precision + recall)
    speaker_match = sum(df['n_speaker_match']) / sum(df['true_positive'])
    verb_match = sum(df['n_verb_match']) / sum(df['true_positive'])
    print('\n')
    print(f'Quote Extraction - {min_threshold}, Precision: {(100 * precision):.3f}%')
    print(f'Quote Extraction - {min_threshold}, Recall: {(100 * recall):.3f}%')
    print(f'Quote Extraction - {min_threshold}, F1 Score: {(100 * f1_score):.3f}%')
    print(f'Speaker Match - {min_threshold}, Accuracy: {(100 * speaker_match):.3f}%')
    print(f'Verb Match - {min_threshold}, Accuracy: {(100 * verb_match):.3f}%')
    print('\n')
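# Tiny smoke test: two predicted quotes, one spurious, one missed.
import pandas as pd

df = pd.DataFrame({'true_positive': [1, 1], 'false_positive': [0, 1],
                   'false_negative': [1, 0], 'n_speaker_match': [1, 0],
                   'n_verb_match': [1, 1]})
print_results(df, min_threshold=0.5)  # precision = recall = F1 = 66.667%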
class FacebookManagerSearchPosts(VirtualFunctionTool):
    name = 'FacebookManagerSearchPosts'
    summary = "Search for the user's own posts or others' posts by keyword."
    parameters: List[ArgParameter] = [
        {'name': 'user_id', 'type': 'string',
         'description': 'The unique identifier of the user whose posts to search for.',
         'required': True},
        {'name': 'keyword', 'type': 'string',
         'description': 'The keyword to search for.', 'required': True},
        {'name': 'max_results', 'type': 'integer',
         'description': 'The maximum number of search results to return. Default value is 10.',
         'required': False},
    ]
    returns: List[ArgReturn] = [
        {'name': 'posts', 'type': 'array',
         'description': 'The search results containing objects with post_id and content.'},
    ]
    exceptions: List[ArgException] = [
        {'name': 'NotFoundException',
         'description': "The 'user_id' parameter is not found."},
        {'name': 'InvalidRequestException',
         'description': "The 'keyword' parameter is empty."},
    ]
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):

    def __init__(self, spatial_scale, out_size, out_channels, no_trans,
                 group_size=1, part_size=None, sample_per_part=4, trans_std=0.0,
                 num_offset_fcs=3, num_mask_fcs=2, deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(
            spatial_scale, out_size, out_channels, no_trans, group_size,
            part_size, sample_per_part, trans_std)
        self.num_offset_fcs = num_offset_fcs
        self.num_mask_fcs = num_mask_fcs
        self.deform_fc_channels = deform_fc_channels
        if not no_trans:
            # Offset branch: FC stack ending in 2 offsets per output bin.
            offset_fc_seq = []
            ic = self.out_size * self.out_size * self.out_channels
            for i in range(self.num_offset_fcs):
                if i < (self.num_offset_fcs - 1):
                    oc = self.deform_fc_channels
                else:
                    oc = self.out_size * self.out_size * 2
                offset_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if i < (self.num_offset_fcs - 1):
                    offset_fc_seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*offset_fc_seq)
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()
            # Mask branch: FC stack ending in a sigmoid gate per output bin.
            mask_fc_seq = []
            ic = self.out_size * self.out_size * self.out_channels
            for i in range(self.num_mask_fcs):
                if i < (self.num_mask_fcs - 1):
                    oc = self.deform_fc_channels
                else:
                    oc = self.out_size * self.out_size
                mask_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if i < (self.num_mask_fcs - 1):
                    mask_fc_seq.append(nn.ReLU(inplace=True))
                else:
                    mask_fc_seq.append(nn.Sigmoid())
            self.mask_fc = nn.Sequential(*mask_fc_seq)
            self.mask_fc[-2].weight.data.zero_()
            self.mask_fc[-2].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # First pass without offsets to obtain features for the FC branches.
            x = deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, True, self.group_size, self.part_size,
                self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size, self.out_size)
            # Second pass with predicted offsets, modulated by the mask.
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std) * mask
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_asin_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [np.clip(rng.randn(2, 3, 4).astype(np.float32), -0.9, 0.9)]
    function_tester(rng, F.asin, np.arcsin, inputs, ctx=ctx,
                    func_name=func_name, atol_f=0.001, atol_b=0.01)
def number2onehot(number):
    onehot = [0] * 12
    for i in number:
        onehot[i] = 1
    return onehot
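# Example: encode pitch classes 0-11 as a multi-hot vector (C major triad).
assert number2onehot([0, 4, 7]) == [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]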
def make_arch_string(ordered_arg_names=(), args_to_ignore=(), **kwargs):
    # Positional folders first, in the order given.
    starting_args = [str(kwargs.pop(arg_key)) for arg_key in ordered_arg_names]
    for arg_key in args_to_ignore:
        kwargs.pop(arg_key, None)
    # Remaining kwargs become deterministic, sorted "key:value" folders.
    sorted_remaining_tuples = sorted(kwargs.items())
    remaining_folder_names = ['{}:{}'.format(k, v) for k, v in sorted_remaining_tuples]
    all_folders = starting_args + remaining_folder_names
    folder = os.path.join(*all_folders)
    return folder
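# Example: ordered args become the leading folders, ignored keys are dropped,
# and the rest sort into "key:value" segments (POSIX separators shown).
print(make_arch_string(ordered_arg_names=['model', 'depth'],
                       args_to_ignore=['seed'],
                       model='resnet', depth=50, lr=0.1, seed=3))
# resnet/50/lr:0.1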
def print_scores(scores, etype):
    turns = ['turn 1', 'turn 2', 'turn 3', 'turn 4', 'turn >4']
    levels = ['easy', 'medium', 'hard', 'extra', 'all', 'joint_all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)',
                     'group(no Having)', 'group', 'order', 'and/or', 'IUEN', 'keywords']
    print('{:20} {:20} {:20} {:20} {:20} {:20} {:20}'.format('', *levels))
    counts = [scores[level]['count'] for level in levels]
    print('{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'.format('count', *counts))
    if etype in ['all', 'exec']:
        print(' EXECUTION ACCURACY ')
        this_scores = [scores[level]['exec'] for level in levels]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('execution', *this_scores))
    if etype in ['all', 'match']:
        print('\n EXACT MATCHING ACCURACY ')
        exact_scores = [scores[level]['exact'] for level in levels]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('exact match', *exact_scores))
        print('\nPARTIAL MATCHING ACCURACY')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print(' PARTIAL MATCHING RECALL ')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print(' PARTIAL MATCHING F1 ')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
    print('\n\n{:20} {:20} {:20} {:20} {:20} {:20}'.format('', *turns))
    counts = [scores[turn]['count'] for turn in turns]
    print('{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'.format('count', *counts))
    if etype in ['all', 'exec']:
        print(' TURN EXECUTION ACCURACY ')
        this_scores = [scores[turn]['exec'] for turn in turns]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('execution', *this_scores))
    if etype in ['all', 'match']:
        print('\n TURN EXACT MATCHING ACCURACY ')
        exact_scores = [scores[turn]['exact'] for turn in turns]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('exact match', *exact_scores))
class HashUnpinned(HashError):
    order = 3
    head = ('In --require-hashes mode, all requirements must have their '
            'versions pinned with ==. These do not:')
class TestHparamsRegistry(unittest.TestCase):

    @parameterized.expand(itertools.product(algorithms.ALGORITHMS, datasets.DATASETS))
    def test_random_hparams_deterministic(self, algorithm_name, dataset_name):
        a = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        b = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        self.assertEqual(a.keys(), b.keys())
        for key in a.keys():
            self.assertEqual(a[key], b[key], key)