code
stringlengths
101
5.91M
class RemoveSymbol(PP_Module):
    """Preprocessing module that strips every occurrence of one symbol."""

    def __init__(self, symbol):
        super().__init__()
        self.symbol = symbol
        # Exactly one character: str.replace on longer strings would change semantics.
        assert (len(symbol) == 1)

    def process(self, orth: str):
        """Return *orth* with all occurrences of the configured symbol removed."""
        return orth.replace(self.symbol, '')
def _covariance_between_points_for_variational_models(kernel: gpflow.kernels.Kernel, inducing_points: TensorType, q_sqrt: TensorType, query_points_1: TensorType, query_points_2: TensorType, whiten: bool) -> TensorType:
    """Posterior covariance between two batches of query points for a sparse
    variational GP.

    The returned covariance is assembled as
    ``first_cov_term + K12 - Kx1^T K^{-1} Kx2``, where the first term pushes
    the variational covariance factor ``q_sqrt`` through the kernel blocks;
    ``whiten`` selects between the whitened and unwhitened parameterisation
    of ``q_sqrt``.  Output shape is ``[..., L, N, M]`` (L latent functions,
    N points in query_points_1, M points in query_points_2).
    """
    # Shape contract: query_points_1 may carry leading batch dims; query_points_2 may not.
    tf.debugging.assert_shapes([(query_points_1, [..., 'A', 'D']), (query_points_2, ['B', 'D'])])
    num_latent = q_sqrt.shape[0]
    (K, Kx1, Kx2, K12) = _compute_kernel_blocks(kernel, inducing_points, query_points_1, query_points_2, num_latent)
    # Whiten the cross-covariances by the Cholesky factor of K(Z, Z).
    L = tf.linalg.cholesky(K)
    Linv_Kx1 = tf.linalg.triangular_solve(L, Kx1)
    Linv_Kx2 = tf.linalg.triangular_solve(L, Kx2)

    def _leading_mul(M_1: TensorType, M_2: TensorType, transpose_a: bool) -> TensorType:
        # Batched matmul over the latent index `l`; M_2 carries no leading batch dims.
        if transpose_a:
            return tf.einsum('...lji,ljk->...lik', M_1, M_2)
        else:
            return tf.einsum('...lij,lkj->...lik', M_1, M_2)
    if whiten:
        # Whitened parameterisation: apply q_sqrt to the whitened covariances directly.
        first_cov_term = _leading_mul(_leading_mul(Linv_Kx1, q_sqrt, transpose_a=True), _leading_mul(Linv_Kx2, q_sqrt, transpose_a=True), transpose_a=False)
    else:
        # Unwhitened parameterisation: solve against L once more before applying q_sqrt.
        Linv_qsqrt = tf.linalg.triangular_solve(L, q_sqrt)
        first_cov_term = _leading_mul(_leading_mul(Linv_Kx1, Linv_qsqrt, transpose_a=True), _leading_mul(Linv_Kx2, Linv_qsqrt, transpose_a=True), transpose_a=False)
    second_cov_term = K12
    third_cov_term = _leading_mul(Linv_Kx1, Linv_Kx2, transpose_a=True)
    cov = ((first_cov_term + second_cov_term) - third_cov_term)
    tf.debugging.assert_shapes([(query_points_1, [..., 'N', 'D']), (query_points_2, ['M', 'D']), (cov, [..., 'L', 'N', 'M'])])
    return cov
def parse_doc(d) -> Document:
    """Build a Document from its plain-dict representation.

    Expects keys 'name' and 'sentences'; an optional 'metadata' mapping
    is copied into the document's props.
    """
    sentences = [Sentence(**entry) for entry in d['sentences']]
    doc = Document(d['name'], sentences)
    for key, value in d.get('metadata', {}).items():
        doc.props[key] = value
    return doc
class RandomElasticDeformation(tfm.Transform):
    """Randomly deform sample entries with a B-spline field (SimpleITK).

    Applied with probability ``p``; each entry gets its own interpolator
    (e.g. B-spline for images, nearest-neighbour for label maps) so labels
    stay discrete.
    """

    def __init__(self, num_control_points: int=4, deformation_sigma: float=5.0, interpolators: tuple=(sitk.sitkBSpline, sitk.sitkNearestNeighbor), spatial_rank: int=2, fill_value: float=0.0, p: float=0.5, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)):
        super().__init__()
        if (len(interpolators) != len(entries)):
            raise ValueError('interpolators must have the same length as entries')
        # A B-spline grid needs at least 2 control points per dimension.
        self.num_control_points = max(num_control_points, 2)
        self.deformation_sigma = max(deformation_sigma, 1)
        self.spatial_rank = spatial_rank
        self.interpolators = interpolators
        self.fill_value = fill_value
        self.p = p
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        # Bernoulli gate: skip the deformation with probability 1 - p.
        if (self.p < np.random.random()):
            return sample
        for entry in self.entries:
            if (entry not in sample):
                raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
        # Build one random B-spline transform from the first entry's spatial shape;
        # the same transform is applied to all entries/channels.
        shape = sample[self.entries[0]].shape[:self.spatial_rank]
        img = sitk.GetImageFromArray(np.zeros(shape))
        transform_mesh_size = [self.num_control_points] * img.GetDimension()
        bspline_transformation = sitk.BSplineTransformInitializer(img, transform_mesh_size)
        params = bspline_transformation.GetParameters()
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24,
        # which made this line raise AttributeError; the builtin `float`
        # produces the identical float64 dtype.
        params = np.asarray(params, dtype=float)
        params += (np.random.randn(params.shape[0]) * self.deformation_sigma)
        bspline_transformation.SetParameters(tuple(params))
        for (interpolator_idx, entry) in enumerate(self.entries):
            data = sample[entry]
            # Channels live in the last axis; each is resampled independently.
            for channel in range(data.shape[-1]):
                img = sitk.GetImageFromArray(data[..., channel])
                resampler = sitk.ResampleImageFilter()
                resampler.SetReferenceImage(img)
                resampler.SetInterpolator(self.interpolators[interpolator_idx])
                resampler.SetDefaultPixelValue(self.fill_value)
                resampler.SetTransform(bspline_transformation)
                img_deformed = resampler.Execute(img)
                sample[entry][..., channel] = sitk.GetArrayFromImage(img_deformed)
        return sample
def gru_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    """One GRU step: returns the new hidden state.

    Gate weights are stacked (reset, update, new) along dim 0 of w_ih/w_hh,
    matching torch.nn.GRUCell's layout.
    """
    gates_in = torch.mm(input, w_ih.t()) + b_ih
    gates_hid = torch.mm(hidden, w_hh.t()) + b_hh
    i_r, i_i, i_n = gates_in.chunk(3, 1)
    h_r, h_i, h_n = gates_hid.chunk(3, 1)
    reset = torch.sigmoid(i_r + h_r)
    update = torch.sigmoid(i_i + h_i)
    candidate = torch.tanh(i_n + reset * h_n)
    # Interpolate between candidate and previous hidden state via the update gate.
    return candidate + update * (hidden - candidate)
def test_numpytype_datetime64_10s():
    """A datetime64 dtype with a step multiplier ('10s') must survive a
    stringify -> parse -> stringify round trip unchanged."""
    t = NumpyType('datetime64[10s]')
    assert (str(parser.parse(str(t))) == str(t))
# NOTE(review): the decorator lines were truncated in the extracted source
# (they began with a bare `.parametrize(...)`, which is a syntax error);
# restored as `@pytest.mark.parametrize`, the standard idiom in nnabla
# function tests.  Requires `pytest` to be imported at the top of the file —
# confirm against the original file.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('inshape', [(2, 3, 4)])
@pytest.mark.parametrize('shifts', [None, (0, 0, 2), (0, -2, 1), (1, 0, -1)])
@pytest.mark.parametrize('border_mode', ['nearest', 'reflect'])
def test_shift_forward_backward(seed, inshape, shifts, border_mode, ctx, func_name):
    """Check F.shift's forward and backward passes against the reference implementation."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    function_tester(rng, F.shift, ref_shift, inputs, func_args=[shifts, border_mode], ctx=ctx, func_name=func_name, atol_f=1e-06, atol_b=0.01)
def get_key(key_data, fn):
    """Look up the key name recorded for file *fn* in a CSV mapping.

    Args:
        key_data: path or file-like object readable by pandas, with
            'name' and 'keyname' columns.
        fn: file name to look up.

    Returns:
        The 'keyname' value for *fn*.

    Raises:
        KeyError: if *fn* is not present in the CSV.
    """
    df = pd.read_csv(key_data)
    # Previous version iterated with a loop variable that shadowed the
    # `filename` list; zip pairs the two columns row by row instead.
    key_map = dict(zip(df['name'], df['keyname']))
    return key_map[fn]
class UnmaskedArrayType(ContentType, ak._lookup.UnmaskedLookup):
    """Numba type model for awkward UnmaskedArray: an option-type wrapper in
    which every element is valid, so getitem always yields a present value
    wrapped in numba's optional type.

    NOTE(review): `from_form` takes `cls` but carries no visible
    @classmethod decorator here — it may have been lost in extraction;
    confirm against the original file.
    """

    def from_form(cls, form):
        # Build the numba type from an awkward Form description.
        return UnmaskedArrayType(ak._connect.numba.arrayview.to_numbatype(form.content), form.parameters)

    def __init__(self, contenttype, parameters):
        super().__init__(name=f'ak.UnmaskedArrayType({contenttype.name}, {json.dumps(parameters)})')
        self.contenttype = contenttype
        self.parameters = parameters

    def has_field(self, key):
        # Field access is delegated straight to the wrapped content.
        return self.contenttype.has_field(key)

    def getitem_at(self, viewtype):
        # Result is optional even though unmasked elements are always valid,
        # to keep the option-type interface uniform.
        viewtype = ak._connect.numba.arrayview.wrap(self.contenttype, viewtype, None)
        return numba.types.optional(self.contenttype.getitem_at_check(viewtype))

    def lower_getitem_at(self, context, builder, rettype, viewtype, viewval, viewproxy, attype, atval, wrapneg, checkbounds):
        # Lowering: fetch the content's position, normalise the index, then
        # delegate to the content's getitem and mark the optional as valid.
        whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
        nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
        atval = regularize_atval(context, builder, viewproxy, attype, atval, wrapneg, checkbounds)
        output = context.make_helper(builder, rettype)
        nextviewtype = ak._connect.numba.arrayview.wrap(self.contenttype, viewtype, None)
        proxynext = context.make_helper(builder, nextviewtype)
        proxynext.pos = nextpos
        proxynext.start = viewproxy.start
        proxynext.stop = viewproxy.stop
        proxynext.arrayptrs = viewproxy.arrayptrs
        proxynext.pylookup = viewproxy.pylookup
        # wrapneg/checkbounds already handled above, hence False, False here.
        outdata = self.contenttype.lower_getitem_at_check(context, builder, rettype.type, nextviewtype, proxynext._getvalue(), proxynext, numba.intp, atval, False, False)
        # Unmasked arrays never produce a missing value.
        output.valid = numba.core.cgutils.true_bit
        output.data = outdata
        return output._getvalue()

    def ndim(self):
        return self.contenttype.ndim

    def inner_dtype(self):
        # No flat numeric dtype: this node is an option-type wrapper.
        return None

    def is_optiontype(self):
        return True

    def is_recordtype(self):
        return self.contenttype.is_recordtype
def complex_array_equal(arr1, arr2, precision=5):
    """Element-wise closeness check with absolute tolerance 2**-precision.

    Compares paired elements (extra elements in the longer input are
    ignored, as with zip) and returns True only if every pair is within
    tolerance.
    """
    tolerance = 2 ** (-precision)
    return all(abs(a - b) < tolerance for a, b in zip(arr1, arr2))
def test_regression_twopaths(netfilename):
    """Verify the cached two-paths matrix against direct computation for
    every (mode-A, mode-B) node pair, reporting wall-clock time."""
    print('testing two-paths values for ', netfilename)
    start = time.time()
    graph = BipartiteGraph(netfilename)
    graph.printSummary()
    for node_a in graph.nodeModeIterator(MODE_A):
        for node_b in graph.nodeModeIterator(MODE_B):
            assert graph.twoPathsMatrix.getValue(node_a, node_b) == graph.twoPaths(node_a, node_b)
    print('OK,', time.time() - start, 's')
    print()
def main():
    """Compute bbox statistics over one or more COCO-format annotation
    files, print a percentile summary, and optionally pickle the
    concatenated DataFrame next to the first input file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('ann_files', nargs='+', help='COCO-format annotation file')
    parser.add_argument('--save_pkl', action='store_true', help='whether to save bbox dataframe as pkl')
    parser.add_argument('--verbose', action='store_true', help='whether to print details')
    parser.add_argument('--num_print', type=int, default=0, help='number to print the first few examples')
    args = parser.parse_args()
    basenames = []
    frames = []
    for ann_file in args.ann_files:
        basenames.append(os.path.splitext(os.path.basename(ann_file))[0])
        frames.append(calc_coco_format_stats(ann_file, verbose=args.verbose, num_print=args.num_print))
    combined = pd.concat(frames)
    percentiles = [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]
    print(combined.describe(percentiles=percentiles))
    # Output path: directory of the first annotation file, name joining all inputs.
    pkl_file = os.path.join(os.path.dirname(args.ann_files[0]), '_'.join(basenames) + '_stats.pkl')
    if args.save_pkl:
        print('output pkl file:', pkl_file)
        combined.to_pickle(pkl_file)
def test_regular_array_3():
    """jvp and vjp of a product of two fixed entries must agree between the
    awkward arrays and their pure-JAX counterparts."""

    def product_of_two(x):
        return x[0, 0] * x[2, 1]

    value_jvp, jvp_grad = jax.jvp(product_of_two, (test_regulararray,), (test_regulararray_tangent,))
    value_jvp_jax, jvp_grad_jax = jax.jvp(product_of_two, (test_regulararray_jax,), (test_regulararray_tangent_jax,))
    value_vjp, vjp_func = jax.vjp(product_of_two, test_regulararray)
    value_vjp_jax, vjp_func_jax = jax.vjp(product_of_two, test_regulararray_jax)
    assert ak.to_list(value_jvp) == value_jvp_jax.tolist()
    assert ak.to_list(value_vjp) == value_vjp_jax.tolist()
    assert ak.to_list(jvp_grad) == jvp_grad_jax.tolist()
    assert ak.to_list(vjp_func(value_vjp)[0]) == vjp_func_jax(value_vjp_jax)[0].tolist()
def pwdist_upperbound(M1, S1, M2=None, S2=None, symmetric=False, means_only=False, diagonal_cov=False, commute=False, device=None, return_dmeans=False):
    """Upper bound on pairwise squared 2-Wasserstein distances between
    Gaussians (M1[i], S1[i]) and (M2[j], S2[j]):
    ||m_i - m_j||^2 + tr(S_i) + tr(S_j).

    If M2 is None the distance is computed within M1/S1 and symmetry is
    exploited.  `diagonal_cov` and `commute` are kept for interface
    compatibility but unused.  Returns D, or (D, D_means) when
    return_dmeans is set (D_means = plain mean distances).

    Fixes over the previous version: the `if means_only:` branch inside the
    `not means_only` loop was unreachable dead code, and D_means was
    recomputed a second time for return_dmeans.
    """
    if M2 is None:
        symmetric = True
        M2, S2 = M1, S1
    n1, n2 = len(M1), len(M2)
    if symmetric:
        pairs = list(itertools.combinations(range(n1), 2))
    else:
        pairs = list(itertools.product(range(n1), range(n2)))
    D = torch.zeros((n1, n2), device=device, dtype=M1.dtype)
    logger.info('Computing gaussian-to-gaussian wasserstein distances...')
    if means_only or return_dmeans:
        D_means = torch.cdist(M1, M2)
    if means_only:
        D = D_means
    else:
        for i, j in pairs:
            # Squared mean distance plus the two covariance traces.
            D[i, j] = ((M1[i] - M2[j]) ** 2).sum(axis=-1) + (S1[i] + S2[j]).diagonal(dim1=-2, dim2=-1).sum(-1)
            if symmetric:
                D[j, i] = D[i, j]
    if return_dmeans:
        return (D, D_means)
    return D
def convert_by_vocab(vocab, items, max_seq_length=None, blank_id=0, unk_id=1, uncased=False):
    """Map tokens to vocabulary ids.

    Args:
        vocab: mapping from token to id.
        items: iterable of tokens.
        max_seq_length: if given, truncate to this length or right-pad
            with blank_id up to it.
        blank_id: id used for padding.
        unk_id: id used for out-of-vocabulary tokens.
        uncased: lower-case tokens before lookup.

    Returns:
        List of ids.
    """
    output = []
    for item in items:
        if uncased:
            item = item.lower()
        # Single lookup with a default replaces the `in` test + indexing pair.
        output.append(vocab.get(item, unk_id))
    # `is not None` (identity) instead of `!= None`; pad in one extend call.
    if max_seq_length is not None:
        if len(output) > max_seq_length:
            output = output[:max_seq_length]
        else:
            output.extend([blank_id] * (max_seq_length - len(output)))
    return output
class SetConv(nn.Module):
    """MSCN-style set convolution: each input set (samples, predicates) is
    run through a two-layer MLP, masked-averaged over the set dimension,
    concatenated, and mapped to a sigmoid output."""

    def __init__(self, sample_feats, predicate_feats, hid_units):
        super(SetConv, self).__init__()
        self.sample_feats = sample_feats
        self.hid_units = hid_units
        # Layer creation order preserved so parameter initialisation under a
        # fixed seed matches the original implementation.
        self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
        self.sample_mlp2 = nn.Linear(hid_units, hid_units)
        self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
        self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
        self.out_mlp1 = nn.Linear(hid_units * 2, hid_units)
        self.out_mlp2 = nn.Linear(hid_units, 1)

    def forward(self, samples, predicates, sample_mask, predicate_mask):
        # Encode each set element, zero out padding, then average over the set.
        sample_hid = F.relu(self.sample_mlp2(F.relu(self.sample_mlp1(samples)))) * sample_mask
        sample_avg = sample_hid.sum(dim=1, keepdim=False) / sample_mask.sum(1, keepdim=False)
        predicate_hid = F.relu(self.predicate_mlp2(F.relu(self.predicate_mlp1(predicates)))) * predicate_mask
        predicate_avg = predicate_hid.sum(dim=1, keepdim=False) / predicate_mask.sum(1, keepdim=False)
        combined = torch.cat((sample_avg, predicate_avg), 1)
        return torch.sigmoid(self.out_mlp2(F.relu(self.out_mlp1(combined))))

    def name(self):
        return f'mscn_hid{self.hid_units}_sample{self.sample_feats}'
class SearcTestCase(TestCase):
    """Cross-checks the lexical search implementations against one another."""

    def test_lexical_search_algs(self):
        """All algorithms must return the same index on a fixed text and on
        random small-alphabet strings."""
        rolling_hash = PolynomialRollingHash(base=10, modulus=65537)
        searchers = [RabinKarpSearch(hash_function=rolling_hash), KMPSearch(), BoyerMooreSearch(), NaiveSearch()]

        def check_agreement(pattern, text):
            results = [searcher.search(pattern, text) for searcher in searchers]
            for result in results[1:]:
                self.assertEqual(results[0], result)

        pattern = 'Jane Austen'
        text = 'Sense and Sensibility, Pride and Prejudice, Emma, Mansfield Park, Northanger Abbey, Persuasion, and Lady Susan were written by Jane Austen and are important works of English literature.'
        check_agreement(pattern, text)
        # Fuzz with short random patterns over a 3-letter alphabet.
        for _ in range(10):
            pattern = ''.join(random.choices(['a', 'b', 'c'], k=5))
            text = ''.join(random.choices(['a', 'b', 'c'], k=100))
            check_agreement(pattern, text)
def _concat_clip(data_bundle, max_len, concat_field_name='raw_chars'):
    """Concatenate context + '[SEP]' + question per instance, clipping the
    context so the combined token sequence fits in max_len, and rewrite
    answer spans accordingly.

    The budget is max_len - 3: presumably reserving slots for special
    tokens ([CLS]/[SEP]/[SEP]) added downstream — TODO confirm.
    Each dataset in the bundle is rebuilt in place under its original name.
    """
    tokenizer = get_tokenizer('cn-char', lang='cn')
    for name in list(data_bundle.datasets.keys()):
        ds = data_bundle.get_dataset(name)
        data_bundle.delete_dataset(name)
        new_ds = DataSet()
        for ins in ds:
            new_ins = deepcopy(ins)
            context = ins['context']
            question = ins['question']
            cnt_lst = tokenizer(context)
            q_lst = tokenizer(question)
            # -1 marks "no answer span available" for this instance.
            answer_start = (- 1)
            if (((len(cnt_lst) + len(q_lst)) + 3) > max_len):
                # Over budget: clip the context, keeping the answer inside the
                # window when an answer span is present.
                if (('answer_starts' in ins) and ('answers' in ins)):
                    answer_start = int(ins['answer_starts'][0])
                    answer = ins['answers'][0]
                    answer_end = (answer_start + len(answer))
                    if (answer_end > ((max_len - 3) - len(q_lst))):
                        # Answer would be cut off: shift the window so it ends
                        # exactly at the answer end.
                        span_start = (((answer_end + 3) + len(q_lst)) - max_len)
                        span_end = answer_end
                    else:
                        # Answer fits in the leading window.
                        span_start = 0
                        span_end = ((max_len - 3) - len(q_lst))
                    cnt_lst = cnt_lst[span_start:span_end]
                    # Re-base the answer span onto the clipped context.
                    answer_start = int(ins['answer_starts'][0])
                    answer_start -= span_start
                    answer_end = (answer_start + len(ins['answers'][0]))
                else:
                    # No answer span: simple truncation from the front.
                    cnt_lst = cnt_lst[:((max_len - len(q_lst)) - 3)]
            elif (('answer_starts' in ins) and ('answers' in ins)):
                # Fits without clipping: take the span as-is.
                answer_start = int(ins['answer_starts'][0])
                answer_end = (answer_start + len(ins['answers'][0]))
            tokens = ((cnt_lst + ['[SEP]']) + q_lst)
            new_ins['context_len'] = len(cnt_lst)
            new_ins[concat_field_name] = tokens
            if (answer_start != (- 1)):
                new_ins['target_start'] = answer_start
                # target_end is inclusive, hence the -1.
                new_ins['target_end'] = (answer_end - 1)
            new_ds.append(new_ins)
        data_bundle.set_dataset(new_ds, name)
    return data_bundle
class FairseqBMUF(FairseqOptimizer):
    """Blockwise Model Update Filtering (BMUF) wrapper around a local optimizer.

    Workers train locally and periodically synchronise "block gradients"
    (parameter deltas) across GPUs with block momentum.

    NOTE(review): the extracted source had decorator lines mangled to bare
    `_grad()` tokens; they are restored here as `@torch.no_grad()`, and
    `add_args` / `optimizer` / `optimizer_config` are restored as
    @staticmethod / @property to match the FairseqOptimizer API — confirm
    against the original file.
    """

    def __init__(self, args, optimizer):
        super().__init__(args)
        self._optimizer = optimizer
        self._num_updates = 0
        self.sync_iter = self.args.global_sync_iter
        self.block_momentum = self.args.block_momentum
        self.block_lr = self.args.block_lr
        self._reset_local_data()
        self.warmup_iteration = self.args.warmup_iterations
        self.use_nbm = self.args.use_nbm
        # Kept so non-averaging warmup sync can restore the optimizer state.
        self.initial_state = self._optimizer.state_dict()
        self.average_sync = self.args.average_sync

    @staticmethod
    def add_args(parser):
        """Add BMUF-specific command-line arguments."""
        parser.add_argument('--block-lr', default=1, type=float, help='block learning rate for bmuf')
        parser.add_argument('--block-momentum', default=0.875, type=float, help='block momentum for bmuf')
        parser.add_argument('--global-sync-iter', default=50, type=int, help='Iteration for syncing global model')
        parser.add_argument('--warmup-iterations', default=500, type=int, help='warmup iterations for model to broadcast')
        parser.add_argument('--use-nbm', default=True, action='store_true', help='Specify whether you want to use classical BM / Nesterov BM')
        parser.add_argument('--average-sync', default=True, action='store_true', help='Specify whether you want to average the local momentum after each sync')

    @property
    def optimizer(self):
        # Underlying torch optimizer of the wrapped fairseq optimizer.
        return self._optimizer.optimizer

    @property
    def optimizer_config(self):
        return self._optimizer.optimizer_config

    def get_lr(self):
        return self._optimizer.get_lr()

    def set_lr(self, lr):
        self._optimizer.set_lr(lr)

    def state_dict(self):
        return self._optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        self._optimizer.load_state_dict(state_dict, optimizer_overrides)

    def multiply_grads(self, c):
        self._optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm):
        return self._optimizer.clip_grad_norm(max_norm)

    def average_params(self):
        self._optimizer.average_params()

    def _block_sync(self):
        # One BMUF sync: form block gradients, all-reduce them, apply momentum.
        if (self.block_momentum != 0):
            self._calc_grad()
        self._avg_grad_from_all_gpus()
        if (self.block_momentum != 0):
            self._update_global_model()
        if self.average_sync:
            self.average_params()

    def _is_warmup_end(self):
        # Exactly at the end of warmup we broadcast instead of block-syncing.
        return self.get_num_updates() == self.warmup_iteration

    def _is_bmuf_iter(self):
        return (self.get_num_updates() > self.warmup_iteration) and ((self.get_num_updates() % self.sync_iter) == 0)

    def _warmup_sync(self, root_rank=0):
        # Broadcast the model from root_rank so every worker starts identical.
        for param in self.params:
            dist.broadcast(param.data, src=root_rank)
        if self.average_sync:
            self._optimizer.average_params()
        else:
            self._optimizer.load_state_dict(self.initial_state)
        self._reset_local_data()

    def step(self, closure=None):
        self._optimizer.step(closure)
        self.set_num_updates(self.get_num_updates() + 1)
        if self._is_warmup_end():
            self._warmup_sync()
        elif self._is_bmuf_iter():
            self._block_sync()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def get_num_updates(self):
        return self._num_updates

    def set_num_updates(self, num_updates):
        self._num_updates = num_updates

    @torch.no_grad()
    def _reset_local_data(self):
        # Snapshot current params as the "global" model and zero the
        # smoothed-gradient / block-gradient buffers.
        self.global_params = [torch.zeros_like(p.data) for p in self.params]
        self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        for (param, global_param) in zip(self.params, self.global_params):
            global_param.copy_(param.data)

    @torch.no_grad()
    def _calc_grad(self):
        # Block "gradient" = previous global params minus current local params.
        for (index, (param, global_param)) in enumerate(zip(self.params, self.global_params)):
            self.grads[index] = (global_param - param.data)

    def _avg_grad_from_all_gpus(self):
        for (index, param) in enumerate(self.params):
            # With zero momentum we average raw params; otherwise block gradients.
            sync_para = (param.data if (self.block_momentum == 0) else self.grads[index])
            sync_para /= float(dist.get_world_size())
            dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)

    @torch.no_grad()
    def _update_global_model(self):
        for (index, (param, global_param, smoothed_grad, grad)) in enumerate(zip(self.params, self.global_params, self.smoothed_grads, self.grads)):
            # smoothed_grad(t) = momentum * smoothed_grad(t-1) + block_lr * grad
            smoothed_grad = ((self.block_momentum * smoothed_grad) + (self.block_lr * grad))
            param.data.copy_((global_param - smoothed_grad))
            if self.use_nbm:
                # Nesterov-style block momentum: apply an extra look-ahead step.
                param.data.copy_((param.data - (self.block_momentum * smoothed_grad)))
            self.smoothed_grads[index] = smoothed_grad
            global_param.copy_(param.data)
def get_conda_environment_content(build_metadata):
    """Render the environment.yml content for one CI build.

    *build_metadata* supplies the channel and the conda/pip dependency
    lists; the jinja `environment` (module-level) provides the
    `get_package_with_constraint` filter used in the template.
    """
    template = environment.from_string("\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for CI builds:\n# build_tools/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in build_metadata['conda_dependencies'] %}\n - {{ conda_dep | get_package_with_constraint(build_metadata) }}\n {% endfor %}\n {% if build_metadata['pip_dependencies'] %}\n - pip\n - pip:\n {% for pip_dep in build_metadata.get('pip_dependencies', []) %}\n - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n {% endfor %}\n {% endif %}".strip())
    return template.render(build_metadata=build_metadata)
# NOTE(review): the line below looks like a mangled decorator — in dace's
# ufunc tests this is typically `@compare_numpy_output(check_dtype=True)`;
# confirm against the original file.
_numpy_output(check_dtype=True)
def test_ufunc_arccosh_c(A: dace.complex64[10]):
    """Elementwise arccosh over a complex64 vector (checked against NumPy
    by the comparison decorator)."""
    return np.arccosh(A)
def setup_basic_build_env():
    """Install the base toolchain (compiler, LLVM, Vulkan, sccache, Python)
    and return the (sccache, python, pip) handles."""
    uname = platform.uname()
    if (uname.system, uname.machine) == ('Windows', 'AMD64'):
        # 64-bit Windows: clang is installed as a helper only; MSVC compiles.
        setup_clang(as_compiler=False)
        setup_msvc()
    else:
        setup_clang()
    setup_llvm()
    setup_vulkan()
    sccache = setup_sccache()
    python, pip = setup_python(get_desired_python_version())
    return sccache, python, pip
def test_eq_different_tests(chromosome):
    """Suites that share only part of their test cases must not compare equal."""
    shared = MagicMock(tcc.TestCaseChromosome)
    only_ours = MagicMock(tcc.TestCaseChromosome)
    only_theirs = MagicMock(tcc.TestCaseChromosome)
    other = tsc.TestSuiteChromosome()
    chromosome.add_test_case_chromosomes([shared, only_ours])
    other.add_test_case_chromosomes([shared, only_theirs])
    assert not chromosome.__eq__(other)
def get_cifar_dataset(config: ml_collections.ConfigDict) -> Tuple[(tf.data.Dataset, tf.data.Dataset, tf.data.Dataset)]:
    """Load CIFAR-10 and return shuffled/batched train, val and test pipelines.

    The first `config.num_training_examples` training images form the train
    split; the remainder of the official train set becomes validation.
    """
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
    split = config.num_training_examples
    x_val, y_val = x_train[split:], y_train[split:]
    x_train, y_train = x_train[:split], y_train[:split]
    train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(config.buffer_size).batch(config.batch_size)
    val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(config.batch_size)
    test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(config.batch_size)
    return (train_ds, val_ds, test_ds)
def test_regression_generator(test_path):
    """End-to-end regression test for RegressionGenerator: metadata,
    sample streaming against a stored .npz reference, restart, and repr."""
    stream = RegressionGenerator(n_samples=100, n_features=20, n_targets=4, n_informative=6, random_state=0)
    assert (stream.n_remaining_samples() == 100)
    # Feature/target metadata must match the generated naming scheme.
    expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4', 'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9', 'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14', 'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19']
    assert (stream.feature_names == expected_names)
    assert (stream.target_values == ([float] * stream.n_targets))
    expected_names = ['target_0', 'target_1', 'target_2', 'target_3']
    assert (stream.target_names == expected_names)
    assert (stream.n_features == 20)
    assert (stream.n_cat_features == 0)
    assert (stream.n_num_features == 20)
    assert (stream.n_targets == 4)
    assert (stream.get_data_info() == 'Regression Generator - 4 targets, 20 features')
    assert (stream.has_more_samples() is True)
    assert (stream.is_restartable() is True)
    # Compare the generated samples against the stored reference fixture.
    test_file = os.path.join(test_path, 'regression_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']
    (X, y) = stream.next_sample()
    assert np.allclose(X[0], X_expected[0])
    assert np.allclose(y[0], y_expected[0])
    # last_sample must replay the sample just drawn.
    (X, y) = stream.last_sample()
    assert np.allclose(X[0], X_expected[0])
    assert np.allclose(y[0], y_expected[0])
    # After a restart the full first batch must match the fixture again.
    stream.restart()
    (X, y) = stream.next_sample(10)
    assert np.allclose(X, X_expected)
    assert np.allclose(y, y_expected)
    assert (stream.n_targets == y.shape[1])
    assert (stream.n_features == X.shape[1])
    assert ('stream' == stream._estimator_type)
    expected_info = 'RegressionGenerator(n_features=20, n_informative=6, n_samples=100, n_targets=4,\n random_state=0)'
    assert (stream.get_info() == expected_info)
def mwt_name(base_dir, short_name, dataset):
    """Path of the UD multi-word-token expansion file for a treebank split."""
    filename = f'{short_name}-ud-{dataset}-mwt.json'
    return os.path.join(base_dir, filename)
class ImagenetRunConfig(RunConfig):
    """RunConfig specialised for ImageNet training: adds data-loading knobs
    (worker count, random-resize crop scale, colour distortion) on top of
    the generic schedule/optimizer settings."""

    def __init__(self, n_epochs=150, init_lr=0.05, lr_schedule_type='cosine', lr_schedule_param=None, dataset='imagenet', train_batch_size=256, test_batch_size=500, valid_size=None, opt_type='sgd', opt_param=None, weight_decay=4e-05, label_smoothing=0.1, no_decay_keys='bn', model_init='he_fout', init_div_groups=False, validation_frequency=1, print_frequency=10, n_worker=32, resize_scale=0.08, distort_color='normal', **kwargs):
        super(ImagenetRunConfig, self).__init__(n_epochs, init_lr, lr_schedule_type, lr_schedule_param, dataset, train_batch_size, test_batch_size, valid_size, opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys, model_init, init_div_groups, validation_frequency, print_frequency)
        self.n_worker = n_worker
        self.resize_scale = resize_scale
        self.distort_color = distort_color
        # Extra kwargs are accepted but ignored; keys printed for visibility.
        print(kwargs.keys())

    # NOTE(review): in comparable codebases this accessor is a @property;
    # the decorator may have been lost in extraction — confirm.
    def data_config(self):
        """Keyword arguments for the data-provider constructor."""
        return {'train_batch_size': self.train_batch_size, 'test_batch_size': self.test_batch_size, 'valid_size': self.valid_size, 'n_worker': self.n_worker, 'resize_scale': self.resize_scale, 'distort_color': self.distort_color}
def is_email(word):
    """Return True (and print a marker) if *word* contains an email address.

    BUG FIX: the previous pattern ('\\w*\\w*\\.\\w*') merely matched any
    token containing a dot (e.g. 'foo.bar') and never required an '@', so
    ordinary dotted words were misclassified as emails.  A minimal
    local@domain.tld pattern is used instead.
    """
    if re.search(r'[\w.+-]+@[\w-]+\.[\w.-]+', word):
        print('EMAIL', word)
        return True
    return False
def _reduce_with_dtype(onnx_op, name, allow_multi_dim_support=True):
    """Build ONNX symbolic overloads for a reduction op that accepts an
    optional `dtype` argument (torch.onnx export helper).

    NOTE(review): several decorator lines were truncated in extraction —
    the bare `_by_arg_count` / `_args(...)` tokens below are presumably
    `@symbolic_helper._overload_by_arg_count` and
    `@symbolic_helper.parse_args(...)`; confirm against torch.onnx source.
    """
    symbolic = _reduce_op_symbolic(onnx_op, allow_multi_dim_support=allow_multi_dim_support)
    _by_arg_count
    def reduce(g, *args, **kwargs):
        _args('v', 'none')
        def reduce_nodim(g, self, dtype):
            # Only a constant (i.e. absent) dtype is exportable to ONNX.
            if (dtype.node().kind() != 'prim::Constant'):
                return _unimplemented(name, 'dtype')
            return symbolic(g, self)
        # 'is' = int list of dims vs 'i' = single int, per parse_args codes.
        dim_desc = ('is' if allow_multi_dim_support else 'i')
        _args('v', dim_desc, 'i', 'none')
        def reduce_dim(g, self, dim, keepdim, dtype):
            if (dtype.node().kind() != 'prim::Constant'):
                return _unimplemented(name, 'dtype')
            return symbolic(g, self, dim, keepdim)
        return (reduce_nodim, reduce_dim)
    return reduce
def set_default_config(config):
    """Fill in defaults for TPU/run settings in *config* (mutated in place),
    validate the result, and return the same dict."""
    config['tpu_version'] = config.get('tpu_version', 3)
    cores_by_version = {3: 8, 4: 4}
    config['tpu_cores'] = cores_by_version[config['tpu_version']]
    # v4 chips count double, so the logical size is halved.
    config['tpu_size_logical'] = {3: config['tpu_size'], 4: config['tpu_size'] // 2}[config['tpu_version']]
    # The TPU name may reference any of the keys set so far.
    config['tpu_name'] = config['tpu_name'].format(**config)
    for key in ('tpu_network', 'tpu_subnetwork', 'tpu_tags'):
        config[key] = config.get(key, None)
    for key in ('tpu_reserved', 'tpu_internal_ips', 'restore_reinit'):
        config[key] = config.get(key, False)
    config['opt_gradient_accumulation_steps'] = config.get('opt_gradient_accumulation_steps', 1)
    config['restore_ckpt'] = config.get('restore_ckpt', None)
    config['restore_step'] = config.get('restore_step', 0)
    config['restore_retry'] = config.get('restore_retry', 0)
    config['wandb_run_id'] = config.get('wandb_run_id', None)
    # Sanity checks on the resolved configuration.
    assert (config['tpu_version'] in [3, 4])
    assert (config['tpu_cores'] == cores_by_version[config['tpu_version']])
    assert (config['tpu_size'] in [8, 32, 64, 128, 256, 512, 1024])
    assert ((config['model_vocab_size'] % config['tpu_cores']) == 0)
    return config
class SortOp(torch.autograd.Function):
    """Autograd wrapper around the native sort kernel (`ops.sort`).

    NOTE(review): `forward` lacks the customary @staticmethod decorator and
    no `backward` is visible here — the definition may be truncated or the
    decorator lost in extraction; confirm against the original file.
    """

    def forward(ctx, x, end_bit=None):
        # Sort only the significant bits for this dtype unless overridden;
        # fewer bits lets a radix-style sort do less work.
        if (end_bit is None):
            end_bit = _BITS_FOR_DTYPE[x.dtype]
        x_out = torch.empty_like(x)
        iota_out = torch.empty_like(x)
        # ops.sort writes the sorted values into x_out and, presumably, the
        # corresponding source indices into iota_out — confirm the kernel contract.
        ops.sort(x, end_bit, x_out, iota_out)
        return (x_out, iota_out)
class ProppyBaseEmbedder(nn.Module):
    """Embeds DOM nodes by concatenating text, tag, id, class and coordinate
    features, then projecting to a fixed dimension with a linear layer."""

    def __init__(self, dim, utterance_embedder, recursive_texts, attr_embed_dim, max_attr_tokens, min_id_freq, min_class_freq, dropout):
        super(ProppyBaseEmbedder, self).__init__()
        # Output dimension of the final projection.
        self._dim = dim
        self._utterance_embedder = utterance_embedder
        self._max_words = utterance_embedder.max_words
        # Whether a node's text includes the text of its descendants.
        self._recursive_texts = recursive_texts
        self._attr_embed_dim = attr_embed_dim
        # Tag vocabulary includes UNK/EOS sentinels on top of known HTML tags.
        tags = ([UNK, EOS] + TAGS)
        self._tag_embedder = TokenEmbedder(RandomEmbeddings(tags, attr_embed_dim))
        # id/class attribute vocabularies are frequency-thresholded; each
        # attribute is embedded as the average of its token embeddings.
        ids = read_frequency_vocab('frequent-ids', min_id_freq)
        self._id_embedder = AverageUtteranceEmbedder(TokenEmbedder(RandomEmbeddings(ids, attr_embed_dim)), max_attr_tokens)
        classes = read_frequency_vocab('frequent-classes', min_class_freq)
        self._classes_embedder = AverageUtteranceEmbedder(TokenEmbedder(RandomEmbeddings(classes, attr_embed_dim)), max_attr_tokens)
        # x ratio, y ratio, visibility flag.
        coords_dim = 3
        # text + tag + id + class + coords, concatenated.
        input_dim = ((self._utterance_embedder.embed_dim + (3 * attr_embed_dim)) + coords_dim)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(input_dim, dim)

    def embed_dim(self):
        return self._dim

    def token_embedder(self):
        return self._utterance_embedder.token_embedder

    def utterance_embedder(self):
        return self._utterance_embedder

    def forward(self, nodes):
        """Return a (len(nodes), dim) embedding matrix for a batch of DOM nodes."""
        texts = []
        for node in nodes:
            if self._recursive_texts:
                # Join the node's own text with all descendant texts, capped at max_words.
                text = ' '.join(node.all_texts(max_words=self._max_words))
            else:
                text = (node.text or '')
            texts.append(word_tokenize(text.lower()))
        text_embeddings = self._utterance_embedder(texts)
        tag_embeddings = self._tag_embedder.embed_tokens([node.tag for node in nodes])
        id_embeddings = self._id_embedder([word_tokenize(node.id_) for node in nodes])
        class_embeddings = self._classes_embedder([word_tokenize(' '.join(node.classes)) for node in nodes])
        # Normalised screen position plus a visibility indicator.
        coords = V(FT([[elem.x_ratio, elem.y_ratio, float(elem.visible)] for elem in nodes]))
        dom_embeddings = torch.cat((text_embeddings, tag_embeddings, id_embeddings, class_embeddings, coords), dim=1)
        return self.fc(dom_embeddings)
class LeNet(nn.Sequential):
    """LeNet-5 style CNN for 3-channel 32x32 inputs, 10 output classes.

    Optional dropout is applied after each hidden fully-connected layer.
    """

    def __init__(self, dropout=0):
        conv_stack = [
            nn.Conv2d(3, 6, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(6, 16, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        # After the conv stack the feature map is 16 x 5 x 5.
        classifier = [
            Flatten(16 * 5 * 5),
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(84, 10),
        ]
        super().__init__(*conv_stack, *classifier)
class VQLPIPSWithDiscriminator(nn.Module):
    """VQGAN loss: L1 + LPIPS perceptual reconstruction loss combined with an
    adversarial patch-discriminator term and the VQ codebook loss.

    `forward` serves two optimizers: index 0 returns the generator loss,
    index 1 the discriminator loss, each with a logging dict.
    """

    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_ndf=64, disc_loss='hinge'):
        super().__init__()
        assert (disc_loss in ['hinge', 'vanilla'])
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        # LPIPS is frozen (eval mode); used purely as a perceptual metric.
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
        # The adversarial term is disabled until this global step (adopt_weight).
        self.discriminator_iter_start = disc_start
        if (disc_loss == 'hinge'):
            self.disc_loss = hinge_d_loss
        elif (disc_loss == 'vanilla'):
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.')
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Balance the generator loss against the reconstruction loss by the
        ratio of their gradient norms at the last decoder layer."""
        if (last_layer is not None):
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            # Falls back to self.last_layer; presumably set by the owning model — TODO confirm.
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
        d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
        d_weight = (d_weight * self.discriminator_weight)
        return d_weight

    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None, cond=None, split='train'):
        """Return (loss, log_dict) for the generator (optimizer_idx == 0) or
        the discriminator (optimizer_idx == 1); log keys are prefixed by *split*."""
        # L1 reconstruction error, optionally augmented with the LPIPS term.
        rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
        if (self.perceptual_weight > 0):
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
        else:
            p_loss = torch.tensor([0.0])
        nll_loss = rec_loss
        nll_loss = torch.mean(nll_loss)
        if (optimizer_idx == 0):
            # Generator update: make reconstructions fool the discriminator.
            if (cond is None):
                assert (not self.disc_conditional)
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                # Conditional discriminator: condition concatenated on channels.
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = (- torch.mean(logits_fake))
            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad raises when no graph exists (eval mode): use zero weight.
                assert (not self.training)
                d_weight = torch.tensor(0.0)
            # Ramp-in: zero until discriminator_iter_start.
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = (nll_loss + ((d_weight * disc_factor) * g_loss))
            if (codebook_loss is not None):
                loss = (loss + (self.codebook_weight * codebook_loss.mean()))
            else:
                # Keep the log entry well-defined when no codebook loss is supplied.
                codebook_loss = torch.tensor(0.0)
            log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/quant_loss'.format(split): codebook_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/p_loss'.format(split): p_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
            return (loss, log)
        if (optimizer_idx == 1):
            # Discriminator update: real vs. reconstructed (both detached).
            if (cond is None):
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
            log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
            return (d_loss, log)
def _logm(A):
    """Compute the matrix logarithm of a square matrix.

    Dispatches on structure: upper-triangular input is handled directly;
    general input goes through a (real or complex) Schur decomposition.
    On an internal failure the result is filled with NaNs instead of
    raising.

    NOTE(review): relies on module-level helpers not visible in this chunk
    (`_logm_triu`, `_logm_force_nonsingular_triangular_matrix`, `schur`,
    `rsf2csf`, `SqrtmError`, `LogmError`).
    """
    A = np.asarray(A)
    if ((len(A.shape) != 2) or (A.shape[0] != A.shape[1])):
        raise ValueError('expected a square matrix')
    # Integer matrices cannot hold a (generally non-integer) logarithm.
    if issubclass(A.dtype.type, np.integer):
        A = np.asarray(A, dtype=float)
    keep_it_real = np.isrealobj(A)
    try:
        if np.array_equal(A, np.triu(A)):
            # Upper-triangular fast path.
            A = _logm_force_nonsingular_triangular_matrix(A)
            # A negative diagonal entry forces a complex logarithm.
            if (np.min(np.diag(A)) < 0):
                A = A.astype(complex)
            return _logm_triu(A)
        else:
            if keep_it_real:
                (T, Z) = schur(A)
                # Real Schur form may contain 2x2 blocks; convert to the
                # complex (triangular) Schur form only when needed.
                if (not np.array_equal(T, np.triu(T))):
                    (T, Z) = rsf2csf(T, Z)
            else:
                (T, Z) = schur(A, output='complex')
            T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
            U = _logm_triu(T)
            ZH = np.conjugate(Z).T
            # Undo the similarity transform: logm(A) = Z @ logm(T) @ Z^H.
            return Z.dot(U).dot(ZH)
    except (SqrtmError, LogmError):
        # Best-effort contract: signal failure with a NaN-filled result.
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
# NOTE(review): the line below appears to be a decorator whose leading '@'
# (and possibly part of its name, e.g. '@test_utils.test') was lost in
# extraction -- TODO confirm against the original file.
_utils.test(require=ti.extension.sparse)
def test_require_extensions_1():
    # Sanity check: the active Taichi backend is one of the listed archs.
    assert (ti.lang.impl.current_cfg().arch in [ti.cpu, ti.cuda, ti.metal])
def raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False, scope=None):
    """Run an RNN whose per-step inputs/outputs are driven by `loop_fn`.

    `loop_fn(time, cell_output, cell_state, loop_state)` returns
    (elements_finished, next_input, next_cell_state, emit_output,
    next_loop_state); at time 0 it is called with Nones to provide the
    initial input/state and the emit structure.  Returns
    (states, outputs, final_state), with states/outputs stacked over time
    and transposed to batch-major.

    NOTE(review): depends on TF-internal modules (vs, ops, nest, array_ops,
    math_ops, tensor_array_ops, control_flow_ops, ...) imported elsewhere
    in this file.
    """
    if (not _like_rnncell(cell)):
        raise TypeError('cell must be an instance of RNNCell')
    if (not callable(loop_fn)):
        raise TypeError('loop_fn must be a callable')
    parallel_iterations = (parallel_iterations or 32)
    with vs.variable_scope((scope or 'rnn')) as varscope:
        if context.in_graph_mode():
            # Cache created variables on the device where they are used.
            if (varscope.caching_device is None):
                varscope.set_caching_device((lambda op: op.device))
        time = constant_op.constant(0, dtype=dtypes.int32)
        # Initial call: obtain first input, initial state, emit structure.
        (elements_finished, next_input, initial_state, emit_structure, init_loop_state) = loop_fn(time, None, None, None)
        flat_input = nest.flatten(next_input)
        # Dummy int32 loop state when the caller does not use one.
        loop_state = (init_loop_state if (init_loop_state is not None) else constant_op.constant(0, dtype=dtypes.int32))
        input_shape = [input_.get_shape() for input_ in flat_input]
        static_batch_size = input_shape[0][0]
        # All inputs must agree on the (static) batch size.
        for input_shape_i in input_shape:
            static_batch_size.merge_with(input_shape_i[0])
        batch_size = static_batch_size.value
        const_batch_size = batch_size
        if (batch_size is None):
            # Fall back to the dynamic batch size.
            batch_size = array_ops.shape(flat_input[0])[0]
        nest.assert_same_structure(initial_state, cell.state_size)
        state = initial_state
        flat_state = nest.flatten(state)
        flat_state = [ops.convert_to_tensor(s) for s in flat_state]
        state = nest.pack_sequence_as(structure=state, flat_sequence=flat_state)
        if (emit_structure is not None):
            flat_emit_structure = nest.flatten(emit_structure)
            flat_emit_size = [(emit.shape if emit.shape.is_fully_defined() else array_ops.shape(emit)) for emit in flat_emit_structure]
            flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
        else:
            # Default: emit the raw cell output.
            emit_structure = cell.output_size
            flat_emit_size = nest.flatten(emit_structure)
            flat_emit_dtypes = ([flat_state[0].dtype] * len(flat_emit_size))
        flat_state_size = [(s.shape if s.shape.is_fully_defined() else array_ops.shape(s)) for s in flat_state]
        flat_state_dtypes = [s.dtype for s in flat_state]
        # TensorArrays accumulating the per-step emits and states.
        flat_emit_ta = [tensor_array_ops.TensorArray(dtype=dtype_i, dynamic_size=True, element_shape=tensor_shape.TensorShape([const_batch_size]).concatenate(_maybe_tensor_shape_from_tensor(size_i)), size=0, name=('rnn_output_%d' % i)) for (i, (dtype_i, size_i)) in enumerate(zip(flat_emit_dtypes, flat_emit_size))]
        emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_emit_ta)
        # Zero emit used to pad finished sequences.
        flat_zero_emit = [array_ops.zeros(_concat(batch_size, size_i), dtype_i) for (size_i, dtype_i) in zip(flat_emit_size, flat_emit_dtypes)]
        zero_emit = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_zero_emit)
        flat_state_ta = [tensor_array_ops.TensorArray(dtype=dtype_i, dynamic_size=True, element_shape=tensor_shape.TensorShape([const_batch_size]).concatenate(_maybe_tensor_shape_from_tensor(size_i)), size=0, name=('rnn_state_%d' % i)) for (i, (dtype_i, size_i)) in enumerate(zip(flat_state_dtypes, flat_state_size))]
        state_ta = nest.pack_sequence_as(structure=state, flat_sequence=flat_state_ta)

        def condition(unused_time, elements_finished, *_):
            # Keep looping until every batch element reports finished.
            return math_ops.logical_not(math_ops.reduce_all(elements_finished))

        def body(time, elements_finished, current_input, state_ta, emit_ta, state, loop_state):
            """One RNN step: run the cell, consult loop_fn, record results."""
            (next_output, cell_state) = cell(current_input, state)
            nest.assert_same_structure(state, cell_state)
            nest.assert_same_structure(cell.output_size, next_output)
            next_time = (time + 1)
            (next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state)
            nest.assert_same_structure(state, next_state)
            nest.assert_same_structure(current_input, next_input)
            nest.assert_same_structure(emit_ta, emit_output)
            loop_state = (loop_state if (next_loop_state is None) else next_loop_state)

            def _copy_some_through(current, candidate):
                # For finished elements keep `current`; otherwise take
                # `candidate`.  Scalars and TensorArrays pass through.
                def copy_fn(cur_i, cand_i):
                    if isinstance(cur_i, tensor_array_ops.TensorArray):
                        return cand_i
                    if (cur_i.shape.ndims == 0):
                        return cand_i
                    with ops.colocate_with(cand_i):
                        return array_ops.where(elements_finished, cur_i, cand_i)
                return nest.map_structure(copy_fn, current, candidate)
            emit_output = _copy_some_through(zero_emit, emit_output)
            next_state = _copy_some_through(state, next_state)
            emit_ta = nest.map_structure((lambda ta, emit: ta.write(time, emit)), emit_ta, emit_output)
            state_ta = nest.map_structure((lambda ta, state: ta.write(time, state)), state_ta, next_state)
            # Once finished, an element stays finished.
            elements_finished = math_ops.logical_or(elements_finished, next_finished)
            return (next_time, elements_finished, next_input, state_ta, emit_ta, next_state, loop_state)
        returned = control_flow_ops.while_loop(condition, body, loop_vars=[time, elements_finished, next_input, state_ta, emit_ta, state, loop_state], parallel_iterations=parallel_iterations, swap_memory=swap_memory)
        # Last four loop vars: state TA, emit TA, final state, loop state.
        (state_ta, emit_ta, final_state, final_loop_state) = returned[(- 4):]
        flat_states = nest.flatten(state_ta)
        # stack() yields time-major [T, B, D]; transpose to batch-major.
        flat_states = [array_ops.transpose(ta.stack(), (1, 0, 2)) for ta in flat_states]
        states = nest.pack_sequence_as(structure=state_ta, flat_sequence=flat_states)
        flat_outputs = nest.flatten(emit_ta)
        flat_outputs = [array_ops.transpose(ta.stack(), (1, 0, 2)) for ta in flat_outputs]
        outputs = nest.pack_sequence_as(structure=emit_ta, flat_sequence=flat_outputs)
        return (states, outputs, final_state)
def get_relative_freqs(type2freq):
    """Normalize a type->count mapping into a type->probability mapping."""
    total = sum(type2freq.values())
    return {word_type: (count / total) for (word_type, count) in type2freq.items()}
def preset_import(name):
    """Import a frequently used optional dependency by short name.

    Returns a module (or, for math types, a class) resolved via
    ``import_module_404ok``; raises ``NotImplementedError`` for names
    this helper does not know about.
    """
    if name in ('cv2', 'opencv'):
        # Prefer the pinned wrapper; fall back to plain OpenCV.
        try:
            from cvx2 import latest as mod
        except ModuleNotFoundError:
            mod = import_module_404ok('cv2')
        return mod
    if name in ('tf', 'tensorflow'):
        return import_module_404ok('tensorflow')
    if name == 'gfile':
        return import_module_404ok('google3.pyglib.gfile')
    if name == 'video_api':
        return import_module_404ok('google3.learning.deepmind.video.python.video_api')
    if name in ('bpy', 'bmesh', 'OpenEXR', 'Imath'):
        # These short names are also the module names.
        return import_module_404ok(name)
    if name in ('Vector', 'Matrix', 'Quaternion'):
        module = import_module_404ok('mathutils')
        return _get_module_class(module, name)
    if name == 'BVHTree':
        module = import_module_404ok('mathutils.bvhtree')
        return _get_module_class(module, name)
    raise NotImplementedError(name)
def train_img2state(opt):
    """Train PixelEncoder to regress robot state from a single image.

    Resumes from weights under ``opt.data_root`` when present, trains for
    ``opt.epoch_size`` epochs with L1 loss, and checkpoints the weights
    back to the same path after every epoch.
    """
    model = PixelEncoder(opt).cuda()
    model = nn.DataParallel(model, device_ids=[0, 1, 2, 3])
    # Compute the checkpoint path once; it is reused for saving below.
    weight_path = os.path.join(opt.data_root, 'data_{}/img2state_large.pth'.format(opt.test_id1))
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # catch Exception so only real load failures fall back to scratch.
    try:
        model.load_state_dict(torch.load(weight_path))
        print('continue training!')
    except Exception:
        print('training from scratch!')
    dataset = RobotStackdata.get_loader(opt)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    loss_fn = nn.L1Loss()
    for epoch in range(opt.epoch_size):
        epoch_loss = 0
        for (i, item) in enumerate(tqdm(dataset)):
            # Batch layout (from the loader): item[0] = (img_t0, action,
            # img_t1), item[2] = (state_t0, state_t1) -- TODO confirm
            # against RobotStackdata; the unused fields were dropped here.
            img = item[0][0].float().cuda()
            gt = item[2][0].float().cuda()
            out = model(img)
            loss = loss_fn(out, gt)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print('epoch:{} loss:{:.7f}'.format(epoch, (epoch_loss / len(dataset))))
        # Checkpoint every epoch so progress survives interruption.
        torch.save(model.state_dict(), weight_path)
# NOTE(review): the line below is a mangled decorator -- most likely
# '@pytest.hookimpl(hookwrapper=True)' with the '@pytest.hookimpl' part
# lost in extraction.  TODO confirm against the original file.
(hookwrapper=True)
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Generator[(None, Any, None)]:
    """Pytest collection hook wrapper.

    Replaces the default collection result with a SchemathesisCase for
    functions marked as schemathesis tests; otherwise re-raises whatever
    the default collection produced.
    """
    outcome = (yield)
    if is_schemathesis_test(obj):
        outcome.force_result(create(SchemathesisCase, parent=collector, test_function=obj, name=name))
    else:
        # Propagate any exception from other hook implementations.
        outcome.get_result()
# NOTE(review): the line below is a mangled decorator -- presumably
# "@app.route('/image', methods=['POST'])" with '@app.route' lost in
# extraction.  TODO confirm against the original file.
('/image', methods=['POST'])
def list_image_url():
    """Serve the next image to label.

    If the client already holds an image URL, re-serve it from the temp
    directory; otherwise move a random unlabeled image into temp and
    return its URL.  Responses echo back the swipe count.
    """
    swipes = request.get_json()['swipes']
    image_url = request.get_json()['image_url']
    if (image_url != 'none'):
        # Strip the leading '/media/' prefix to recover the filename.
        image_name = image_url[7:]
    if (image_url != 'none'):
        if (image_name in os.listdir(app.config['temp'])):
            image_path = os.path.join(app.config['temp'], image_name)
            msg = None
            return {'image': ('/media/' + image_name), 'path': image_path, 'msg': msg, 'swipes': swipes, 'reached': '1st'}
        else:
            return {'image': 'none', 'path': 'none', 'msg': 'none', 'swipes': swipes, 'reached': '1st'}
    else:
        src = app.config['path_for_unlabeled']
        image = None
        if len(os.listdir(src)):
            image = random.choice(os.listdir(src))
        else:
            # No unlabeled images remain.
            return {'image': 'none', 'path': 'none', 'msg': 'none', 'swipes': swipes, 'reached': '3rd'}
        image_path = os.path.join(src, image)
        # Move the picked image into temp so it is not handed out twice.
        msg = shutil.move(image_path, app.config['temp'])
        # NOTE(review): this branch returns int(swipes) while the others
        # return swipes unchanged, and omits the 'reached' key -- confirm
        # whether that asymmetry is intentional.
        return {'image': ('/media/' + image), 'path': image_path, 'msg': msg, 'swipes': int(swipes)}
def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key: str):
    """Tie encoder parameters to decoder parameters, module by module.

    Recursively walks matching submodules of ``decoder`` and ``encoder``
    and makes the encoder's ``weight``/``bias`` reference the decoder's
    tensors, skipping any module whose path contains ``skip_key``.
    Encoder submodules with no decoder counterpart are collected (their
    weights stay independently initialized).
    """
    uninitialized_encoder_weights: List[str] = []
    if (decoder.__class__ != encoder.__class__):
        logging.info(f'{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized.')

    def tie_encoder_to_decoder_recursively(decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, uninitialized_encoder_weights: List[str], skip_key: str, depth=0):
        assert (isinstance(decoder_pointer, nn.Module) and isinstance(encoder_pointer, nn.Module)), f'{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module'
        if (hasattr(decoder_pointer, 'weight') and (skip_key not in module_name)):
            # Parameterized leaf: share tensors and stop recursing.
            assert hasattr(encoder_pointer, 'weight')
            encoder_pointer.weight = decoder_pointer.weight
            if hasattr(decoder_pointer, 'bias'):
                assert hasattr(encoder_pointer, 'bias')
                encoder_pointer.bias = decoder_pointer.bias
            print((module_name + ' is tied'))
            return
        encoder_modules = encoder_pointer._modules
        decoder_modules = decoder_pointer._modules
        if (len(decoder_modules) > 0):
            assert (len(encoder_modules) > 0), f'Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}'
            all_encoder_weights = set([((module_name + '/') + sub_name) for sub_name in encoder_modules.keys()])
            encoder_layer_pos = 0
            for (name, module) in decoder_modules.items():
                if name.isdigit():
                    # Numbered layers: align indices, tolerating layers
                    # present on one side only.
                    encoder_name = str((int(name) + encoder_layer_pos))
                    decoder_name = name
                    if ((not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name]))) and (len(encoder_modules) != len(decoder_modules))):
                        encoder_layer_pos -= 1
                        continue
                elif (name not in encoder_modules):
                    continue
                elif (depth > 500):
                    # NOTE(review): this message literal was split across
                    # lines in the extracted source; reconstructed as one
                    # literal -- confirm exact wording against the original.
                    raise ValueError('Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model.')
                else:
                    decoder_name = encoder_name = name
                tie_encoder_to_decoder_recursively(decoder_modules[decoder_name], encoder_modules[encoder_name], ((module_name + '/') + name), uninitialized_encoder_weights, skip_key, depth=(depth + 1))
                # This encoder submodule has been handled (tied or visited).
                all_encoder_weights.remove(((module_name + '/') + encoder_name))
            uninitialized_encoder_weights += list(all_encoder_weights)
    tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key)
class L1Regularization(Regularizer):
    """L1 weight regularization, applied around the optimizer step.

    ``pre_op`` adds the subgradient ``value * sign(p)`` to each gradient
    before the step; ``post_op`` applies soft-thresholding (the proximal
    operator of the L1 norm) to the parameters after the step.
    """

    def __init__(self, model, value=0.001, filter={'parameter_name': is_not_bias, 'module': is_not_bn}, pre_op=False, post_op=True, report_sparsity=False, **kwargs):
        super(L1Regularization, self).__init__(model, value, filter=filter, **kwargs)
        self.pre_op = pre_op
        self.post_op = post_op
        self.report_sparsity = report_sparsity

    def pre_step(self):
        """Add the L1 subgradient to parameter gradients (when pre_op)."""
        if self.pre_op:
            with torch.no_grad():
                for (n, p) in self._named_parameters:
                    # BUG FIX: `add_(Number, Tensor)` is a deprecated torch
                    # overload; the explicit alpha= keyword form performs
                    # the same computation (grad += value * sign(p)).
                    p.grad.add_(p.sign(), alpha=self.value)
                    if self.report_sparsity:
                        logging.debug('Sparsity for %s is %s', n, sparsity(p))
            if self.log:
                logging.debug('L1 penalty of %s was applied pre optimization step', self.value)

    def post_step(self):
        """Soft-threshold parameters toward zero (when post_op)."""
        if self.post_op:
            with torch.no_grad():
                for (n, p) in self._named_parameters:
                    p.copy_(torch.nn.functional.softshrink(p, self.value))
                    if self.report_sparsity:
                        logging.debug('Sparsity for %s is %s', n, sparsity(p))
            if self.log:
                logging.debug('L1 penalty of %s was applied post optimization step', self.value)
class Bleu():
    """Thin wrapper around BleuScorer computing corpus-level BLEU-n."""

    def __init__(self, n=4):
        # Maximum n-gram order.
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res):
        """Score hypotheses `res` against references `gts`.

        Both arguments are dicts keyed by image id; each value is a list
        of sentences (exactly one hypothesis, one or more references).
        Returns (overall_score, per_image_scores).
        """
        assert (sorted(gts.keys()) == sorted(res.keys()))
        scorer = BleuScorer(n=self._n)
        for image_id in list(gts.keys()):
            hypothesis = res[image_id]
            references = gts[image_id]
            # Enforce the one-hypothesis / >=1-reference contract.
            assert (type(hypothesis) is list)
            assert (len(hypothesis) == 1)
            assert (type(references) is list)
            assert (len(references) >= 1)
            scorer += (hypothesis[0], references)
        (score, scores) = scorer.compute_score(option='closest', verbose=1)
        return (score, scores)

    def method(self):
        return 'Bleu'
# NOTE(review): the line below looks like a mangled class decorator --
# presumably '@CONVERTERS.register_module()'; confirm against the original.
_CONVERTERS.register_module()
class Muco3dhpConverter(BaseConverter):
    """Convert the MuCo-3DHP dataset into the HumanData npz format."""

    @staticmethod
    def get_intrinsic_matrix(f: List[float], c: List[float], inv: bool=False) -> np.ndarray:
        """Build a 3x3 pinhole intrinsic matrix from focal lengths ``f`` and
        principal point ``c``; return its inverse when ``inv`` is True.

        BUG FIX: declared @staticmethod -- the original had no ``self``
        parameter yet was invoked as ``self.get_intrinsic_matrix(...)``,
        which would have bound the instance to ``f``.
        """
        intrinsic_matrix = np.zeros((3, 3)).astype(np.float32)
        intrinsic_matrix[0, 0] = f[0]
        intrinsic_matrix[0, 2] = c[0]
        intrinsic_matrix[1, 1] = f[1]
        intrinsic_matrix[1, 2] = c[1]
        intrinsic_matrix[2, 2] = 1
        if inv:
            intrinsic_matrix = np.linalg.inv(intrinsic_matrix).astype(np.float32)
        return intrinsic_matrix

    def convert(self, dataset_path: str, out_path: str) -> dict:
        """Read MuCo-3DHP annotations and SMPL params, dump a HumanData npz.

        NOTE(review): despite the ``-> dict`` annotation this method returns
        None (as in the original); callers appear to rely on the file side
        effect only.
        """
        human_data = HumanData()
        # BUG FIX: the original unpacked FIVE lists into FOUR names, which
        # raises ValueError before any work is done.
        (image_path_, bbox_xywh_, keypoints2d_, cam_param_) = ([], [], [], [])
        smpl = {'body_pose': [], 'global_orient': [], 'betas': []}
        annot_path = dataset_path.replace('muco', 'extras/MuCo-3DHP.json')
        smpl_param_path = os.path.join(dataset_path, 'SMPLX/smpl_param.json')
        db = COCO(annot_path)
        with open(smpl_param_path) as f:
            smpl_params = json.load(f)
        for iid in tqdm(db.imgs.keys()):
            img = db.imgs[iid]
            (w, h) = (img['width'], img['height'])
            imgname = img['file_name']
            R = np.array(img['R']).reshape(3, 3)
            T = np.array(img['T']).reshape(3)
            K = self.get_intrinsic_matrix(img['f'], img['c'])
            ann_ids = db.getAnnIds(img['id'])
            anns = db.loadAnns(ann_ids)
            camera = CameraParameter(H=h, W=w)
            camera.set_KRT(K, R, T)
            parameter_dict = camera.to_dict()
            for (i, pid) in enumerate(ann_ids):
                try:
                    smpl_param = smpl_params[str(pid)]
                    (pose, shape, trans) = (np.array(smpl_param['pose']), np.array(smpl_param['shape']), np.array(smpl_param['trans']))
                    # Skip corrupted (NaN) parameter entries.  (`param_sum`
                    # renamed from `sum`, which shadowed the builtin.)
                    param_sum = ((pose.sum() + shape.sum()) + trans.sum())
                    if np.isnan(param_sum):
                        continue
                except KeyError:
                    continue
                joint_img = np.array(anns[i]['keypoints_img'])
                bbox = np.array(anns[i]['bbox'])
                keypoints_vis = np.array(anns[i]['keypoints_vis']).astype('int').reshape(-1, 1)
                # Require keypoint 14 to be visible -- TODO confirm which
                # joint index 14 is in this skeleton.
                if (not (int(keypoints_vis[14]) == 1)):
                    continue
                joint_img = np.hstack([joint_img, keypoints_vis])
                keypoints2d_.append(joint_img)
                bbox_xywh_.append(bbox)
                smpl['body_pose'].append(pose[3:].reshape((23, 3)))
                smpl['global_orient'].append(pose[:3])
                smpl['betas'].append(shape)
                cam_param_.append(parameter_dict)
                image_path_.append(f'images/{imgname}')
        smpl['body_pose'] = np.array(smpl['body_pose']).reshape((-1, 23, 3))
        smpl['global_orient'] = np.array(smpl['global_orient']).reshape((-1, 3))
        smpl['betas'] = np.array(smpl['betas']).reshape((-1, 10))
        bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4))
        # Append a confidence column of ones to each bbox.
        bbox_xywh_ = np.hstack([bbox_xywh_, np.ones([bbox_xywh_.shape[0], 1])])
        keypoints2d_ = np.array(keypoints2d_).reshape((-1, 21, 3))
        (keypoints2d_, mask) = convert_kps(keypoints2d_, 'muco', 'human_data')
        human_data['image_path'] = image_path_
        human_data['keypoints2d_mask'] = mask
        human_data['keypoints2d'] = keypoints2d_
        human_data['bbox_xywh'] = bbox_xywh_
        human_data['smpl'] = smpl
        human_data['cam_param'] = cam_param_
        human_data['config'] = 'muco3dhp'
        human_data.compress_keypoints_by_mask()
        if (not os.path.isdir(out_path)):
            os.makedirs(out_path)
        file_name = 'muco3dhp_no3dkp_train.npz'
        out_file = os.path.join(out_path, file_name)
        human_data.dump(out_file)
class SignedTensorProductFunctor(CovariantFunctorialConstruction):
    """Covariant functorial construction for signed (graded) tensor products.

    The class attributes configure the generic machinery of the base class.
    """
    # Method/functor name used by the covariant-construction framework.
    _functor_name = 'tensor'
    # Name of the category this construction maps into.
    _functor_category = 'SignedTensorProducts'
    # ASCII rendering of the tensor symbol.
    symbol = ' # '
    # Unicode rendering; `unicode_otimes` is a module-level glyph.
    unicode_symbol = f' {unicode_otimes} '

    def _repr_(self):
        # Sage-style repr hook (single underscores, not __repr__).
        return 'The signed tensor functorial construction'
class SpeechRecognitionModel(nn.Module):
    """CNN + residual-CNN feature extractor followed by a bidirectional GRU
    stack and a fully connected classifier head."""

    def __init__(self, n_cnn_layers, n_rnn_layers, rnn_dim, n_class, n_feats, stride=2, dropout=0.1):
        super(SpeechRecognitionModel, self).__init__()
        # The initial strided conv halves the feature dimension.
        n_feats = n_feats // 2
        self.cnn = nn.Conv2d(1, 32, 3, stride=stride, padding=(3 // 2))
        residual_blocks = [
            ResidualCNN(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats)
            for _ in range(n_cnn_layers)
        ]
        self.rescnn_layers = nn.Sequential(*residual_blocks)
        self.fully_connected = nn.Linear(n_feats * 32, rnn_dim)
        gru_stack = []
        for i in range(n_rnn_layers):
            # First GRU consumes rnn_dim features; later ones consume the
            # bidirectional (2x) output of their predecessor.
            gru_stack.append(BidirectionalGRU(
                rnn_dim=(rnn_dim if i == 0 else rnn_dim * 2),
                hidden_size=rnn_dim,
                dropout=dropout,
                batch_first=(i == 0),
            ))
        self.birnn_layers = nn.Sequential(*gru_stack)
        self.classifier = nn.Sequential(
            nn.Linear(rnn_dim * 2, rnn_dim),
            nn.Sigmoid(),
            nn.Dropout(dropout),
            nn.Linear(rnn_dim, n_class),
        )

    def forward(self, x):
        """Map a spectrogram batch to per-frame class scores.

        Input is assumed to be (batch, 1, features, time) -- the Conv2d
        expects a single input channel; confirm with the caller.
        """
        features = self.rescnn_layers(self.cnn(x))
        (batch, channels, freq, frames) = features.size()
        # Fold channels into the feature axis, then put time first for
        # the fully connected layer.
        features = features.view(batch, channels * freq, frames)
        features = features.transpose(1, 2)
        features = self.fully_connected(features)
        features = self.birnn_layers(features)
        return self.classifier(features)
def select_unit(t: float) -> Tuple[(str, float)]:
    """Pick a human-readable time unit for a duration ``t`` (in seconds).

    Returns (unit_name, scale_in_seconds); durations of a second or more
    (and anything outside the ns/us/ms magnitude bands) fall back to 's'.
    """
    # Group the base-10 magnitude into bands of three decades.
    magnitude = int(torch.tensor(t).log10().item() // 3)
    time_unit = {(- 3): 'ns', (- 2): 'us', (- 1): 'ms'}.get(magnitude, 's')
    time_scale = {'ns': 1e-09, 'us': 1e-06, 'ms': 0.001, 's': 1}[time_unit]
    return (time_unit, time_scale)
class pAdicLseriesOrdinary(pAdicLseries):
    """p-adic L-series of an elliptic curve at a good ordinary prime p."""

    def series(self, n=2, quadratic_twist=(+ 1), prec=5, eta=0):
        """Return the p-adic L-series as a power series in T.

        `prec` bounds the number of series coefficients; `n` controls the
        p-adic approximation level; `quadratic_twist` must be a fundamental
        discriminant coprime to p; `eta` selects the Teichmueller component
        (must be 0 when twisting).
        """
        n = ZZ(n)
        if (n < 1):
            raise ValueError(('n (=%s) must be a positive integer' % n))
        if ((self._p == 2) and (n == 1)):
            raise ValueError(('n (=%s) must be a at least 2 if p is 2' % n))
        if (prec < 1):
            raise ValueError(('Insufficient precision (%s)' % prec))
        # Normalize eta modulo the order of the Teichmueller character.
        eta = ((ZZ(eta) % (self._p - 1)) if (self._p != 2) else (ZZ(eta) % 2))
        D = ZZ(quadratic_twist)
        if (D != 1):
            # Validate the twist: fundamental discriminant, coprime to p,
            # and compatible with the conductor.
            if (eta != 0):
                raise NotImplementedError('quadratic twists only implemented for the 0th Teichmueller component')
            if ((D % 4) == 0):
                d = (D // 4)
                if ((not d.is_squarefree()) or ((d % 4) == 1)):
                    raise ValueError(('quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field' % D))
            elif ((not D.is_squarefree()) or ((D % 4) != 1)):
                raise ValueError(('quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field' % D))
            if (gcd(D, self._p) != 1):
                raise ValueError(('quadratic twist (=%s) must be coprime to p (=%s) ' % (D, self._p)))
            if (gcd(D, self._E.conductor()) != 1):
                for ell in prime_divisors(D):
                    if (valuation(self._E.conductor(), ell) > valuation(D, ell)):
                        raise ValueError(('cannot twist a curve of conductor (=%s) by the quadratic twist (=%s).' % (self._E.conductor(), D)))
        p = self._p
        # Sign of the modular symbol, from the parity of eta.
        si = (1 - (2 * (eta % 2)))
        if (prec == 1):
            if (eta == 0):
                # Constant term only: evaluate the interpolation formula.
                K = Qp(p, 20, print_mode='series')
                R = PowerSeriesRing(K, 'T', 1)
                L = self.modular_symbol(0, sign=(+ 1), quadratic_twist=D)
                chip = kronecker_symbol(D, p)
                # Euler factor: single factor at multiplicative reduction,
                # squared at good reduction.
                if ((self._E.conductor() % p) == 0):
                    L *= (1 - (chip / self.alpha()))
                else:
                    L *= ((1 - (chip / self.alpha())) ** 2)
                L /= (self._quotient_of_periods_to_twist(D) * self._E.real_components())
                L = R(L, 1)
                return L
            else:
                bounds = self._prec_bounds(n, prec, sign=si)
                padic_prec = 20
        else:
            bounds = self._prec_bounds(n, prec, sign=si)
            padic_prec = (max(bounds[1:]) + 5)
        verbose(('using p-adic precision of %s' % padic_prec))
        if (p == 2):
            res_series_prec = min((p ** (n - 2)), prec)
        else:
            res_series_prec = min((p ** (n - 1)), prec)
        verbose(('using series precision of %s' % res_series_prec))
        # Reuse a previously computed series when available.
        ans = self._get_series_from_cache(n, res_series_prec, D, eta)
        if (ans is not None):
            verbose('found series in cache')
            return ans
        K = QQ
        R = PowerSeriesRing(K, 'T', res_series_prec)
        T = R(R.gen(), res_series_prec)
        L = R(0)
        one_plus_T_factor = R(1)
        gamma_power = K(1)
        teich = self.teichmuller(padic_prec)
        if (p == 2):
            # p = 2 needs special Teichmueller lifts and generator gamma.
            teich = [0, 1, (- 1)]
            gamma = K(5)
            p_power = (2 ** (n - 2))
            a_range = 3
        else:
            teich = self.teichmuller(padic_prec)
            gamma = K((1 + p))
            p_power = (p ** (n - 1))
            a_range = p
        # Riemann sum over units mod p^n, grouped by powers of gamma.
        verbose(('Now iterating over %s summands' % ((p - 1) * p_power)))
        verbose_level = get_verbose()
        count_verb = 0
        for j in range(p_power):
            s = K(0)
            if ((verbose_level >= 2) and (((j / p_power) * 100) > (count_verb + 3))):
                verbose(('%.2f percent done' % ((float(j) / p_power) * 100)))
                count_verb += 3
            for a in range(1, a_range):
                b = (teich[a] * gamma_power)
                s += ((teich[a] ** eta) * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si).lift())
            L += (s * one_plus_T_factor)
            one_plus_T_factor *= (1 + T)
            gamma_power *= gamma
        verbose(('the series before adjusting the precision is %s' % L))
        # Move into Qp and truncate each coefficient to its provable
        # precision bound.
        K = Qp(p, padic_prec, print_mode='series')
        R = PowerSeriesRing(K, 'T', res_series_prec)
        L = R(L, res_series_prec)
        aj = L.list()
        if aj:
            aj = ([aj[0].add_bigoh((padic_prec - 2))] + [aj[j].add_bigoh(bounds[j]) for j in range(1, len(aj))])
            L = R(aj, res_series_prec)
        L /= self._quotient_of_periods_to_twist(D)
        if (si == (+ 1)):
            L /= self._E.real_components()
        self._set_series_in_cache(n, res_series_prec, D, eta, L)
        return L
    # Alias kept for backward compatibility.
    power_series = series

    def is_ordinary(self):
        # This class is only instantiated at ordinary primes.
        return True

    def is_supersingular(self):
        return False

    # NOTE(review): the line below appears to be a mangled decorator
    # (presumably '@cached_method'); TODO confirm against the original.
    _method
    def _c_bound(self, sign=(+ 1)):
        """Bound the p-adic valuation of modular-symbol denominators."""
        E = self._E
        p = self._p
        N = self._E.conductor()
        # Irreducible mod-p Galois representation: denominators are p-units.
        if E.galois_representation().is_irreducible(p):
            return 0
        if (self._implementation == 'sage'):
            m = E.modular_symbol_space(sign=1)
            b = m.boundary_map().codomain()
            C = b._known_cusps()
            if (sign == (+ 1)):
                return max([valuation(self.modular_symbol(a).denominator(), p) for a in C])
            else:
                try:
                    m = self._negative_modular_symbol
                except (KeyError, AttributeError):
                    # Lazily build the minus modular symbol space.
                    if (not hasattr(self, '_modular_symbol_negative')):
                        self._add_negative_space()
                    m = self._negative_modular_symbol
                return max([valuation(m(a).denominator(), p) for a in C])
        from sage.databases.cremona import CremonaDatabase
        isog = E.isogeny_class()
        t = 0
        if (N <= CremonaDatabase().largest_conductor()):
            E0 = E.optimal_curve()
        else:
            # Heuristic stand-in for the optimal curve: minimal period
            # lattice area in the isogeny class.
            ff = (lambda C: C.period_lattice().complex_area())
            E0 = min(isog.curves, key=ff)
        if (p == 2):
            t = 1
        if (sign == 1):
            t += E.torsion_order().valuation(p)
        elif (p == 2):
            t += 1
        if ((p == 2) and (E0.real_components() == 1)):
            t += 1
        L0 = E0.period_lattice().basis()
        L = E.period_lattice().basis()
        if (sign == 1):
            om = L[0]
            om0 = L0[0]
        else:
            om = L[1].imag()
            if (E.real_components() == 1):
                om *= 2
            # NOTE(review): this reads L[1] (curve E) rather than L0[1]
            # (curve E0), unlike the sign == 1 branch -- looks suspicious;
            # confirm against the original algorithm.
            om0 = L[1].imag()
            if (E0.real_components() == 1):
                om0 *= 2
        m = max(isog.matrix().list())
        q = (((om / om0) * m).round() / m)
        t += valuation(q, p)
        return max(t, 0)

    def _prec_bounds(self, n, prec, sign=(+ 1)):
        """Per-coefficient provable precision bounds for series()."""
        if (self._p == 2):
            e = self._e_bounds((n - 2), prec)
        else:
            e = self._e_bounds((n - 1), prec)
        # NOTE(review): `sign` is accepted but not forwarded to _c_bound()
        # (which defaults to +1) -- confirm whether that is intentional.
        c = self._c_bound()
        return [(e[j] - c) for j in range(len(e))]
def test_nthroot_mod_list():
    """Spot-check nthroot_mod_list on a composite modulus and an empty case."""
    expected_roots = [4, 6, 7, 9, 17, 19, 22, 32, 33, 43, 46, 48, 56, 58, 59, 61]
    assert (nthroot_mod_list((- 4), 4, 65) == expected_roots)
    # No solutions exist for this instance.
    assert (nthroot_mod_list(2, 3, 7) == [])
def get_type_of_msg(hall_symbol):
    """Classify a (magnetic) Hall symbol by its primed content.

    Mechanically: 1 when no apostrophe appears; 2 when " 1'" appears;
    4 when a primed " 1<letters>'" token appears; otherwise 3.
    """
    if "'" not in hall_symbol:
        return 1
    if " 1'" in hall_symbol:
        return 2
    has_primed_translation = bool(re.findall(" 1[a-z]+'", hall_symbol))
    return 4 if has_primed_translation else 3
class CategoryIDColumn(CategoryColumn):
    """Feature column mapping a categorical field to integer ids in
    ``[0, bucket_size)``."""

    def __init__(self, field_desc, bucket_size):
        assert isinstance(field_desc, FieldDesc)
        self.field_desc = field_desc
        # Number of distinct category buckets (also the class count).
        self.bucket_size = bucket_size

    def get_field_desc(self):
        """Return the wrapped FieldDesc objects (always one element)."""
        return [self.field_desc]

    def new_feature_column_from(self, field_desc):
        """Build a column of the same kind around a different FieldDesc."""
        return CategoryIDColumn(field_desc, self.bucket_size)

    def num_class(self):
        """Number of classes this column can produce."""
        return self.bucket_size

    def _to_dict(self):
        """Serialize to a plain dict (inverse of _from_dict)."""
        return {'field_desc': self.field_desc.to_dict(), 'bucket_size': self.bucket_size}

    @classmethod
    def _from_dict(cls, d):
        """Deserialize from a dict produced by _to_dict.

        BUG FIX: the original defined ``_from_dict(cls, d)`` without
        @classmethod, so ``cls`` would have been bound to an instance (or
        had to be passed explicitly).  The ``cls`` parameter name indicates
        @classmethod was intended (it may have been lost in extraction).
        """
        field_desc = FieldDesc.from_dict(d['field_desc'])
        bucket_size = d['bucket_size']
        return CategoryIDColumn(field_desc, bucket_size)
# NOTE(review): the line below is a mangled decorator -- presumably
# '@rank_zero_only' with the leading part lost in extraction.  TODO confirm.
_zero_only
def enforce_tags(cfg: DictConfig, save_to_file: bool=False) -> None:
    """Ensure ``cfg.tags`` is set, prompting interactively when missing.

    Raises when tags are missing in a hydra multirun (prompting would
    block every job).  Optionally writes the tags to ``tags.log`` in the
    run's output directory.
    """
    if (not cfg.get('tags')):
        # Multirun jobs carry an 'id'; refuse to prompt there.
        if ('id' in HydraConfig().cfg.hydra.job):
            raise ValueError('Specify tags before launching a multirun!')
        log.warning('No tags provided in config. Prompting user to input tags...')
        tags = Prompt.ask('Enter a list of comma separated tags', default='dev')
        tags = [t.strip() for t in tags.split(',') if (t != '')]
        # open_dict temporarily lifts OmegaConf's struct-mode restriction
        # so a new key can be written.
        with open_dict(cfg):
            cfg.tags = tags
        log.info(f'Tags: {cfg.tags}')
    if save_to_file:
        with open(Path(cfg.paths.output_dir, 'tags.log'), 'w') as file:
            rich.print(cfg.tags, file=file)
def bls_test_template(dim, N, bs, stencil, block_dim=None, scatter=False, benchmark=0, dense=False):
    """Exercise Taichi block-local storage (BLS): apply a stencil with and
    without ``ti.block_local`` and assert both produce identical fields.

    NOTE(review): ``populate``, ``apply`` and ``check`` read like Taichi
    kernels; their '@ti.kernel' decorators appear to have been lost in
    extraction -- confirm against the original file.
    """
    (x, y, y2) = (ti.field(ti.i32), ti.field(ti.i32), ti.field(ti.i32))
    index = ti.axes(*range(dim))
    mismatch = ti.field(ti.i32, shape=())
    # Allow a scalar block size; expand it per dimension.
    if (not isinstance(bs, (tuple, list))):
        bs = [bs for _ in range(dim)]
    grid_size = [(N // bs[i]) for i in range(dim)]
    if dense:
        create_block = (lambda : ti.root.dense(index, grid_size))
    else:
        create_block = (lambda : ti.root.pointer(index, grid_size))
    if scatter:
        # Scatter mode: all three fields share one block so writes land
        # in the same sparse blocks.
        block = create_block()
        block.dense(index, bs).place(x)
        block.dense(index, bs).place(y)
        block.dense(index, bs).place(y2)
    else:
        create_block().dense(index, bs).place(x)
        create_block().dense(index, bs).place(y)
        create_block().dense(index, bs).place(y2)
    # Interior region, leaving a two-block margin for the stencil reach.
    ndrange = (((bs[i] * 2), (N - (bs[i] * 2))) for i in range(dim))
    if (block_dim is None):
        # Default GPU block size: one thread per cell of a storage block.
        block_dim = 1
        for i in range(dim):
            block_dim *= bs[i]

    def populate():
        # Fill x with a position-dependent polynomial pattern.
        for I in ti.grouped(ti.ndrange(*ndrange)):
            s = 0
            for i in ti.static(range(dim)):
                s += (I[i] ** (i + 1))
            x[I] = s

    def apply(use_bls: ti.template(), y: ti.template()):
        # Gather mode caches x in BLS; scatter mode caches the output y.
        if ti.static((use_bls and (not scatter))):
            ti.block_local(x)
        if ti.static((use_bls and scatter)):
            ti.block_local(y)
        ti.loop_config(block_dim=block_dim)
        for I in ti.grouped(x):
            if ti.static(scatter):
                for offset in ti.static(stencil):
                    y[(I + ti.Vector(offset))] += x[I]
            else:
                s = 0
                for offset in ti.static(stencil):
                    s = (s + x[(I + ti.Vector(offset))])
                y[I] = s
    populate()
    if benchmark:
        for i in range(benchmark):
            # Reset the sparse structures each iteration so timings are
            # comparable between runs.
            x.snode.parent().deactivate_all()
            if (not scatter):
                populate()
            y.snode.parent().deactivate_all()
            y2.snode.parent().deactivate_all()
            apply(False, y2)
            apply(True, y)
    else:
        # Reference (no BLS) and BLS versions, once each.
        apply(False, y2)
        apply(True, y)

    def check():
        # Compare the BLS result against the reference field.
        for I in ti.grouped(y2):
            if (y[I] != y2[I]):
                print('check failed', I, y[I], y2[I])
                mismatch[None] = 1
    check()
    ti.profiler.print_kernel_profiler_info()
    assert (mismatch[None] == 0)
def _protobuf_file_saver(ctx, filename, ext):
    """Serialize ``ctx.proto`` and write it to ``filename``."""
    logger.info('Saving {} as protobuf'.format(filename))
    with get_file_handle_save(filename, ext) as handle:
        handle.write(ctx.proto.SerializeToString())
def crossCheckTest(SrcList, fdemType1, fdemType2, comp, addrandoms=False, useMu=False, TOL=1e-05, verbose=False):
    """Compare forward-modelled data from two FDEM formulations.

    Builds both problems on the same (optionally perturbed) model and
    returns True when the L2 difference of the predicted data is below a
    tolerance scaled to the data magnitude (floored at FLR).
    """
    def l2norm(r):
        return np.sqrt(r.dot(r))
    prb1 = getFDEMProblem(fdemType1, comp, SrcList, freq, useMu, verbose)
    mesh = prb1.mesh
    print('Cross Checking Forward: {0!s}, {1!s} formulations - {2!s}'.format(fdemType1, fdemType2, comp))
    # Base model: homogeneous log-conductivity and permeability.
    logsig = np.log((np.ones(mesh.nC) * CONDUCTIVITY))
    mu = (np.ones(mesh.nC) * MU)
    if (addrandoms is True):
        # ~10% Gaussian perturbation to avoid symmetric cancellations.
        logsig += ((np.random.randn(mesh.nC) * np.log(CONDUCTIVITY)) * 0.1)
        mu += ((np.random.randn(mesh.nC) * MU) * 0.1)
    m = (np.r_[(logsig, mu)] if (useMu is True) else logsig)
    d1 = prb1.dpred(m)
    if verbose:
        print(' Problem 1 solved')
    prb2 = getFDEMProblem(fdemType2, comp, SrcList, freq, useMu, verbose)
    d2 = prb2.dpred(m)
    if verbose:
        print(' Problem 2 solved')
    residual = (d2 - d1)
    misfit = l2norm(residual)
    # Tolerance scaled by the order of magnitude of the data.
    tol = np.max([(TOL * (10 ** int(np.log10((0.5 * (l2norm(d1) + l2norm(d2))))))), FLR])
    print(l2norm(d1), l2norm(d2), misfit, tol, (misfit < tol))
    return (misfit < tol)
def output_fn(batch_preds, tokenizer):
    """Decode a batch of token-id predictions into plain strings,
    dropping special tokens."""
    decoded = tokenizer.batch_decode(batch_preds, skip_special_tokens=True)
    return decoded
def _winding_number(vertices, point): def _intersects(start, end, y0): if (end[1] < start[1]): (start, end) = (end, start) return (start[1] < y0 < end[1]) def _is_left(point, edge): (start, end) = (edge[0], edge[1]) if (end[1] == start[1]): return False x_in = (start[0] + (((point[1] - start[1]) * (end[0] - start[0])) / (end[1] - start[1]))) return (x_in > point[0]) sides = [] wn = 0 for i in range(0, (len(vertices) - 1)): if _intersects(vertices[i], vertices[(i + 1)], point[1]): sides.append([vertices[i], vertices[(i + 1)]]) if _intersects(vertices[(- 1)], vertices[0], point[1]): sides.append([vertices[(- 1)], vertices[0]]) for side in sides: if _is_left(point, side): if (side[1][1] > side[0][1]): wn = (wn + 1) if (side[1][1] < side[0][1]): wn = (wn - 1) return wn
class BertMatchingTrainDataset(Dataset):
    """Sentence-pair matching dataset yielding tokenized pairs plus a label.

    Each element of `data` is assumed to be (text_a, text_b, label) --
    confirm with the data-loading code.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, data: list, max_len: int=64):
        self.tokenizer = tokenizer
        self.data = data
        # Per-sentence budget; pairs are encoded up to 2 * max_len tokens.
        self.max_len = max_len

    def __len__(self):
        return len(self.data)

    def text_2_id(self, text_1: str, text_2: str=None):
        """Tokenize one text (or a pair) to fixed-length pt tensors."""
        encoded = self.tokenizer(text_1, text_2, max_length=(self.max_len * 2), truncation=True, padding='max_length', return_tensors='pt')
        return encoded

    def __getitem__(self, index: int):
        sample = self.data[index]
        features = self.text_2_id(sample[0], sample[1])
        return (features, sample[2])
def process_file(args, file):
    """Filter one input file through `keep_file`, writing survivors to the
    output bucket.  Returns (records_read, records_kept)."""
    print(f'reading file {file}', flush=True)
    total_records_in = 0
    total_records_out = 0
    random.seed(args.seed)

    def yield_samples(path):
        # Stream lines lazily so large files never fit in memory at once.
        with open(path, 'r') as handle:
            for line in handle:
                (yield line)
    out_file = f'{args.out_bucket_path}/{os.path.basename(file)}'
    out_file_tmp = f'{out_file}.tmp'
    os.makedirs(os.path.dirname(out_file_tmp), exist_ok=True)
    # Idempotency: a finished output file means this shard is done.
    if os.path.exists(out_file):
        print(f'skipping file {out_file}', flush=True)
        return (total_records_in, total_records_out)
    with open(out_file_tmp, 'w') as sink:
        for (i, record) in enumerate(yield_samples(file)):
            if ((i % 10000) == 0):
                print(file, i, flush=True)
            total_records_in += 1
            if keep_file(record):
                total_records_out += 1
                sink.write(record)
    print(f'finalizing file {out_file_tmp} -> {out_file} with {total_records_out} records', flush=True)
    # Rename so readers never observe a half-written output file.
    os.rename(out_file_tmp, out_file)
    print(f'finalized file {out_file_tmp}')
    print(f'finished total_records_in={total_records_in} total_records_out={total_records_out}')
    return (total_records_in, total_records_out)
def name_length_is_invalid(name):
    """Return True when `name` does not contain between 2 and 6
    whitespace-separated words (inclusive)."""
    # Split once (the original split twice) and drop the redundant
    # `True if ... else False` around an already-boolean expression.
    word_count = len(name.split())
    return (word_count <= 1) or (word_count > 6)
class FedAvgServer():
    """Federated-averaging (FedAvg) server.

    Each round it picks a subset of clients, fine-tunes the current global
    parameters on each client's data, and replaces the global parameters
    with the plain average of the returned client parameters.

    NOTE(review): depends on project-local Trainer/Predictor/collate_fn/
    loss_fn/pred_fn/freeze helpers not visible in this chunk.
    """

    def __init__(self, deployer, model, params, n_clients):
        self._deployer = deployer
        self._model = model
        # Global parameters are kept frozen (immutable) on the server.
        self._params = freeze(params)
        self._n_clients = n_clients
        # learning_rate=0.0 is a placeholder: the real rate is installed per
        # client round via set_train_state() in train_client().
        self._trainer = Trainer(deployer=self._deployer, collate_fn=collate_fn, apply_fn=self._model.apply, loss_fn=loss_fn, params=params, optimizer=optax.adam(learning_rate=0.0))
        self._predictor = Predictor(deployer=self._deployer, collate_fn=collate_fn, pred_fn=partial(pred_fn, model=self._model), output_fn=(lambda x: x.tolist()))

    def train_client(self, examples, learning_rate, per_device_batch_size, n_epochs):
        """Fine-tune a copy of the global params on one client's examples.

        Returns the client's updated parameter pytree; the server's own
        self._params is left untouched.
        """
        # Reset the shared trainer to the current global params and a fresh
        # optimizer/step counter before each client run.
        self._trainer.set_train_state(apply_fn=self._model.apply, params=self._params, optimizer=optax.adam(learning_rate=learning_rate), step=0)
        self._trainer.fit(train_examples=examples, per_device_batch_size=per_device_batch_size, n_epochs=n_epochs)
        return self._trainer.params

    def test(self, examples, per_device_batch_size):
        """Evaluate the global params; returns (accuracy, confusion_matrix)."""
        preds = self._predictor.predict(examples=examples, per_device_batch_size=per_device_batch_size, params=self._params)
        labels = [example['label'] for example in examples]
        acc = np.mean((np.array(preds) == np.array(labels)))
        conf_mat = confusion_matrix(y_true=labels, y_pred=preds)
        return (acc, conf_mat)

    def run(self, n_rounds, n_clients_per_round, n_client_epochs_per_round, per_device_batch_size, eval_per_device_batch_size, learning_rate, client_train_datasets, eval_dataset):
        """Run `n_rounds` of FedAvg, evaluating after every round."""
        for round_idx in tqdm.trange(n_rounds, desc='Server Round'):
            # Sample clients without replacement for this round.
            round_client_idxes = np.random.choice(np.arange(self._n_clients), n_clients_per_round, replace=False)
            # Accumulate client parameter pytrees elementwise, then average.
            sum_client_params = jax.tree_util.tree_map(jnp.zeros_like, self._params)
            for client_idx in round_client_idxes:
                client_params = self.train_client(examples=client_train_datasets[client_idx], learning_rate=learning_rate, per_device_batch_size=per_device_batch_size, n_epochs=n_client_epochs_per_round)
                sum_client_params = jax.tree_util.tree_map((lambda x, y: (x + y)), sum_client_params, client_params)
            self._params = jax.tree_util.tree_map((lambda x: (x / n_clients_per_round)), sum_client_params)
            (acc, conf_mat) = self.test(examples=eval_dataset, per_device_batch_size=eval_per_device_batch_size)
            print(f'Round {round_idx} finished.')
            print(f'Test accuracy: {acc}')
            print(f'''Confusion matrix:
{conf_mat}''')
def sanitize_name(name):
    """Build a '__doc_'-prefixed identifier from a (possibly templated) C++ name."""
    # Rewrite clang's anonymous template parameters to T0, T1, ...
    name = re.sub('type-parameter-0-([0-9]+)', 'T\\1', name)
    # Replace operator symbols with their spelled-out aliases.
    for op, alias in CPP_OPERATORS.items():
        name = name.replace('operator%s' % op, 'operator_%s' % alias)
    # Strip template argument lists, keep only alphanumerics, collapse the
    # rest into single underscores, and trim a trailing underscore.
    name = re.sub('<.*>', '', name)
    name = ''.join(ch if ch.isalnum() else '_' for ch in name)
    name = re.sub('_$', '', re.sub('_+', '_', name))
    return '__doc_' + name
class conv1x1(nn.Module):
    """1x1 convolution followed by BatchNorm2d and LeakyReLU(0.2)."""

    def __init__(self, in_ch, out_ch):
        super(conv1x1, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 1, stride=1, padding=0),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, x):
        # Sequential already chains conv -> BN -> activation.
        return self.conv(x)
def UnitaryPolarGraph(m, q, algorithm='gap'):
    """Return the Unitary Polar Graph U(m, q).

    Vertices are the isotropic points of a Hermitian form on GF(q^2)^m;
    two points are adjacent when the form vanishes on the pair.

    With ``algorithm='gap'`` the graph is built from the orbit structure of
    GAP's GeneralUnitaryGroup; with ``algorithm=None`` it is built directly
    from projective space.  Any other value raises ValueError.
    """
    if (algorithm == 'gap'):
        from sage.libs.gap.libgap import libgap
        G = _polar_graph(m, (q ** 2), libgap.GeneralUnitaryGroup(m, q))
    elif (algorithm is None):
        from sage.schemes.projective.projective_space import ProjectiveSpace
        from sage.modules.free_module_element import free_module_element as vector
        Fq = FiniteField((q ** 2), 'a')
        PG = map(vector, ProjectiveSpace((m - 1), Fq))
        # Immutable vectors are hashable, so they can serve as vertices.
        for v in PG:
            v.set_immutable()

        def P(x, y):
            # Hermitian form: sum_j x_j * conj(y_{m-1-j}), conj = Frobenius ^q.
            return (sum(((x[j] * (y[((m - 1) - j)] ** q)) for j in range(m))) == 0)
        # Isotropic points only.
        V = [x for x in PG if P(x, x)]
        G = Graph([V, (lambda x, y: P(x, y))], loops=False)
    else:
        raise ValueError('unknown algorithm!')
    G.relabel()
    G.name(('Unitary Polar Graph U' + str((m, q))))
    # For m in {4, 5} the graph is a generalized quadrangle; record its order.
    if (m == 4):
        G.name(((G.name() + '; GQ') + str(((q ** 2), q))))
    if (m == 5):
        G.name(((G.name() + '; GQ') + str(((q ** 2), (q ** 3)))))
    return G
class NoGradModifier(FunctionModifier):
    """Function modifier that re-links parameter inputs with need_grad=False."""

    def __init__(self):
        super(NoGradModifier, self).__init__()

    def modify(self, f, inputs):
        # Collect the data arrays of all registered parameters so parameter
        # inputs can be recognized by identity.
        param_data = [v.data for v in nn.get_parameters(grad_only=False).values()]
        detached = []
        for variable in inputs:
            if variable.data in param_data:
                # Parameter input: unlink it so no gradient is tracked.
                variable = variable.get_unlinked_variable(need_grad=False)
            detached.append(variable)
        return self._call_function(f.info.type_name, detached, f.info.args)
class Anchors(nn.Module):
    """Generate multi-scale anchor boxes for an input image (EfficientDet-style).

    Anchors are produced for each pyramid level as the product of the
    configured scales and aspect ratios, and are cached per device for the
    last seen input shape so repeated forward passes are free.
    """

    def __init__(self, anchor_scale=4.0, pyramid_levels=None, **kwargs):
        super().__init__()
        self.anchor_scale = anchor_scale
        if (pyramid_levels is None):
            self.pyramid_levels = [3, 4, 5, 6, 7]
        else:
            self.pyramid_levels = pyramid_levels
        # One stride per pyramid level: 2**level unless overridden via kwargs.
        self.strides = kwargs.get('strides', [(2 ** x) for x in self.pyramid_levels])
        self.scales = np.array(kwargs.get('scales', [(2 ** 0), (2 ** (1.0 / 3.0)), (2 ** (2.0 / 3.0))]))
        self.ratios = kwargs.get('ratios', [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)])
        # Cache: device -> anchor tensor computed for self.last_shape.
        self.last_anchors = {}
        self.last_shape = None

    def forward(self, image):
        """Return the anchor tensor of shape (1, num_anchors, 4).

        Box columns are stacked as (y1, x1, y2, x2) -- see the np.vstack
        ordering below.  Raises ValueError when the input width is not a
        multiple of every stride.
        """
        dtype = image.dtype
        # image is assumed NCHW; shape[2:] is the spatial (H, W) part.
        image_shape = image.shape[2:]
        # Cache hit: same spatial shape and anchors already built on this device.
        if ((image_shape == self.last_shape) and (image.device in self.last_anchors)):
            return self.last_anchors[image.device]
        if ((self.last_shape is None) or (self.last_shape != image_shape)):
            self.last_shape = image_shape
        # Mirror the torch dtype in numpy for the intermediate computation.
        if (dtype == torch.float16):
            dtype = np.float16
        else:
            dtype = np.float32
        boxes_all = []
        for stride in self.strides:
            boxes_level = []
            for (scale, ratio) in itertools.product(self.scales, self.ratios):
                if ((image_shape[1] % stride) != 0):
                    raise ValueError('input size must be divided by the stride.')
                base_anchor_size = ((self.anchor_scale * stride) * scale)
                anchor_size_x_2 = ((base_anchor_size * ratio[0]) / 2.0)
                anchor_size_y_2 = ((base_anchor_size * ratio[1]) / 2.0)
                # Anchor centers lie on a stride-spaced grid, offset by stride/2.
                x = np.arange((stride / 2), image_shape[1], stride)
                y = np.arange((stride / 2), image_shape[0], stride)
                (xv, yv) = np.meshgrid(x, y)
                xv = xv.reshape((- 1))
                yv = yv.reshape((- 1))
                # Rows stacked as y1, x1, y2, x2; swapaxes makes boxes (N, 4).
                boxes = np.vstack(((yv - anchor_size_y_2), (xv - anchor_size_x_2), (yv + anchor_size_y_2), (xv + anchor_size_x_2)))
                boxes = np.swapaxes(boxes, 0, 1)
                boxes_level.append(np.expand_dims(boxes, axis=1))
            # Interleave the scale/ratio variants per location, then flatten.
            boxes_level = np.concatenate(boxes_level, axis=1)
            boxes_all.append(boxes_level.reshape([(- 1), 4]))
        anchor_boxes = np.vstack(boxes_all)
        anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)
        # Leading batch dimension of 1.
        anchor_boxes = anchor_boxes.unsqueeze(0)
        self.last_anchors[image.device] = anchor_boxes
        return anchor_boxes
def packbits(arr: np.ndarray):
    """Pack a bit array into one integer using the precomputed _TABLE weights.

    Arrays longer than 64 entries do not fit in the table and map to 0.
    """
    if arr.size > 64:
        return 0
    weights = _TABLE[:arr.size]
    return int(arr.dot(weights))
def AMD_stable_Q(sim):
    """Return True when every adjacent planet pair in `sim` is AMD-stable.

    A pair is unstable when the system's total AMD, scaled by the outer
    planet's circular angular momentum, exceeds the pair's critical
    relative AMD.
    """
    AMD = compute_AMD(sim)
    star = sim.particles[0]
    planets = sim.particles[1:]
    # Walk adjacent (inner, outer) pairs from the star outward.
    for inner, outer in zip(planets, planets[1:]):
        orb_in = inner.orbit(star)
        orb_out = outer.orbit(star)
        alpha = orb_in.a / orb_out.a
        gamma = inner.m / outer.m
        Lambda_out = outer.m * np.sqrt(orb_out.a)
        if AMD / Lambda_out > critical_relative_AMD(alpha, gamma):
            return False
    return True
def get_batcher(model_name):
    """Select the batcher class appropriate for `model_name`."""
    if 'rnf' not in model_name:
        print('Return default autoregressive batcher')
        return batchers.EfficientAutoregressiveBatcher
    # RNF models require their own batching scheme.
    print('Getting special batcher for RNF')
    return batchers.EfficientRnfBatcher
def infer(valid_queue, model, criterion):
    """Run one evaluation pass over `valid_queue`.

    Returns the running averages (top1, top5, loss).  Progress is logged
    every args.report_freq steps.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            batch_size = input.size(0)
            objs.update(loss.data, batch_size)
            top1.update(prec1.data, batch_size)
            top5.update(prec5.data, batch_size)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return (top1.avg, top5.avg, objs.avg)
def check_package_data(dist, attr, value):
    """Verify that `value` maps package names (str) to iterables of patterns.

    Raises DistutilsSetupError when the structure is wrong; returns None
    when it is acceptable.  `dist` is unused (setuptools hook signature).
    """
    message = attr + ' must be a dictionary mapping package names to lists of wildcard patterns'
    if not isinstance(value, dict):
        raise DistutilsSetupError(message)
    ok = True
    for pkg, patterns in value.items():
        if not isinstance(pkg, str):
            ok = False
            break
        try:
            iter(patterns)
        except TypeError:
            ok = False
            break
    if ok:
        return
    raise DistutilsSetupError(message)
def simple_dtype_fmt():
    """Fill the dt_fmt() template for a bare longdouble field.

    The field offset is 12 unless longdouble alignment exceeds 4 bytes,
    in which case padding pushes it to 16.
    """
    ld = np.dtype('longdouble')
    offset = 16 if ld.alignment > 4 else 12
    return dt_fmt().format(ld.itemsize, offset, offset + ld.itemsize)
def levenshtein_distance(first, second):
    """Edit distance between two sequences: bottom-right cell of the DP matrix."""
    matrix, _backpointers = levenshtein_matrix(first, second)
    return matrix[-1][-1]
# NOTE(review): the decorator lines below look truncated by extraction --
# presumably `@pytest.mark.slow` / `@pytest.mark.xfail(...)`; confirm
# against the original file.
.slow
.xfail(reason='stacklevels currently missing')
def test_warning_calls_stacklevels(warning_calls):
    """Assemble one combined failure report from the collected bad warning
    filters and bad stacklevels, and raise it if anything was found."""
    (bad_filters, bad_stacklevels) = warning_calls
    msg = ''
    if bad_filters:
        msg += 'warning ignore filter should not be used, instead, use\nnumpy.testing.suppress_warnings (in tests only);\nfound in:\n    {}'.format('\n    '.join(bad_filters))
        msg += '\n\n'
    if bad_stacklevels:
        msg += 'warnings should have an appropriate stacklevel:\n    {}'.format('\n    '.join(bad_stacklevels))
    if msg:
        raise AssertionError(msg)
def cardinality_exhaustive(self):
    """Count points on the curve by brute force.

    One point at infinity plus every affine point obtained by lifting each
    x-coordinate of the base field; the result is cached in self._order.
    """
    affine_points = sum(len(self.lift_x(x, all=True)) for x in self.base_field())
    self._order = Integer(1 + affine_points)
    return self._order
def find_constituent_end(gold_sequence, cur_index):
    """Scan forward to the end of the constituent open at cur_index.

    Returns the index of the CloseConstituent that drops the nesting depth
    to -1, or of the second Shift seen at depth 0, whichever comes first;
    None when the sequence ends without either.
    """
    depth = 0
    seen_shift = False
    while cur_index < len(gold_sequence):
        transition = gold_sequence[cur_index]
        if isinstance(transition, OpenConstituent):
            depth += 1
        elif isinstance(transition, CloseConstituent):
            depth -= 1
            if depth == -1:
                return cur_index
        elif isinstance(transition, Shift):
            if seen_shift and depth == 0:
                return cur_index
            seen_shift = True
        cur_index += 1
    return None
def find_matching_of_a_segment_given_sorted_val_corres_idx(sorted_values, sorted_indices, criteria='threshold', threshold=0.7, topK=3):
    """Select matched step indices from scores sorted in descending order.

    criteria:
      'threshold'       -- keep every index whose score exceeds `threshold`.
      'threshold+topK'  -- same, but stop once `topK` matches are collected.
      'topK'            -- keep the first `topK` indices regardless of score.

    Returns (matched_indices, matched_scores).

    Raises ValueError for an unknown criteria.  (The original called
    os._exit(0), killing the whole process with a *success* exit code and
    skipping all cleanup.)
    """
    matched_steps = list()
    matched_steps_score = list()
    if criteria == 'threshold':
        for value, index in zip(sorted_values, sorted_indices):
            if value > threshold:
                matched_steps.append(index)
                matched_steps_score.append(value)
    elif criteria == 'threshold+topK':
        for value, index in zip(sorted_values, sorted_indices):
            if value > threshold:
                if len(matched_steps) >= topK:
                    break
                matched_steps.append(index)
                matched_steps_score.append(value)
    elif criteria == 'topK':
        for value, index in zip(sorted_values, sorted_indices):
            if len(matched_steps) >= topK:
                break
            matched_steps.append(index)
            matched_steps_score.append(value)
    else:
        raise ValueError('The criteria {!r} is not implemented!'.format(criteria))
    return (matched_steps, matched_steps_score)
# NOTE(review): `_module()` looks like a truncated registry decorator
# (e.g. `@MODELS.register_module()`); confirm against the original file.
_module()
class BertEncoder(BaseModule):
    """Thin mmcv-style wrapper around a BertModel backbone.

    All constructor arguments are forwarded verbatim to BertModel; forward()
    moves the tokenized inputs onto the model's device and runs the encoder.
    """

    def __init__(self, num_hidden_layers=12, initializer_range=0.02, vocab_size=21128, hidden_size=768, max_position_embeddings=128, type_vocab_size=2, layer_norm_eps=1e-12, hidden_dropout_prob=0.1, output_attentions=False, output_hidden_states=False, num_attention_heads=12, attention_probs_dropout_prob=0.1, intermediate_size=3072, hidden_act_cfg=dict(type='GeluNew'), init_cfg=[dict(type='Xavier', layer='Conv2d'), dict(type='Uniform', layer='BatchNorm2d')]):
        super().__init__(init_cfg=init_cfg)
        self.bert = BertModel(num_hidden_layers=num_hidden_layers, initializer_range=initializer_range, vocab_size=vocab_size, hidden_size=hidden_size, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, layer_norm_eps=layer_norm_eps, hidden_dropout_prob=hidden_dropout_prob, output_attentions=output_attentions, output_hidden_states=output_hidden_states, num_attention_heads=num_attention_heads, attention_probs_dropout_prob=attention_probs_dropout_prob, intermediate_size=intermediate_size, hidden_act_cfg=hidden_act_cfg)

    def forward(self, results):
        # `results` is expected to be a dict with tokenized tensors under
        # 'input_ids', 'attention_masks' and 'token_type_ids'.
        device = next(self.bert.parameters()).device
        input_ids = results['input_ids'].to(device)
        attention_masks = results['attention_masks'].to(device)
        token_type_ids = results['token_type_ids'].to(device)
        outputs = self.bert(input_ids=input_ids, attention_masks=attention_masks, token_type_ids=token_type_ids)
        return outputs
class GrailEntityCandidate():
    """Candidate KB entity with its labels, linking scores and relations."""

    def __init__(self, id, label, facc_label, surface_score, pop_score, relations):
        self.id = id
        self.label = label
        self.facc_label = facc_label
        self.surface_score = surface_score
        self.pop_score = pop_score
        self.relations = relations

    def __str__(self):
        # Compact "id:label:score" form, score with two decimals.
        return '{}:{}:{:.2f}'.format(self.id, self.label, self.surface_score)

    def __repr__(self):
        return self.__str__()
def test_sanitize_history_empty(case_factory):
    """Sensitive headers must be filtered in place via the history entry."""
    case = case_factory(headers={'Password': 'password'})
    history_entry = CaseSource(case=case, response=requests.Response(), elapsed=0.1)
    sanitize_history(history_entry)
    assert case.headers == {'Password': '[Filtered]'}
def HarborthGraph():
    """Return the Harborth graph.

    The graph is decoded from a sparse6 string; the explicit vertex
    positions give its known planar unit-distance embedding.
    """
    g = Graph(':s_OGKI?_?g[QABAo__?iIEbqHWuWLbbh?}[OfcXpGhNHdYPY_SgdYX]pZkfJPuo[lfZHys^mFcDs}`pG{UNNgoHC}DIgrI[qjMhTyDQrQlVydrBYmWkn', loops=False, multiedges=False)
    g.set_pos({0: (51.5, 400.0), 1: (90.6, 308.0), 2: (90.6, 492.0), 3: (129.8, 216.0), 4: (129.8, 584.0), 5: (150.7, 387.9), 6: (150.7, 412.1), 7: (169.0, 124.0), 8: (169.0, 676.0), 9: (189.9, 295.9), 10: (189.9, 504.1), 11: (229.1, 203.9), 12: (229.1, 596.1), 13: (250.0, 400.0), 14: (251.4, 180.6), 15: (251.4, 619.4), 16: (256.1, 300.2), 17: (256.1, 499.8), 18: (259.3, 80.9), 19: (259.3, 719.1), 20: (333.8, 237.2), 21: (333.8, 562.8), 22: (341.7, 137.5), 23: (341.7, 662.5), 24: (350.0, 37.9), 25: (350.0, 336.0), 26: (350.0, 464.0), 27: (350.0, 762.1), 28: (358.3, 137.5), 29: (358.3, 662.5), 30: (366.2, 237.2), 31: (366.2, 562.8), 32: (440.7, 80.9), 33: (440.7, 719.1), 34: (443.9, 300.2), 35: (443.9, 499.8), 36: (448.6, 180.6), 37: (448.6, 619.4), 38: (450.0, 400.0), 39: (470.9, 203.9), 40: (470.9, 596.1), 41: (510.1, 295.9), 42: (510.1, 504.1), 43: (531.0, 124.0), 44: (531.0, 676.0), 45: (549.3, 387.9), 46: (549.3, 412.1), 47: (570.2, 216.0), 48: (570.2, 584.0), 49: (609.4, 308.0), 50: (609.4, 492.0), 51: (648.5, 400.0)})
    g.name('Harborth Graph')
    return g
def get_images(path_to_dir):
    """Open 1.jpg .. N.jpg from `path_to_dir` in numeric order.

    N is the number of ``*.jpg`` files in the directory; files are assumed
    to be named consecutively starting at 1.jpg.
    """
    jpg_count = len(glob.glob(os.path.join(path_to_dir, '*.jpg')))
    return [
        Image.open(os.path.join(path_to_dir, f'{idx + 1}.jpg'))
        for idx in range(jpg_count)
    ]
def is_valid_dm(D, tol=0.0, throw=False, name='D', warning=False):
    """Return True when `D` is a valid distance matrix.

    A valid distance matrix is two-dimensional, symmetric and has a zero
    diagonal (both checks up to `tol` when tol > 0).

    Parameters
    ----------
    D : array_like
        Candidate distance matrix.
    tol : float
        Absolute tolerance for the symmetry / zero-diagonal checks; 0.0
        demands exact equality.
    throw : bool
        Re-raise the validation ValueError instead of returning False.
    name : str or None
        Variable name interpolated into error messages (omitted when falsy).
    warning : bool
        Emit the validation error as a warning when the matrix is invalid.
    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(("Distance matrix '%s' must have shape=2 (i.e. be two-dimensional)." % name))
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. be two-dimensional).')
        if tol == 0.0:
            # Exact checks.
            if not (D == D.T).all():
                if name:
                    raise ValueError(("Distance matrix '%s' must be symmetric." % name))
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[(range(0, s[0]), range(0, s[0]))] == 0).all():
                if name:
                    raise ValueError(("Distance matrix '%s' diagonal must be zero." % name))
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerance-based checks.
            if not ((D - D.T) <= tol).all():
                if name:
                    raise ValueError(f"Distance matrix '{name}' must be symmetric within tolerance {tol:5.5f}.")
                else:
                    raise ValueError(('Distance matrix must be symmetric within tolerance %5.5f.' % tol))
            if not (D[(range(0, s[0]), range(0, s[0]))] <= tol).all():
                if name:
                    raise ValueError(f"Distance matrix '{name}' diagonal must be close to zero within tolerance {tol:5.5f}.")
                else:
                    # BUG FIX: this branch used "...'{}'...".format(*tol),
                    # which tried to unpack a float and raised TypeError
                    # (and referenced a name that is falsy here).  Match the
                    # name-less style of the other branches instead.
                    raise ValueError(('Distance matrix diagonal must be close to zero within tolerance %5.5f.' % tol))
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e), stacklevel=2)
        valid = False
    return valid
class RunningMean():
    """Incrementally maintained arithmetic mean.

    Usage::

        m = RunningMean()
        m += 3
        m += 5
        m.mean  # -> 4.0

    BUG FIX: the original defined a ``mean()`` *method* that was
    permanently shadowed by the ``self.mean`` attribute set in __init__,
    so ``obj.mean()`` raised TypeError.  ``mean`` is now a property backed
    by ``_mean``; attribute reads and writes behave exactly as before.
    """

    def __init__(self):
        self._mean = 0.0  # running mean of all values added so far
        self.n = 0        # number of values added

    @property
    def mean(self):
        """Current mean (0.0 before any value is added)."""
        return self._mean

    @mean.setter
    def mean(self, value):
        self._mean = value

    def __iadd__(self, value):
        # Incremental update: new_mean = (value + mean*n) / (n + 1).
        self._mean = (float(value) + self._mean * self.n) / (self.n + 1)
        self.n += 1
        return self

    def reset(self):
        """Forget all accumulated values."""
        self._mean = 0.0
        self.n = 0
# NOTE(review): the decorator lines below look truncated by extraction --
# presumably `@pytest.mark.parametrize(...)`; confirm against the original.
.parametrize('ctx, func_name', ctxs)
.parametrize('w_shape , channel_axis', [((8, 4, 3, 3), 0), ((32, 16, 3, 3), (- 2)), ((16, 1), 1), ((8, 4, 16), (- 1)), ((4, 2, 8), 2)])
.parametrize('eps', [1e-05])
.parametrize('output_stat', [False, True])
def test_weight_standardization_forward_backward(rng, ctx, func_name, w_shape, channel_axis, eps, output_stat):
    """Check F.weight_standardization against the reference implementation,
    forward and backward, over several weight shapes and channel axes."""
    from nbla_test_utils import function_tester
    w = np.array(rng.randn(*w_shape).astype(np.float32))
    function_tester(rng, F.weight_standardization, ref_weight_standardization, [w], [channel_axis, eps, output_stat], ctx=ctx, func_name=func_name, dstep=0.01, atol_b=0.01)
# NOTE(review): `(frozen=True)` looks like a truncated
# `@dataclass(frozen=True)` decorator -- confirm against the original file.
(frozen=True)
class Utterance():
    """A single line of dialogue: who spoke and what was said."""
    # Dataclass-style fields.
    speaker: Speaker
    text: str

    def __str__(self):
        # The speaker must be named for the "Name: text" rendering to work.
        assert (self.speaker.name is not None), 'Speaker name needs to be set for generating utterance string'
        return (f'{self.speaker.name}: {self.text}' + '\n')
class VisionNetwork(nn.Module):
    """Convolutional visual encoder: conv backbone -> fc(512) -> fc(visual_features).

    SECURITY NOTE: `conv_encoder` is passed to eval(); it must only ever
    come from trusted configuration, never from user input.
    """

    def __init__(self, conv_encoder: str, activation_function: str, dropout_vis_fc: float, l2_normalize_output: bool, visual_features: int, num_c: int):
        super(VisionNetwork, self).__init__()
        self.l2_normalize_output = l2_normalize_output
        # Activation class is looked up by name on torch.nn.
        self.act_fn = getattr(nn, activation_function)()
        # The backbone class is named by a (trusted) config string.
        encoder_cls = eval(conv_encoder)
        self.conv_model = encoder_cls(self.act_fn, num_c)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=128, out_features=512),
            self.act_fn,
            nn.Dropout(dropout_vis_fc),
        )
        self.fc2 = nn.Linear(in_features=512, out_features=visual_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = self.conv_model(x)
        features = self.fc1(features)
        features = self.fc2(features)
        if self.l2_normalize_output:
            # Project embeddings onto the unit sphere.
            features = F.normalize(features, p=2, dim=1)
        return features
def load_tf_weights_in_mobilebert(*args, **kwargs):
    # Dummy-object stub: stands in when torch is not installed.
    # NOTE(review): requires_backends presumably raises an informative
    # error about the missing 'torch' backend (transformers dummy-object
    # pattern) -- confirm against the requires_backends implementation.
    requires_backends(load_tf_weights_in_mobilebert, ['torch'])
def test_array():
    """Assigning a length-1 list to a field broadcasts it to every record."""
    records = ak.Array({'x': [1, 2, 3]})
    records['x'] = [4]
    expected = [{'x': 4}, {'x': 4}, {'x': 4}]
    assert records.to_list() == expected
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True): feature_names_in_ = getattr(estimator, 'feature_names_in_', None) n_features_in_ = getattr(estimator, 'n_features_in_', None) if (input_features is not None): input_features = np.asarray(input_features, dtype=object) if ((feature_names_in_ is not None) and (not np.array_equal(feature_names_in_, input_features))): raise ValueError('input_features is not equal to feature_names_in_') if ((n_features_in_ is not None) and (len(input_features) != n_features_in_)): raise ValueError(f'input_features should have length equal to number of features ({n_features_in_}), got {len(input_features)}') return input_features if (feature_names_in_ is not None): return feature_names_in_ if (not generate_names): return if (n_features_in_ is None): raise ValueError('Unable to generate feature names without n_features_in_') return np.asarray([f'x{i}' for i in range(n_features_in_)], dtype=object)
class Hyperparameter(metaclass=abc.ABCMeta):
    """Abstract base class for tunable hyperparameters.

    Each hyperparameter carries a name fixed at construction time and
    exposed through the name() accessor.
    """

    def __init__(self, name):
        # Stored privately; read back via name().
        self._name = name

    def name(self):
        """Return the hyperparameter's name."""
        return self._name
def test_find_one_return_none():
    """find_one must return None (not raise) when no entry matches."""
    result = rldb.find_one({'env-title': 'There is no env with this title.'})
    assert result is None
class StructTest1(StructTestFunction):
    # Objective: squared distance from the origin in 2-D.
    def f(self, x):
        return ((x[0] ** 2) + (x[1] ** 2))

    # Constraint g(x) >= 0  <=>  sum(x) <= 6.  NOTE(review): `g` has no
    # `self` parameter on purpose -- it is consumed as a plain function by
    # wrap_constraints() in the class body below; confirm this matches the
    # other StructTest* classes in the file.
    def g(x):
        return (- (numpy.sum(x, axis=0) - 6.0))
    cons = wrap_constraints(g)
def compute_average_flops_cost(self):
    """Mean FLOPs per batch: total Conv2d __flops__ divided by batches seen."""
    batches = self.__batch_counter__
    # Only Conv2d modules contribute; others carry no __flops__ here.
    conv_flops = sum(
        m.__flops__ for m in self.modules() if isinstance(m, torch.nn.Conv2d)
    )
    return conv_flops / batches
# NOTE(review): the decorator below appears truncated by extraction --
# presumably `@torch.no_grad()`; confirm against the original file.
_grad()
def ema_update(model, averaged_model, decay):
    """In-place EMA update of `averaged_model` from `model`.

    Parameters are lerped: averaged <- decay*averaged + (1-decay)*model.
    Buffers are copied verbatim.  Both modules must have identical
    parameter and buffer name sets.
    """
    model_params = dict(model.named_parameters())
    averaged_params = dict(averaged_model.named_parameters())
    assert (model_params.keys() == averaged_params.keys())
    for (name, param) in model_params.items():
        averaged_params[name].mul_(decay).add_(param, alpha=(1 - decay))
    model_buffers = dict(model.named_buffers())
    averaged_buffers = dict(averaged_model.named_buffers())
    assert (model_buffers.keys() == averaged_buffers.keys())
    for (name, buf) in model_buffers.items():
        # Buffers (e.g. running stats) are not averaged, just mirrored.
        averaged_buffers[name].copy_(buf)
class Dataset(object):
    """HDF5-backed dataset of (image, pose) examples for view synthesis.

    Images are rescaled from [0, 255] to [-1, 1]; poses get a trailing
    axis so multiple views can be concatenated along the last dimension.

    NOTE(review): several defects below suggest this chunk lost decorators
    and statements during extraction: `get_data_by_id_tuple` and
    `get_data_by_target` return an undefined local `image`, and `__len__`
    calls len() on the bound method `self.ids` (consistent with a stripped
    `@property` on ids()).  Flagged inline; fix against the original file.
    """

    def __init__(self, ids, n, scene_class, name='default', max_examples=None, is_train=True, bound=10):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train
        # n: number of extra views gathered per example; bound: max offset
        # (in id units) when sampling neighboring frames.
        self.n = n
        self.bound = bound
        if (max_examples is not None):
            self._ids = self._ids[:max_examples]
        filename = 'data_{}.hdf5'.format(scene_class)
        file = osp.join('./datasets/{}'.format(scene_class), filename)
        log.info('Reading %s ...', file)
        # File handle is kept open for the dataset's lifetime.
        self.data = h5py.File(file, 'r')
        log.info('Reading Done: %s', file)

    def get_data(self, id, order=None):
        """Return (image, pose) for `id` plus self.n randomly sampled
        neighboring views concatenated along the last axis."""
        if isinstance(id, bytes):
            id = id.decode('utf-8')
        image = (((self.data[id]['image'].value / 255.0) * 2) - 1)
        pose = np.expand_dims(self.data[id]['pose'].value, (- 1))
        valid = False
        # NOTE(review): `num_digit` is a module-level constant not visible
        # here -- presumably the count of trailing digits in an id; confirm.
        id_num = int(id[(- num_digit):])
        while (not valid):
            # Sample a frame within +/- bound of the current frame index.
            random_num = np.random.randint((- self.bound), self.bound)
            id_target = (id[:(- num_digit)] + str((id_num + random_num)).zfill(num_digit))
            if (id_target in self.data):
                image_tmp = (((self.data[id_target]['image'].value / 255.0) * 2) - 1)
                pose_tmp = np.expand_dims(self.data[id_target]['pose'].value, (- 1))
                image = np.concatenate((image, image_tmp), axis=(- 1))
                pose = np.concatenate((pose, pose_tmp), axis=(- 1))
                if (pose.shape[(- 1)] == (self.n + 1)):
                    valid = True
        return (image, pose)

    def get_data_by_id(self, id_list):
        """Concatenate images/poses for an explicit list of ids (capped at
        self.n extra views)."""
        if isinstance(id_list[0], bytes):
            id_list = [id.decode('utf-8') for id in id_list]
        id = id_list[0]
        image = (((self.data[id]['image'].value / 255.0) * 2) - 1)
        pose = np.expand_dims(self.data[id]['pose'].value, (- 1))
        for id_source in id_list[1:]:
            if (not (pose.shape[(- 1)] > self.n)):
                image_tmp = (((self.data[id_source]['image'].value / 255.0) * 2) - 1)
                pose_tmp = np.expand_dims(self.data[id_source]['pose'].value, (- 1))
                image = np.concatenate((image, image_tmp), axis=(- 1))
                pose = np.concatenate((pose, pose_tmp), axis=(- 1))
        return (image, pose)

    def get_data_by_id_tuple(self, id_target, id_input):
        image_target = (((self.data[id_target]['image'].value / 255.0) * 2) - 1)
        pose_target = np.expand_dims(self.data[id_target]['pose'].value, (- 1))
        image_input = (((self.data[id_input]['image'].value / 255.0) * 2) - 1)
        # NOTE(review): pose_input uses axis 0 while every other method uses
        # axis -1 -- confirm whether this asymmetry is intentional.
        pose_input = np.expand_dims(self.data[id_input]['pose'].value, 0)
        pose = np.concatenate((pose_target, pose_input), axis=(- 1))
        # NOTE(review): BUG -- `image` is undefined here (image_target and
        # image_input are never combined); this raises NameError at runtime.
        return (image, pose)

    def get_data_by_target(self, id_input, target_idx):
        if isinstance(id_input, bytes):
            id_input = id_input.decode('utf-8')
        input_image = (((self.data[id_input]['image'].value / 255.0) * 2) - 1)
        input_pose = np.expand_dims(self.data[id_input]['pose'].value, (- 1))
        id_num = int(id_input[(- num_digit):])
        id_target = (id_input[:(- num_digit)] + str(int((id_num + target_idx[0]))).zfill(num_digit))
        # NOTE(review): bare except silently falls back to the input view
        # when the target id is missing or malformed.
        try:
            target_image = (((self.data[id_target]['image'].value / 255.0) * 2) - 1)
            target_pose = np.expand_dims(self.data[id_target]['pose'].value, (- 1))
        except:
            target_image = input_image
            target_pose = input_pose
        pose = np.concatenate((target_pose, input_pose), axis=(- 1))
        # NOTE(review): BUG -- `image` is undefined here as well (only
        # input_image/target_image exist); raises NameError at runtime.
        return (image, pose, id_target)

    # NOTE(review): likely had an `@property` decorator stripped -- __len__
    # below does len(self.ids), which only works if ids is a property.
    def ids(self):
        return self._ids

    def __len__(self):
        return len(self.ids)

    def __repr__(self):
        return ('Dataset (%s, %d examples)' % (self.name, len(self)))
def data2csv(task_log, parser_output, postgres, fields):
    """Flatten a task log plus parser output into one CSV row.

    List-valued columns are serialized for PostgreSQL or Excel depending
    on `postgres`; the returned list is ordered according to `fields`.
    """
    row = {
        'filename': task_log['filename'],
        'basename': os.path.basename(task_log['filename']),
        'toolid': task_log['tool']['id'],
        'toolmode': task_log['tool']['mode'],
        'parser_version': parser_output['parser']['version'],
        'runid': task_log['runid'],
        'start': task_log['result']['start'],
        'duration': task_log['result']['duration'],
        'exit_code': task_log['result']['exit_code'],
        # De-duplicate finding labels and keep them in a stable order.
        'findings': sorted({sb.utils.str2label(f['name']) for f in parser_output['findings']}),
        'infos': parser_output['infos'],
        'errors': parser_output['errors'],
        'fails': parser_output['fails'],
    }
    serialize = list2postgres if postgres else list2excel
    for column in ('findings', 'infos', 'errors', 'fails'):
        row[column] = serialize(row[column])
    return [row[column] for column in fields]
class Layers(object):
    """Attribute access builds layer factory functions (caffe NetSpec style).

    ``Layers().Conv(...)`` creates ``Function('Conv', args, kwargs)`` and
    returns its top blob(s): zero tops -> the Function itself, one top ->
    that top, several -> the tuple of tops.
    """

    def __getattr__(self, name):
        def layer_fn(*args, **kwargs):
            fn = Function(name, args, kwargs)
            if fn.ntop == 0:
                return fn
            if fn.ntop == 1:
                return fn.tops[0]
            return fn.tops
        return layer_fn