code
stringlengths
101
5.91M
def l2norm(inputs, dim=-1):
    """L2-normalize `inputs` along `dim`.

    Rows whose norm is exactly zero are passed through unchanged (no NaNs).

    Args:
        inputs: tensor to normalize (any device/dtype supporting `torch.norm`).
        dim: dimension(s) along which to compute the norm. Default: last.

    Returns:
        Tensor of the same shape as `inputs` with unit L2 norm along `dim`
        (zero vectors stay zero).
    """
    norm = torch.norm(inputs, p=2, dim=dim, keepdim=True)
    # Add 1 to the denominator exactly where the norm is zero so zero vectors
    # divide by 1 instead of 0.  Built with zeros_like semantics on the input's
    # own device/dtype — the old code allocated on .cuda() and broke on CPU.
    safe_norm = norm + (norm == 0).to(inputs.dtype)
    return inputs / safe_norm
class RecordForm(RecordMeta[Form], Form):
    """Form describing a record (or tuple) array.

    Holds one sub-``Form`` per field in ``contents``; ``fields`` names them,
    or is ``None`` for tuple-like records (field names are then synthesized
    as stringified indices).
    """

    def __init__(self, contents, fields, *, parameters=None, form_key=None):
        if not isinstance(contents, Iterable):
            raise TypeError(
                "{} 'contents' must be iterable, not {}".format(
                    type(self).__name__, repr(contents)
                )
            )
        for content in contents:
            if not isinstance(content, Form):
                raise TypeError(
                    "{} all 'contents' must be Form subclasses, not {}".format(
                        type(self).__name__, repr(content)
                    )
                )
        if fields is not None and not isinstance(fields, Iterable):
            # FIX: previously interpolated `contents` into the message instead
            # of the offending `fields` value.
            raise TypeError(
                f"{type(self).__name__} 'fields' must be iterable, not {fields!r}"
            )
        self._fields = None if fields is None else list(fields)
        self._contents = list(contents)
        if fields is not None:
            assert len(self._fields) == len(self._contents)
        self._init(parameters=parameters, form_key=form_key)

    @property
    def contents(self):
        # FIX: restored @property — siblings access `self.fields`/`self.contents`
        # as attributes, not calls.
        return self._contents

    @property
    def fields(self) -> list[str]:
        # Tuple-like records synthesize "0", "1", ... as field names.
        if self._fields is None:
            return [str(i) for i in range(len(self._contents))]
        else:
            return self._fields

    def copy(self, contents=UNSET, fields=UNSET, *, parameters=UNSET, form_key=UNSET):
        """Return a copy, overriding any argument that is not UNSET."""
        return RecordForm(
            self._contents if contents is UNSET else contents,
            self._fields if fields is UNSET else fields,
            parameters=self._parameters if parameters is UNSET else parameters,
            form_key=self._form_key if form_key is UNSET else form_key,
        )

    @classmethod
    def simplified(cls, contents, fields, *, parameters=None, form_key=None):
        # Records have no simplification rules; just construct.
        # FIX: restored @classmethod (first parameter is `cls`).
        return cls(contents, fields, parameters=parameters, form_key=form_key)

    def __repr__(self):
        args = [repr(self._contents), repr(self._fields), *self._repr_args()]
        return '{}({})'.format(type(self).__name__, ', '.join(args))

    def _to_dict_part(self, verbose, toplevel):
        """JSON-ready dict for this form ('class': 'RecordArray')."""
        out = {'class': 'RecordArray'}
        contents_tolist = [
            content._to_dict_part(verbose, toplevel=False)
            for content in self._contents
        ]
        if self._fields is not None:
            out['fields'] = list(self._fields)
        else:
            out['fields'] = None
        out['contents'] = contents_tolist
        return self._to_dict_extra(out, verbose)

    @property
    def type(self):
        return ak.types.RecordType(
            [x.type for x in self._contents],
            self._fields,
            parameters=self._parameters,
        )

    def _columns(self, path, output, list_indicator):
        # Recurse into each field, extending the column path.
        for content, field in zip(self._contents, self.fields):
            content._columns((*path, field), output, list_indicator)

    def _prune_columns(self, is_inside_record_or_union: bool) -> (Self | None):
        """Drop fields whose contents prune to nothing; return None if the
        whole record becomes empty inside another record/union."""
        contents = []
        fields = []
        for content, field in zip(self._contents, self.fields):
            next_content = content._prune_columns(True)
            if next_content is None:
                continue
            contents.append(next_content)
            fields.append(field)
        if not fields and is_inside_record_or_union:
            return None
        else:
            return self.copy(contents=contents, fields=fields)

    def _select_columns(self, match_specifier: _SpecifierMatcher) -> Self:
        """Keep only the fields matched by `match_specifier` (recursively)."""
        contents = []
        fields = []
        for content, field in zip(self._contents, self.fields):
            next_match_specifier = match_specifier(field, next_match_if_empty=True)
            if next_match_specifier is None:
                continue
            next_content = content._select_columns(next_match_specifier)
            contents.append(next_content)
            fields.append(field)
        return self.copy(contents=contents, fields=fields)

    def _column_types(self):
        # Concatenate the leaf column types of every field.
        return sum((x._column_types() for x in self._contents), ())

    def __setstate__(self, state):
        if isinstance(state, dict):
            # Pickles from Awkward 2.x: state is the instance __dict__.
            self.__dict__.update(state)
        else:
            # Pickles from Awkward 1.x: tuple state; prefix form_key with the
            # partition tag it implicitly carried.
            has_identities, parameters, form_key, recordlookup, contents = state
            if form_key is not None:
                form_key = 'part0-' + form_key
            self.__init__(
                contents, recordlookup, parameters=parameters, form_key=form_key
            )

    def _expected_from_buffers(
        self, getkey: Callable[([Form, str], str)], recursive: bool
    ) -> Iterator[tuple[(str, DType)]]:
        # A record itself owns no buffers; only its contents do.
        if recursive:
            for content in self._contents:
                yield from content._expected_from_buffers(getkey, recursive)

    def _is_equal_to(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        # Field order is irrelevant: compare as a set, then match each of our
        # contents against other's content under the same field name.
        computed_fields_set = set(self.fields)
        return (
            self._is_equal_to_generic(other, all_parameters, form_key)
            and self.is_tuple == other.is_tuple
            and len(self._contents) == len(other._contents)
            and all(f in computed_fields_set for f in other.fields)
            and all(
                content._is_equal_to(other.content(field), all_parameters, form_key)
                for field, content in zip(self.fields, self._contents)
            )
        )
def loss_boundary(gtr, net, npoints=1000, dim=3, x=None, use_surf_points=False):
    """Mean-squared boundary loss between network `net` and ground truth `gtr`.

    If `x` is None, `npoints` points are drawn with `sample_points` (surface
    points when `use_surf_points` is True); otherwise the caller-provided
    points are used as-is.  For surface points the regression target is zero
    (the field should vanish on the surface); otherwise the target is gtr(x).

    Returns:
        (loss, x): scalar mean loss and the points that were evaluated.
    """
    if (x is None):
        (x, _) = sample_points(npoints, dim=dim, sample_surf_points=use_surf_points, invert_sampling=False, out_nf=gtr, deform=None)
        # NOTE(review): hard-coded .cuda() — this path fails on CPU-only hosts.
        x = x.detach().cuda().float()
        bs = 1
        x = x.view(bs, npoints, dim)
    elif (len(x.size()) == 2):
        # Unbatched (N, dim) input: treated as a batch of one.
        # NOTE(review): x is not reshaped to (1, N, dim) in this branch —
        # presumably net/gtr accept 2-D input here; confirm against callers.
        (bs, npoints) = (1, x.size(0))
    else:
        (bs, npoints) = (x.size(0), x.size(1))
        x = x.view(bs, npoints, dim)
    if use_surf_points:
        # On-surface target is exactly zero.
        net_y = net(x)
        loss_all = F.mse_loss(net_y, torch.zeros_like(net_y), reduction='none')
    else:
        net_y = net(x)
        gtr_y = gtr(x)
        loss_all = F.mse_loss(net_y, gtr_y, reduction='none')
    loss_all = loss_all.view(bs, npoints)
    loss = loss_all.mean()
    return (loss, x)
@_numpy_output(check_dtype=True)
def test_ufunc_ldexp_fs(A: dace.float32[10], B: dace.int32[10]):
    """Elementwise np.ldexp (A * 2**B); output compared to NumPy, dtype-checked.

    FIX: the decorator had lost its '@' and was a dead bare expression.
    NOTE(review): confirm the decorator factory's full name in the test module.
    """
    return np.ldexp(A, B)
def gen_interp_video(G, mp4: str, seeds, shuffle_seed=None, w_frames=(60 * 4), kind='cubic', grid_dims=(1, 1), num_keyframes=None, wraps=2, truncation_psi=1, device=torch.device('cuda'), centroids_path=None, class_idx=None, **video_kwargs):
    """Render a latent-space interpolation video for generator `G` into `mp4`.

    Each grid cell interpolates (scipy interp1d, `kind`) through `num_keyframes`
    W latents derived from `seeds`, spending `w_frames` frames per keyframe at
    60 fps.  `wraps` tiles the keyframes so cubic interpolation is smooth at
    the loop boundaries.
    """
    grid_w = grid_dims[0]
    grid_h = grid_dims[1]
    if (num_keyframes is None):
        if ((len(seeds) % (grid_w * grid_h)) != 0):
            raise ValueError('Number of input seeds must be divisible by grid W*H')
        num_keyframes = (len(seeds) // (grid_w * grid_h))
    # Repeat the given seeds cyclically to fill every grid cell x keyframe slot.
    all_seeds = np.zeros(((num_keyframes * grid_h) * grid_w), dtype=np.int64)
    for idx in range(((num_keyframes * grid_h) * grid_w)):
        all_seeds[idx] = seeds[(idx % len(seeds))]
    if (shuffle_seed is not None):
        rng = np.random.RandomState(seed=shuffle_seed)
        rng.shuffle(all_seeds)
    if (class_idx is None):
        class_idx = ([None] * len(seeds))
    elif (len(class_idx) == 1):
        # NOTE(review): this builds a list of one-element lists; presumably
        # get_w_from_seed accepts that (or class_idx[0] was intended) — confirm.
        class_idx = ([class_idx] * len(seeds))
    # NOTE(review): only holds when len(seeds) == num_keyframes*grid_h*grid_w,
    # i.e. when num_keyframes was derived above rather than passed explicitly.
    assert (len(all_seeds) == len(class_idx)), 'Seeds and class-idx should have the same length'
    # One truncated W latent per (seed, class) pair.
    ws = []
    for (seed, cls) in zip(all_seeds, class_idx):
        ws.append(gen_utils.get_w_from_seed(G, 1, device, truncation_psi, seed=seed, centroids_path=centroids_path, class_idx=cls))
    ws = torch.cat(ws)
    _ = G.synthesis(ws[:1])  # warm-up pass before timing-sensitive frame loop
    ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:])
    # Build a cyclic interpolator per grid cell: keyframes tiled 2*wraps+1
    # times over an extended x-range so the in-range segment is smooth.
    grid = []
    for yi in range(grid_h):
        row = []
        for xi in range(grid_w):
            x = np.arange(((- num_keyframes) * wraps), (num_keyframes * (wraps + 1)))
            y = np.tile(ws[yi][xi].cpu().numpy(), [((wraps * 2) + 1), 1, 1])
            interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0)
            row.append(interp)
        grid.append(row)
    # Synthesize every frame, composite the grid, and stream into the writer.
    video_out = imageio.get_writer(mp4, mode='I', fps=60, codec='libx264', **video_kwargs)
    for frame_idx in tqdm(range((num_keyframes * w_frames))):
        imgs = []
        for yi in range(grid_h):
            for xi in range(grid_w):
                interp = grid[yi][xi]
                w = torch.from_numpy(interp((frame_idx / w_frames))).to(device)
                img = G.synthesis(ws=w.unsqueeze(0), noise_mode='const')[0]
                imgs.append(img)
        video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h))
    video_out.close()
class TransitionProbabilities(ProcessingPlasmaProperty):
    """Plasma property computing macro-atom transition probabilities.

    Filters and the transition-probability coefficient matrix are built
    lazily on the first `calculate` call, then reused.
    """
    outputs = ('transition_probabilities',)

    def __init__(self, plasma_parent):
        super(TransitionProbabilities, self).__init__(plasma_parent)
        # Lazy-initialization flag; flipped off after the first calculate().
        self.initialize = True
        # Whether probabilities are normalized per macro-atom block.
        self.normalize = True

    def calculate(self, atomic_data, beta_sobolev, j_blues, stimulated_emission_factor, tau_sobolevs):
        """Return a DataFrame of transition probabilities (lines x shells),
        or None before any j_blues are available."""
        if len(j_blues) == 0:
            return None
        macro_atom_data = self._get_macro_atom_data(atomic_data)
        if self.initialize:
            self.initialize_macro_atom_transition_type_filters(atomic_data, macro_atom_data)
            self.transition_probability_coef = self._get_transition_probability_coefs(macro_atom_data)
            self.initialize = False
        transition_probabilities = self._calculate_transition_probability(
            macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor
        )
        transition_probabilities = pd.DataFrame(
            transition_probabilities,
            index=macro_atom_data.transition_line_id,
            columns=tau_sobolevs.columns,
        )
        return transition_probabilities

    def _calculate_transition_probability(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
        """Fill the raw probability matrix via the compiled macro_atom kernel."""
        transition_probabilities = np.empty(
            (self.transition_probability_coef.shape[0], beta_sobolev.shape[1])
        )
        transition_type = macro_atom_data.transition_type.values
        lines_idx = macro_atom_data.lines_idx.values
        tpos = macro_atom_data.transition_probability.values
        macro_atom.calculate_transition_probabilities(
            tpos,
            beta_sobolev.values,
            j_blues.values,
            stimulated_emission_factor,
            transition_type,
            lines_idx,
            self.block_references,
            transition_probabilities,
            self.normalize,
        )
        return transition_probabilities

    def calculate_transition_probabilities(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
        """Pure-pandas fallback path (see prepare_transition_probabilities)."""
        transition_probabilities = self.prepare_transition_probabilities(
            macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor
        )
        return transition_probabilities

    def initialize_macro_atom_transition_type_filters(self, atomic_data, macro_atom_data):
        """Cache the upward-transition mask, its line indices, and block bounds."""
        self.transition_up_filter = (macro_atom_data.transition_type.values == 1)
        self.transition_up_line_filter = macro_atom_data.lines_idx.values[self.transition_up_filter]
        self.block_references = np.hstack(
            (atomic_data.macro_atom_references.block_references, len(macro_atom_data))
        )

    @staticmethod
    def _get_transition_probability_coefs(macro_atom_data):
        # FIX: restored @staticmethod — the method takes no `self`, so calling
        # it as self._get_transition_probability_coefs(data) would otherwise
        # bind `self` to `macro_atom_data`.
        return macro_atom_data.transition_probability.values[np.newaxis].T

    def prepare_transition_probabilities(self, macro_atom_data, beta_sobolev, j_blues, stimulated_emission_factor):
        """Coefficients x beta_sobolev, with upward transitions additionally
        scaled by j_blue and the stimulated-emission factor."""
        current_beta_sobolev = beta_sobolev.values.take(
            macro_atom_data.lines_idx.values, axis=0, mode='raise'
        )
        transition_probabilities = (self.transition_probability_coef * current_beta_sobolev)
        j_blues = j_blues.take(self.transition_up_line_filter, axis=0, mode='raise')
        macro_stimulated_emission = stimulated_emission_factor.take(
            self.transition_up_line_filter, axis=0, mode='raise'
        )
        transition_probabilities[self.transition_up_filter] *= (j_blues * macro_stimulated_emission)
        return transition_probabilities

    def _normalize_transition_probabilities(self, transition_probabilities):
        macro_atom.normalize_transition_probabilities(
            transition_probabilities, self.block_references
        )

    @staticmethod
    def _get_macro_atom_data(atomic_data):
        # FIX: restored @staticmethod (no `self` parameter).
        try:
            return atomic_data.macro_atom_data
        except Exception:
            # FIX: was a bare `except:`; keep the best-effort fallback but stop
            # swallowing SystemExit/KeyboardInterrupt.
            logger.debug('Macro Atom Data was not found. Instead returning All Macro Atom Data')
            return atomic_data.macro_atom_data_all
def normalize(v):
    """Scale `v` to unit Euclidean norm; a zero vector is returned unchanged."""
    magnitude = np.linalg.norm(v)
    return v if magnitude == 0 else v / magnitude
class StateDumpingRNN(tf.contrib.rnn.RNNCell):
    """RNN cell wrapper that outputs the wrapped LSTM cell's *state* at every
    step (useful for inspecting hidden states with dynamic_rnn)."""

    def __init__(self, lstm):
        super(StateDumpingRNN, self).__init__()
        self.lstm_cell = lstm

    @property
    def state_size(self):
        # FIX: RNNCell contract requires state_size/output_size to be
        # properties; the decorators had been stripped.
        return self.lstm_cell.state_size

    @property
    def output_size(self):
        # The "output" is the state itself, hence state_size here as well.
        return self.lstm_cell.state_size

    def call(self, inputs, state):
        (output, state) = self.lstm_cell(inputs, state)
        # Discard the normal output; emit the state in its place.
        return (state, state)
class DCGAN(nn.Module):
    """GAN wrapper holding a Hydra-instantiated generator/discriminator pair,
    with backward helpers for the standard non-saturating BCE losses."""

    def __init__(self, gen_cfg, disc_cfg, **kwargs):
        super().__init__()
        self.generator = hydra.utils.instantiate(gen_cfg)
        self.discriminator = hydra.utils.instantiate(disc_cfg)

    def gen_backward(self, batch_size):
        """Generator step: label fakes as real (1.0), backprop BCE.

        Returns (gen_loss, fake_samples); gradients are accumulated, the
        optimizer step is left to the caller.
        """
        fake_samples = self.generator.sample(batch_size)
        fake_probs = self.discriminator(fake_samples)
        labels = try_cuda(torch.full((batch_size,), 1.0))
        gen_loss = F.binary_cross_entropy(fake_probs, labels)
        gen_loss.backward()
        return (gen_loss, fake_samples)

    def disc_backward(self, real_samples):
        """Discriminator step: real->1 loss and fake->0 loss, each backprop'd
        separately.  Returns their sum."""
        batch_size = real_samples.size(0)
        real_probs = self.discriminator(real_samples)
        labels = try_cuda(torch.full((batch_size,), 1.0))
        real_loss = F.binary_cross_entropy(real_probs, labels)
        real_loss.backward()
        # Only the sampling is detached: the discriminator forward must stay in
        # the graph so fake_loss.backward() has something to differentiate.
        with torch.no_grad():
            fake_samples = self.generator.sample(batch_size)
        fake_probs = self.discriminator(fake_samples)
        labels = try_cuda(torch.full((batch_size,), 0.0))
        fake_loss = F.binary_cross_entropy(fake_probs, labels)
        fake_loss.backward()
        return (real_loss + fake_loss)
def labeled_eval(corefes, goldfes, predargmax, notanfeid):
    """Labeled frame-element evaluation.

    Counts (true positives, false positives, false negatives) over gold and
    predicted FE span sets, skipping `notanfeid`.  Non-core FEs receive half
    credit; when `corefes` is the empty dict, everything counts fully.

    Returns:
        (ltp, lfp, lfn) as floats.
    """

    def weight(fe_id):
        # Full credit for core FEs (or when no core set is given), half otherwise.
        if corefes == {}:
            return 1.0
        return 1.0 if fe_id in corefes else 0.5

    tp = fp = fn = 0.0
    for fe in goldfes:
        if fe == notanfeid:
            continue
        matched = fe in predargmax and set(predargmax[fe]) == set(goldfes[fe])
        if matched:
            tp += weight(fe)
        else:
            fn += weight(fe)
    for fe in predargmax:
        if fe == notanfeid:
            continue
        if fe not in goldfes or set(goldfes[fe]) != set(predargmax[fe]):
            fp += weight(fe)
    return (tp, fp, fn)
class LayerNorm(Module): __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[(int, ...)] eps: float elementwise_affine: bool def __init__(self, normalized_shape: _shape_t, eps: float=1e-05, elementwise_affine: bool=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super(LayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = (normalized_shape,) self.normalized_shape = tuple(normalized_shape) self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input: Tensor) -> Tensor: return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps) def extra_repr(self) -> str: return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class Divisor_curve(Divisor_generic):
    """Divisor on a curve: a formal ZZ-linear combination of points, stored
    internally as (multiplicity, ideal) pairs."""

    def __init__(self, v, parent=None, check=True, reduce=True):
        """Build a divisor from `v`: a point, or a list of points and/or
        (multiplicity, point) pairs.  `parent` is inferred from the first
        entry's scheme when omitted.

        NOTE(review): `reduce` is accepted but Divisor_generic is always
        called with reduce=True — confirm whether that is intentional.
        """
        from sage.schemes.generic.divisor_group import DivisorGroup_curve
        if not isinstance(v, (list, tuple)):
            v = [(1, v)]
        if parent is None:
            if v:
                t = v[0]
                if isinstance(t, tuple) and len(t) == 2:
                    try:
                        C = t[1].scheme()
                    except (TypeError, AttributeError):
                        # FIX: the message previously contained a literal '%s'
                        # because the value was never interpolated.
                        raise TypeError('Argument v (= %s) must consist of multiplicities and points on a scheme.' % v)
                else:
                    try:
                        C = t.scheme()
                    except TypeError:
                        raise TypeError('Argument v (= %s) must consist of multiplicities and points on a scheme.' % v)
                parent = DivisorGroup_curve(C)
            else:
                raise TypeError('Argument v (= %s) must consist of multiplicities and points on a scheme.' % v)
        else:
            if not isinstance(parent, DivisorGroup_curve):
                raise TypeError('parent (of type %s) must be a DivisorGroup_curve' % type(parent))
            C = parent.scheme()
        if len(v) < 1:
            check = False
        know_points = False
        if check:
            w = []
            points = []
            know_points = True
            for t in v:
                if isinstance(t, tuple) and len(t) == 2:
                    n = ZZ(t[0])
                    I = t[1]
                    points.append((n, I))
                else:
                    n = ZZ(1)
                    I = t
                # Scheme points are converted to ideals; anything else means we
                # no longer know the point representation.
                if is_SchemeMorphism(I):
                    I = CurvePointToIdeal(C, I)
                else:
                    know_points = False
                w.append((n, I))
            v = w
        Divisor_generic.__init__(self, v, check=False, reduce=True, parent=parent)
        if know_points:
            self._points = points

    def _repr_(self):
        return repr_lincomb([(tuple(I.gens()), c) for (c, I) in self])

    def support(self):
        """Points appearing with nonzero coefficient (cached)."""
        try:
            return self._support
        except AttributeError:
            try:
                pts = self._points
            except AttributeError:
                # Recover points from the stored ideals via rational_points().
                self._points = [
                    (m, self.scheme().ambient_space().subscheme(p).rational_points()[0])
                    for (m, p) in self
                ]
                pts = self._points
            self._support = [s[1] for s in pts]
            return self._support

    def coefficient(self, P):
        """Multiplicity of the point P in this divisor (0 if absent)."""
        P = self.parent().scheme()(P)
        if P not in self.support():
            return self.base_ring().zero()
        (t, i) = search(self.support(), P)
        assert t
        try:
            return self._points[i][0]
        except AttributeError:
            raise NotImplementedError
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one checkpoint tensor into the matching conv / layer-norm
    parameter of `feature_extractor`.

    Names look like '...conv_layers.<layer>.<type>...': type 0 is the conv,
    type 2 the (layer/group) norm — group norm only exists on layer 0.
    Unmatched names are appended to `unused_weights`.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if 'bias' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but '
                f'{feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif 'weight' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but '
                f'{feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if 'bias' in name:
            # FIX: the assert message previously indexed `feature_extractor[layer_id]`
            # directly instead of `.conv_layers[layer_id]`.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but '
                f'{feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # FIX: log message said "weight" for the bias tensor.
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif 'weight' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but '
                f'{feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def _parse_dim_atom(das, atom):
    """Resolve a dimension AST atom to a symbolic expression.

    When the symbolic conversion yields a data descriptor instead of a
    symbol, fall back to re-parsing the unparsed source text.
    """
    parsed = pyexpr_to_symbolic(das, atom)
    if not isinstance(parsed, data.Data):
        return parsed
    return pystr_to_symbolic(astutils.unparse(atom))
def test_multiple_and_dlrep_binding():
    """Conjunction of DLNotEqual and DLRep statements with secret *binding*.

    The verifier side rebuilds the same conjunction with fresh Secret objects
    (s0, s2) mirroring the prover's binding pattern; the sigma protocol must
    still verify because bound secrets line up positionally.
    """
    secrets = get_secrets_new(4)
    generators = make_generators(4)
    lhs_values = [(x.value * g) for (x, g) in zip(secrets, generators)]
    # Prover side: p1 is unbound; p3/p4 share secrets with p2 via bind=True.
    p1 = DLNotEqual([lhs_values[0], generators[0]], [lhs_values[1], generators[1]], secrets[0], bind=False)
    p2 = DLRep(lhs_values[2], (secrets[2] * generators[2]))
    p3 = DLNotEqual([lhs_values[2], generators[2]], [lhs_values[1], generators[1]], secrets[2], bind=True)
    p4 = DLNotEqual([lhs_values[1], generators[1]], [lhs_values[3], generators[3]], secrets[1], bind=True)
    andp = (((p1 & p2) & p3) & p4)
    # Verifier side: fresh secrets reproducing the same binding structure.
    s0 = Secret()
    s2 = Secret()
    p1prime = DLNotEqual([lhs_values[0], generators[0]], [lhs_values[1], generators[1]], s0, bind=False)
    p2prime = DLRep(lhs_values[2], (s2 * generators[2]))
    p3prime = DLNotEqual([lhs_values[2], generators[2]], [lhs_values[1], generators[1]], s2, bind=True)
    # NOTE(review): p4prime reuses s0 where the prover used secrets[1] —
    # presumably a deliberate placeholder on the verifier side; confirm.
    p4prime = DLNotEqual([lhs_values[1], generators[1]], [lhs_values[3], generators[3]], s0, bind=True)
    andp1 = (((p1prime & p2prime) & p3prime) & p4prime)
    protocol = SigmaProtocol(andp1.get_verifier(), andp.get_prover())
    assert protocol.verify()
def cb(option, value, parser):
    """optparse callback that greedily collects following non-flag arguments.

    Starting from the option's own `value`, consumes args from parser.rargs
    until the next '-'-prefixed token, merges with any previously collected
    values for this dest, and stores the list on parser.values.
    """
    arguments = [value]
    for arg in parser.rargs:
        if (arg[0] != '-'):
            arguments.append(arg)
        else:
            # Stop at the next option flag and consume what we took.
            # NOTE(review): len(arguments) includes `value`, which optparse
            # already removed from rargs — this looks like it trims one token
            # too many; confirm against the option's nargs configuration.
            del parser.rargs[:len(arguments)]
            break
    # NOTE(review): if the loop ends without seeing a '-' token, rargs is never
    # trimmed, so the collected args would also be re-parsed positionally —
    # confirm whether callers rely on that.
    if getattr(parser.values, option.dest):
        arguments.extend(getattr(parser.values, option.dest))
    setattr(parser.values, option.dest, arguments)
class KMeans(_BaseKMeans):
    """K-Means clustering estimator (Lloyd or Elkan algorithm)."""

    _parameter_constraints: dict = {
        **_BaseKMeans._parameter_constraints,
        'copy_x': ['boolean'],
        'algorithm': [StrOptions({'lloyd', 'elkan', 'auto', 'full'}, deprecated={'auto', 'full'})],
    }

    def __init__(self, n_clusters=8, *, init='k-means++', n_init='auto', max_iter=300,
                 tol=0.0001, verbose=0, random_state=None, copy_x=True, algorithm='lloyd'):
        super().__init__(n_clusters=n_clusters, init=init, n_init=n_init,
                         max_iter=max_iter, tol=tol, verbose=verbose,
                         random_state=random_state)
        self.copy_x = copy_x
        self.algorithm = algorithm

    def _check_params_vs_input(self, X):
        """Validate parameters against X and resolve deprecated/nonsensical
        algorithm choices to 'lloyd'."""
        super()._check_params_vs_input(X, default_n_init=10)
        self._algorithm = self.algorithm
        if (self._algorithm in ('auto', 'full')):
            warnings.warn(f"algorithm='{self._algorithm}' is deprecated, it will be removed in 1.3. Using 'lloyd' instead.", FutureWarning)
            self._algorithm = 'lloyd'
        if ((self._algorithm == 'elkan') and (self.n_clusters == 1)):
            warnings.warn("algorithm='elkan' doesn't make sense for a single cluster. Using 'lloyd' instead.", RuntimeWarning)
            self._algorithm = 'lloyd'

    def _warn_mkl_vcomp(self, n_active_threads):
        warnings.warn(f'KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS={n_active_threads}.')

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """Compute k-means clustering on X; best of `n_init` restarts wins.

        FIX: the @_fit_context decorator had lost its '@' and was a dead
        bare expression.
        """
        X = self._validate_data(X, accept_sparse='csr', dtype=[np.float64, np.float32],
                                order='C', copy=self.copy_x, accept_large_sparse=False)
        self._check_params_vs_input(X)
        random_state = check_random_state(self.random_state)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        self._n_threads = _openmp_effective_n_threads()
        init = self.init
        init_is_array_like = _is_arraylike_not_scalar(init)
        if init_is_array_like:
            init = check_array(init, dtype=X.dtype, copy=True, order='C')
            self._validate_center_shape(X, init)
        if (not sp.issparse(X)):
            # Center dense data for numerical stability; undone before return.
            X_mean = X.mean(axis=0)
            X -= X_mean
            if init_is_array_like:
                init -= X_mean
        x_squared_norms = row_norms(X, squared=True)
        if (self._algorithm == 'elkan'):
            kmeans_single = _kmeans_single_elkan
        else:
            kmeans_single = _kmeans_single_lloyd
            self._check_mkl_vcomp(X, X.shape[0])
        (best_inertia, best_labels) = (None, None)
        for i in range(self._n_init):
            centers_init = self._init_centroids(X, x_squared_norms=x_squared_norms,
                                                init=init, random_state=random_state,
                                                sample_weight=sample_weight)
            if self.verbose:
                print('Initialization complete')
            (labels, inertia, centers, n_iter_) = kmeans_single(
                X, sample_weight, centers_init, max_iter=self.max_iter,
                verbose=self.verbose, tol=self._tol, n_threads=self._n_threads)
            # Keep the lowest-inertia run among *distinct* clusterings.
            if ((best_inertia is None) or ((inertia < best_inertia) and (not _is_same_clustering(labels, best_labels, self.n_clusters)))):
                best_labels = labels
                best_centers = centers
                best_inertia = inertia
                best_n_iter = n_iter_
        if (not sp.issparse(X)):
            if (not self.copy_x):
                # Restore the caller's array when we were allowed to mutate it.
                X += X_mean
            best_centers += X_mean
        distinct_clusters = len(set(best_labels))
        if (distinct_clusters < self.n_clusters):
            warnings.warn('Number of distinct clusters ({}) found smaller than n_clusters ({}). Possibly due to duplicate points in X.'.format(distinct_clusters, self.n_clusters), ConvergenceWarning, stacklevel=2)
        self.cluster_centers_ = best_centers
        self._n_features_out = self.cluster_centers_.shape[0]
        self.labels_ = best_labels
        self.inertia_ = best_inertia
        self.n_iter_ = best_n_iter
        return self
class Base1(nn.Module):
    """VGG16-style convolutional backbone built from BaseConv blocks.

    forward() returns four intermediate feature maps (after stages 2-5),
    each preceding the next 2x2 max-pool.
    """

    def __init__(self):
        super(Base1, self).__init__()
        self.maxpool = nn.MaxPool2d(2, 2)
        # Stage 1: 3 -> 64
        self.conv1_1_2 = BaseConv(3, 64, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv1_2_2 = BaseConv(64, 64, 3, 1, activation=nn.ReLU(), use_bn=True)
        # Stage 2: 64 -> 128
        self.conv2_1_2 = BaseConv(64, 128, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv2_2_2 = BaseConv(128, 128, 3, 1, activation=nn.ReLU(), use_bn=True)
        # Stage 3: 128 -> 256 (three convs)
        self.conv3_1_2 = BaseConv(128, 256, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv3_2_2 = BaseConv(256, 256, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv3_3_2 = BaseConv(256, 256, 3, 1, activation=nn.ReLU(), use_bn=True)
        # Stage 4: 256 -> 512 (three convs)
        self.conv4_1_2 = BaseConv(256, 512, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv4_2_2 = BaseConv(512, 512, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv4_3_2 = BaseConv(512, 512, 3, 1, activation=nn.ReLU(), use_bn=True)
        # Stage 5: 512 -> 512 (three convs)
        self.conv5_1_2 = BaseConv(512, 512, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv5_2_2 = BaseConv(512, 512, 3, 1, activation=nn.ReLU(), use_bn=True)
        self.conv5_3_2 = BaseConv(512, 512, 3, 1, activation=nn.ReLU(), use_bn=True)

    def forward(self, x):
        """Return (s1, s2, s3, s4): features after stages 2, 3, 4, 5."""
        x = self.conv1_1_2(x)
        x = self.conv1_2_2(x)
        x = self.maxpool(x)
        x = self.conv2_1_2(x)
        x = self.conv2_2_2(x)
        s1 = x  # 1/2 resolution, 128 channels
        x = self.maxpool(x)
        x = self.conv3_1_2(x)
        x = self.conv3_2_2(x)
        x = self.conv3_3_2(x)
        s2 = x  # 1/4 resolution, 256 channels
        x = self.maxpool(x)
        x = self.conv4_1_2(x)
        x = self.conv4_2_2(x)
        x = self.conv4_3_2(x)
        s3 = x  # 1/8 resolution, 512 channels
        x = self.maxpool(x)
        x = self.conv5_1_2(x)
        x = self.conv5_2_2(x)
        x = self.conv5_3_2(x)
        s4 = x  # 1/16 resolution, 512 channels
        return (s1, s2, s3, s4)
@pytest.mark.parametrize('sampled_app_train_test, verbose, log_file', [(1000, 0, 'log_file.log')], indirect=['sampled_app_train_test'])
def test_logging(capsys, tmpdir, sampled_app_train_test, sampled_app_roles, binary_task, verbose, log_file):
    """Check TabularAutoML logging: with verbose=0 nothing hits stdout/stderr,
    and when log_file is given the file is actually created.

    FIX: the '@pytest.mark' prefix of the parametrize decorator had been
    stripped, leaving a dead '.parametrize(...)' expression.
    """
    (train, _) = sampled_app_train_test
    if log_file:
        # Redirect the log file into the per-test temp dir.
        log_file = os.path.join(tmpdir, 'log_file.log')
    automl = TabularAutoML(
        task=binary_task,
        tuning_params={'max_tuning_iter': 3, 'max_tuning_time': 30},
        lgb_params={'default_params': {'num_trees': 5}},
    )
    automl.fit_predict(train, roles=sampled_app_roles, verbose=verbose, log_file=log_file)
    (sys_out, sys_err) = capsys.readouterr()
    if log_file:
        assert os.path.exists(log_file)
    if (verbose == 0):
        assert (sys_out == '')
        assert (sys_err == '')
def state_form_y(y, p, u, geometry):
    """Weak form of the state (Poisson-type) equation on `geometry`:
    integral of grad(y)·grad(p) dx minus the control source term u*p dx,
    with p the test/adjoint function."""
    return ((dot(grad(y), grad(p)) * geometry.dx) - ((u * p) * geometry.dx))
def get_nmi(thresh=0.5):
    """Overlapping NMI of thresholded community affiliations vs. ground truth.

    NOTE(review): relies on globals defined in the surrounding script/notebook:
    gnn (model), x_norm/adj_norm (inputs), Z_gt (ground-truth memberships).
    """
    gnn.eval()  # disable dropout/batch-norm updates for evaluation
    Z = F.relu(gnn(x_norm, adj_norm))
    # Binarize affiliation strengths at `thresh`.
    Z_pred = (Z.cpu().detach().numpy() > thresh)
    nmi = nocd.metrics.overlapping_nmi(Z_pred, Z_gt)
    return nmi
def CalculateEachMoranAuto(ProteinSequence, AAP, AAPName):
    """Moran autocorrelation descriptors (lags 1..30) for one amino-acid
    property `AAP` over `ProteinSequence`.

    Returns:
        dict mapping 'MoranAuto<AAPName><lag>' to the rounded descriptor.
    """
    AAPdic = NormalizeEachAAP(AAP)
    # Mean property value over the sequence, computed from residue counts.
    cds = 0
    for i in AALetter:
        cds = (cds + (ProteinSequence.count(i) * AAPdic[i]))
    Pmean = (cds / len(ProteinSequence))
    # Per-residue property values; K is their population variance.
    cc = []
    for i in ProteinSequence:
        cc.append(AAPdic[i])
    K = (_std(cc, ddof=0) ** 2)
    # NOTE(review): K == 0 for a homopolymer sequence would raise
    # ZeroDivisionError below — confirm callers exclude that case.
    Result = {}
    for i in range(1, 31):
        temp = 0
        for j in range((len(ProteinSequence) - i)):
            temp = (temp + ((AAPdic[ProteinSequence[j]] - Pmean) * (AAPdic[ProteinSequence[(j + i)]] - Pmean)))
        if ((len(ProteinSequence) - i) == 0):
            # Lag equals sequence length: fall back to dividing by full length
            # (temp is 0 here anyway, since the inner loop was empty).
            Result[(('MoranAuto' + AAPName) + str(i))] = round(((temp / len(ProteinSequence)) / K), 3)
        else:
            Result[(('MoranAuto' + AAPName) + str(i))] = round(((temp / (len(ProteinSequence) - i)) / K), 3)
    return Result
def is_a(x, n=None) -> bool:
    """Check that `x` is a list/tuple whose entries are nondecreasing, start
    at 1, and never exceed their 1-based position; optionally check length n."""
    if not isinstance(x, (list, tuple)):
        return False
    lower = 1
    for pos, value in enumerate(x):
        # Each entry must lie between the previous entry and pos+1.
        if not (lower <= value <= pos + 1):
            return False
        lower = value
    return n is None or n == len(x)
def unzip_namedshape(namedshape):
    """Transpose a namedshape (OrderedDict or iterable of (name, size) pairs)
    into two iterables: names and sizes.

    Raises:
        RuntimeError: if the input is neither iterable nor a tuple, or empty.
    """
    if isinstance(namedshape, OrderedDict):
        namedshape = namedshape.items()
    iterable = hasattr(namedshape, '__iter__')
    if not iterable and not isinstance(namedshape, tuple):
        raise RuntimeError(
            'Expected namedshape to be OrderedDict or iterable of tuples, got: {}'.format(type(namedshape))
        )
    if len(namedshape) == 0:
        raise RuntimeError('Expected namedshape to non-empty.')
    return zip(*namedshape)
class EthereumManagerImportEthereumAccount(VirtualFunctionTool):
    """Declarative tool spec: import an Ethereum account from a private key.

    Pure metadata for the virtual-tool framework — the parameters/returns/
    exceptions lists define the tool's call signature; no logic lives here.
    """
    name = 'EthereumManagerImportEthereumAccount'
    summary = 'Import an existing Ethereum account using its private key.'
    # Single required input: the raw private key string.
    parameters: List[ArgParameter] = [{'name': 'private_key', 'type': 'string', 'description': 'The private key of the existing Ethereum account.', 'required': True}]
    # Output: the derived account address.
    returns: List[ArgReturn] = [{'name': 'account_address', 'type': 'string', 'description': 'The Ethereum address of the imported account.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': 'The private key is invalid.'}]
def read_apf_file(path: str, time_and_val: bool=False) -> Tuple[(str, str, List[Entity], List[Relation], List[Event])]:
    """Parse one ACE APF XML annotation file.

    Args:
        path: path to the .apf.xml file (read as UTF-8, parsed with lxml-xml).
        time_and_val: also collect <value> and <timex2> mentions as entities.

    Returns:
        (doc_id, source, entity_list, relation_list, event_list).
    """
    data = open(path, 'r', encoding='utf-8').read()
    soup = BeautifulSoup(data, 'lxml-xml')
    root = soup.find('source_file')
    source = root['SOURCE']
    doc = root.find('document')
    doc_id = doc['DOCID']
    (entity_list, relation_list, event_list) = ([], [], [])
    # --- entities: each mention's head charseq gives the span ---
    for entity in doc.find_all('entity'):
        entity_id = entity['ID']
        entity_type = entity['TYPE']
        entity_subtype = entity['SUBTYPE']
        for entity_mention in entity.find_all('entity_mention'):
            mention_id = entity_mention['ID']
            mention_type = entity_mention['TYPE']
            head = entity_mention.find('head').find('charseq')
            (start, end, text) = (int(head['START']), int(head['END']), head.text)
            entity_list.append(Entity(start, end, text, entity_id, mention_id, entity_type, entity_subtype, mention_type))
    if time_and_val:
        # --- <value> mentions, typed as VALUE ---
        for entity in doc.find_all('value'):
            entity_type = entity['TYPE']
            entity_subtype = entity.get('SUBTYPE', None)
            for entity_mention in entity.find_all('value_mention'):
                mention_id = entity_mention['ID']
                mention_type = 'VALUE'
                extent = entity_mention.find('extent').find('charseq')
                (start, end, text) = (int(extent['START']), int(extent['END']), extent.text)
                entity_list.append(Entity(start, end, text, entity_id, mention_id, entity_type, entity_subtype, mention_type))
        # --- <timex2> mentions, typed as TIME ---
        for entity in doc.find_all('timex2'):
            entity_id = entity['ID']
            value = entity.get('VAL', None)
            for entity_mention in entity.find_all('timex2_mention'):
                mention_id = entity_mention['ID']
                mention_type = 'TIME'
                extent = entity_mention.find('extent').find('charseq')
                (start, end, text) = (int(extent['START']), int(extent['END']), extent.text)
                # NOTE(review): entity_type/entity_subtype here still hold the
                # values left over from the preceding <value> loop — looks like
                # a latent bug; confirm whether downstream relies on them.
                entity_list.append(Entity(start, end, text, entity_id, mention_id, entity_type, entity_subtype, mention_type, value=value))
    # --- relations; METONYMY has no subtype/args and is skipped ---
    for relation in doc.find_all('relation'):
        relation_type = relation['TYPE']
        if (relation_type == 'METONYMY'):
            continue
        relation_subtype = relation['SUBTYPE']
        for relation_mention in relation.find_all('relation_mention'):
            mention_id = relation_mention['ID']
            arg1 = arg2 = None
            for arg in relation_mention.find_all('relation_mention_argument'):
                arg_mention_id = arg['REFID']
                arg_role = arg['ROLE']
                arg_text = arg.find('extent').find('charseq').text
                if (arg_role == 'Arg-1'):
                    arg1 = RelationArgument(arg_mention_id, arg_role, arg_text)
                elif (arg_role == 'Arg-2'):
                    arg2 = RelationArgument(arg_mention_id, arg_role, arg_text)
            # Only keep mentions with both arguments present.
            if (arg1 and arg2):
                relation_list.append(Relation(mention_id, relation_type, relation_subtype, arg1, arg2))
    # --- events: anchor charseq is the trigger; END is made exclusive (+1) ---
    for event in doc.find_all('event'):
        event_id = event['ID']
        event_type = event['TYPE']
        event_subtype = event['SUBTYPE']
        for event_mention in event.find_all('event_mention'):
            mention_id = event_mention['ID']
            trigger = event_mention.find('anchor').find('charseq')
            (trigger_start, trigger_end) = (int(trigger['START']), int(trigger['END']))
            trigger_text = trigger.text
            event_args = []
            for arg in event_mention.find_all('event_mention_argument'):
                arg_mention_id = arg['REFID']
                arg_role = arg['ROLE']
                arg_text = arg.find('extent').find('charseq').text
                event_args.append(EventArgument(arg_mention_id, arg_role, arg_text))
            event_list.append(Event(event_id, mention_id, event_type, event_subtype, Span(trigger_start, (trigger_end + 1), trigger_text), event_args))
    # Trim leading/trailing whitespace offsets from heads and triggers.
    for entity in entity_list:
        entity.remove_space()
    for event in event_list:
        event.trigger.remove_space()
    return (doc_id, source, entity_list, relation_list, event_list)
class TFXLMForMultipleChoice():
    """Import-time placeholder for the real TF model: every entry point just
    reports that TensorFlow is required."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        # FIX: restored @classmethod so the canonical
        # TFXLMForMultipleChoice.from_pretrained(...) call reaches requires_tf
        # without needing an (impossible) instance first.  The first parameter
        # keeps its original name.
        requires_tf(self)
class TestFGES(unittest.TestCase):
    """Smoke test for the FGES causal-discovery wrapper on bundled linear data."""

    def test(self):
        directory = os.path.dirname(os.path.abspath(__file__))
        data = np.loadtxt(os.path.join(directory, '../data/data_linear.txt'), skiprows=1)
        try:
            from pyrca.thirdparty.causallearn.utils.TXT2GeneralGraph import txt2generalgraph
            graph = txt2generalgraph(os.path.join(directory, '../data/graph.txt'))
            df = pd.DataFrame(data, columns=[f'X{i}' for i in range(1, 21)])
            # Ground-truth adjacency: negative entries in the general graph mark edges.
            graph = pd.DataFrame((graph.graph < 0).astype(int), columns=df.columns, index=df.columns)
            model = FGES(FGES.config_class())
            r = model.train(df)
        except (ImportError, AssertionError) as e:
            # Optional third-party dependency missing: degrade to a no-op test.
            print(str(e))
            return
        # Allow up to 6 edge disagreements against the ground truth.
        diff = np.sum(np.abs((r.values - graph.values)))
        self.assertLessEqual(diff, 6)
def test_transform_string_self_one_to_one():
    """ak.transform over the same string array passed twice, with the
    'one_to_one' broadcast-parameters rule, must leave each output's layout
    parameters identical to its input's."""
    this = ak.Array(['one', 'two', 'one', 'nine'])
    that = this

    def apply(arrays, **kwargs):
        # Terminate the transform at string layouts (identified by the
        # '__array__' parameter) by returning the arrays unchanged.
        layout = ak.operations.ak_to_layout.to_layout(arrays[0])
        if (layout.parameter('__array__') is not None):
            return arrays

    (this_next, that_next) = ak.operations.ak_transform.transform(apply, this, that, broadcast_parameters_rule='one_to_one')
    assert (this.layout.parameters == this_next.layout.parameters)
    assert (that.layout.parameters == that_next.layout.parameters)
def get_commit():
    """Return the current git commit hash of the running script's repository.

    Runs `git rev-parse HEAD` in the script's directory.  Replaces the old
    shell pipeline ("cd {dir} && git log | head -n 1") which broke on paths
    with spaces/metacharacters, never waited on the child process, and
    depended on `git log` header formatting.

    Raises:
        subprocess.CalledProcessError: if git exits non-zero (e.g. not a repo).
    """
    # sys.argv[0] may be a bare filename, giving dirname '' — fall back to cwd.
    directory = os.path.dirname(sys.argv[0]) or '.'
    result = subprocess.run(
        ['git', 'rev-parse', 'HEAD'],   # arg list: no shell, no quoting issues
        cwd=directory,
        stdout=subprocess.PIPE,
        check=True,
    )
    return result.stdout.decode().strip()
def argsort_for_list(s, reverse=False):
    """Return the indices that would sort ``s`` (ascending by default)."""
    indices = list(range(len(s)))
    indices.sort(key=s.__getitem__, reverse=reverse)
    return indices
def gen_forward_op_parser():
    """Build and evaluate the CLI parser for the forward-operator script.

    Returns:
        argparse.Namespace with ``n`` (sensor count, default 25) and
        ``g`` (grid size, default 128).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', '--nsensors', type=int, default=25, help='Number of sensors')
    parser.add_argument('--g', '--gridsize', type=int, default=128, help='Grid size')
    return parser.parse_args()
class StaticAnalyzer:
    """Static inspection of a parsed source file to locate batch-size info."""

    def __init__(self, source_code, source_tree):
        self._ast = source_tree
        self._code_by_line = source_code.splitlines()

    def batch_size_location(self):
        """Find the ``batch_size`` parameter of the input-provider function.

        Returns:
            (line_number, can_mutate) tuple, or None when the function is
            missing or its first argument is not named ``batch_size``.
            ``can_mutate`` is True when the signature ends on the def line
            (so the line can be rewritten safely).
        """
        extractor = _InputProviderExtractor()
        extractor.visit(self._ast)
        function = extractor.function_node
        if function is None:
            return None
        arguments = function.args.args
        if not arguments or arguments[0].arg != 'batch_size':
            return None
        line_number = arguments[0].lineno
        header = self._code_by_line[function.lineno - 1]
        can_mutate = END_OF_FUNCTION.search(header) is not None
        return (line_number, can_mutate)
class TestRightMatrixMinimization(unittest.TestCase):
    """Regression test for RightMatrixMinimization on three years of data,
    comparing one iteration against stored fixture matrices."""

    def test_minimize_with_large_data(self):
        fixture_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '../../fixtures/right_matrix_minimization'))

        def load_csv(name):
            # Every fixture is a plain comma-separated matrix.
            with open(os.path.join(fixture_dir, name)) as file:
                return np.loadtxt(file, delimiter=',')

        power_signals_d = load_csv('three_years_power_signals_d_1.csv')
        rank_k = 6
        weights = load_csv('three_years_weights.csv')
        tau = 0.9
        mu_r = 1000.0
        initial_component_r0_value = load_csv('three_years_initial_component_r0.csv')
        initial_l_cs_value = load_csv('l_cs_value_after_left_matrix_minimization_iteration_1.csv')
        initial_r_cs_value = load_csv('r_cs_value_after_left_matrix_minimization_iteration_1.csv')
        initial_beta_value = 0.0
        expected_l_cs_value = load_csv('l_cs_value_after_right_matrix_minimization_iteration_1.csv')
        expected_r_cs_value = load_csv('r_cs_value_after_right_matrix_minimization_iteration_1.csv')
        expected_beta_value = -0.0

        right_matrix_minimization = RightMatrixMinimization(
            power_signals_d, rank_k, weights, tau, mu_r, solver_type='MOSEK')
        try:
            actual_l_cs_value, actual_r_cs_value, actual_beta_value = \
                right_matrix_minimization.minimize(
                    initial_l_cs_value, initial_r_cs_value,
                    initial_beta_value, initial_component_r0_value)
        except cvx.SolverError:
            self.skipTest('This test uses MOSEK solver'
                          'because default ECOS solver fails with large data. '
                          'Unless MOSEK is installed, this test fails.')
        else:
            np.testing.assert_array_almost_equal(actual_l_cs_value, expected_l_cs_value, decimal=2)
            np.testing.assert_array_almost_equal(actual_r_cs_value, expected_r_cs_value, decimal=1)
            np.testing.assert_almost_equal(actual_beta_value, expected_beta_value, decimal=4)
class IncreasingTableaux_shape(IncreasingTableaux):
    """Increasing tableaux of a fixed shape with entries bounded by
    ``max_entry``."""

    def __init__(self, p, max_entry=None):
        # Default bound: one possible entry per cell of the shape.
        if max_entry is None:
            max_entry = sum(p)
        super().__init__(max_entry=max_entry, category=FiniteEnumeratedSets())
        self.shape = p

    def __iter__(self):
        if sum(self.shape) == 0:
            yield self.element_class(self, [])
            return
        m = self.max_entry
        # Enumerate every 0/1 content vector of length ``m`` in descending
        # binary order (all-ones first, MSB = entry 1) -- the same order the
        # previous stack-based DFS produced -- and emit the tableaux of
        # each weight.
        for code in range((1 << m) - 1, -1, -1):
            wt = tuple((code >> (m - 1 - i)) & 1 for i in range(m))
            for tab in IncreasingTableaux_shape_weight(self.shape, wt):
                yield self.element_class(self, tab)

    def __contains__(self, x):
        if not IncreasingTableaux.__contains__(self, x):
            return False
        return [len(row) for row in x] == self.shape

    def _repr_(self):
        return ('Increasing tableaux of shape %s and maximum entry %s' % (str(self.shape), str(self.max_entry)))
class BasicBlock(nn.Module):
    """Two-layer residual basic block (ResNet style) built from the
    project's fused conv3x3+bn and nonlinearity helpers."""

    expansion = 1  # output channels = planes * expansion

    def __init__(self, nc, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1):
        super(BasicBlock, self).__init__()
        # The basic block only supports the vanilla ResNet configuration.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        # conv->bn followed by a nonlinearity, twice; only the first conv
        # carries the stride.
        self.conv1_bn = conv3x3_bn(nc, inplanes, planes, stride)
        self.relu1 = NonLinear(nc, planes)
        self.conv2_bn = conv3x3_bn(nc, planes, planes)
        self.relu2 = NonLinear(nc, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward: main path plus (optionally downsampled) identity."""
        identity = x
        out = self.relu1(self.conv1_bn(x))
        out = self.conv2_bn(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return self.relu2(out)
def main():
    """Entry point: train/evaluate the lemmatizer over a treebank.

    Delegates to the shared ``common.main`` driver, wiring in the
    lemmatizer-specific argument hooks, model-filename builder and charlm
    chooser.
    """
    common.main(run_treebank, 'lemma', 'lemmatizer', add_lemma_args,
                sub_argparse=lemmatizer.build_argparse(),
                build_model_filename=build_model_filename,
                choose_charlm_method=choose_lemma_charlm)
def compute_rouge(gold_summaries, pred_summaries, use_agregator=True):
    """Compute ROUGE between predictions and references.

    Note: the ``use_agregator`` spelling follows the upstream metric's
    keyword and is part of this function's interface.

    Returns:
        When aggregating, a dict of mid F-measure percentages per ROUGE
        key; otherwise the metric's raw output.
    """
    assert len(gold_summaries) == len(pred_summaries), 'Gold and prediction not of same length'
    pred_summaries, gold_summaries = postprocess_text(pred_summaries, gold_summaries)
    scores = metric.compute(predictions=pred_summaries, references=gold_summaries,
                            use_stemmer=True, use_agregator=use_agregator)
    if not use_agregator:
        return scores
    return {name: score.mid.fmeasure * 100 for name, score in scores.items()}
def main(argv=None):
    """Start a TensorFlow parameter server and block forever.

    Builds the cluster spec from the comma-separated ``ps_hosts`` and
    ``worker_hosts`` flags; this process must be launched with
    ``job_name == 'ps'``.
    """
    assert FLAGS.job_name == 'ps'
    cluster_spec = {}
    if FLAGS.ps_hosts != '':
        cluster_spec['ps'] = FLAGS.ps_hosts.split(',')
    cluster_spec['worker'] = FLAGS.worker_hosts.split(',')
    cluster = tf.train.ClusterSpec(cluster_spec)
    server = tf.train.Server(cluster, job_name='ps',
                             task_index=FLAGS.task_index, protocol=FLAGS.protocol)
    server.join()  # never returns; the ps serves until killed
def parse_range_int(range_str):
    """Parse comma-separated ``numpy.arange`` arguments (e.g. ``'1,10,2'``)
    and return the resulting integer range as an ndarray."""
    bounds = [int(part) for part in range_str.split(',')]
    return np.arange(*bounds)
class SymmetryFunctions(Representation):
    """Symmetry-function descriptor representation computed through the
    project's external backend (``prepare_config``/``compute_symmfs``)."""

    kind = 'sf'
    # Context defaults: clean up scratch files, no time limit.
    default_context = {'cleanup': True, 'timeout': None}

    def __init__(self, elems, elemental=None, universal=None, dim=None, context=None):
        """
        Args:
            elems: chemical elements covered by the representation.
            elemental: per-element symmetry-function settings (default: empty).
            universal: element-independent settings (default: empty).
            dim: descriptor dimensionality, if fixed.
            context: overrides for ``default_context``.
        """
        # BUG fix: the previous defaults were shared mutable objects
        # ([] / {}); use None sentinels while preserving the effective
        # empty-list/empty-dict behavior for callers that pass nothing.
        elemental = [] if elemental is None else elemental
        universal = [] if universal is None else universal
        super().__init__(context={} if context is None else context)
        self.runner_config = prepare_config(elems, elemental, universal, dim)
        self.config = {'elems': elems, 'elemental': elemental, 'universal': universal, 'dim': dim}

    def compute(self, data):
        """Compute symmetry functions for ``data``, honoring the context's
        cleanup/timeout settings."""
        return compute_symmfs(data, self.runner_config,
                              cleanup=self.context['cleanup'],
                              timeout=self.context['timeout'])

    def _get_config(self):
        # Constructor arguments, for serialization/round-tripping.
        return self.config

    def get_infile(self):
        """Render the backend input file for this configuration."""
        return make_infile(self.runner_config)
def sqlalchemy_type_to_string(t):
    """Map a SQLAlchemy column type to one of the project's coarse labels:
    'text', 'number', 'time', 'boolean' or 'others'."""
    if isinstance(t, sqlalchemy.Text):
        return 'text'
    if isinstance(t, (sqlalchemy.Integer, sqlalchemy.Float)):
        return 'number'
    if isinstance(t, sqlalchemy.DateTime):
        return 'time'
    if isinstance(t, sqlalchemy.Boolean):
        return 'boolean'
    return 'others'
def get_f1(answers, predictions, is_equal=get_exact_match):
    """Set-level F1 via greedy one-to-one matching between answers and
    predictions under ``is_equal`` (default: exact match).

    Returns 0 when nothing matches; otherwise the harmonic mean of the
    matched fractions on each side.
    """
    assert len(answers) > 0 and len(predictions) > 0, (answers, predictions)
    matched_answers = [False] * len(answers)
    matched_predictions = [False] * len(predictions)
    # Greedy first-come matching: each answer/prediction is used at most once.
    for i, answer in enumerate(answers):
        for j, prediction in enumerate(predictions):
            if matched_answers[i] or matched_predictions[j]:
                continue
            if is_equal(answer, prediction):
                matched_answers[i] = True
                matched_predictions[j] = True
    # One-to-one matching implies equal counts on both sides.
    assert np.sum(matched_answers) == np.sum(matched_predictions)
    recall = np.mean(matched_answers)
    precision = np.mean(matched_predictions)
    if recall + precision == 0:
        return 0
    return 2 * recall * precision / (recall + precision)
def InjectDeviceCopiesAmongNets(nets, blob_to_device_init=None):
    """Inject cross-device copy ops into each net sequentially, threading
    the blob->device mapping (and a shared blob remap) through the list.

    Returns:
        (new_nets, final blob_to_device mapping)
    """
    assert isinstance(nets, list), 'nets {} should be a list of nets.'.format(str(nets))
    assert all(isinstance(net, Net) for net in nets), 'nets {} should be a list of nets.'.format(str(nets))
    blob_to_device = blob_to_device_init or {}
    blob_remap = {}
    rewritten = []
    for net in nets:
        # Each net sees the device placements produced by its predecessors.
        new_net, blob_to_device = InjectCrossDeviceCopies(
            net, blob_to_device=blob_to_device, blob_remap=blob_remap)
        rewritten.append(new_net)
    return rewritten, blob_to_device
def check_param_groups(model: torch.nn.Module, param_groups: list, verbose=True):
    """Verify that the optimizer param groups cover exactly the model's
    parameters, by comparing total parameter counts.

    Returns:
        bool: True when the counts match.
    """
    grouped = sum(count_params(group['params'], verbose=False) for group in param_groups)
    total = count_params(model, verbose=False)
    matches = grouped == total
    if verbose:
        if matches:
            logger.info(f'Grouped parameters ({grouped:,}) == Model parameters ({total:,})')
        else:
            logger.warning(f'Grouped parameters ({grouped:,}) != Model parameters ({total:,})')
    return matches
def linear_representation(p, polys):
    """Express the polynomial ``p`` as a linear combination of ``polys``.

    Returns the coefficients ``c_i`` (in ``p``'s base ring) such that
    ``p == sum(c_i * polys[i])``, assuming such a representation exists.
    Works by row-reducing the coefficient matrix of ``polys + [p]``
    augmented with an identity block, then reading the combination off
    the last row.
    """
    from sage.matrix.constructor import diagonal_matrix
    R = p.base_ring()
    # Augment with an identity so echelonization records the row
    # operations; the last row then expresses how p reduces against polys.
    M = coefficient_matrix((polys + [p])).augment(diagonal_matrix(R, [1 for each in range((len(polys) + 1))]))
    M.echelonize()
    # j: column holding the multiplier applied to p itself;
    # n: last row; offset: first column of the identity block.
    j = (M.ncols() - 1)
    n = (M.nrows() - 1)
    offset = (M.ncols() - M.nrows())
    # NOTE(review): assumes the representation exists, i.e. M[n, j] is
    # nonzero after reduction -- confirm callers guarantee this.
    return [(M[(n, (offset + each))] / (- M[(n, j)])) for each in range(len(polys))]
def ThomsenGraph():
    """Return the Thomsen graph, i.e. the complete bipartite graph K(3,3)."""
    from sage.graphs.generators.basic import CompleteBipartiteGraph
    graph = CompleteBipartiteGraph(3, 3)
    graph.name('Thomsen graph')
    return graph
def register_Ns3HtCapabilitiesChecker_methods(root_module, cls):
    # Auto-generated pybindgen registration for ns3::HtCapabilitiesChecker:
    # registers the default constructor and the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::HtCapabilitiesChecker const &', 'arg0')])
    return
def parse_normal(example):
    """Parse one serialized tf.Example carrying Date/X/Y features.

    X and Y use the globally configured shapes with the leading (batch)
    axis stripped.
    """
    feature_spec = {
        'Date': tf.FixedLenFeature((1,), tf.int64),
        'X': tf.FixedLenFeature(X_SHAPE[1:], tf.float32),
        'Y': tf.FixedLenFeature(Y_SHAPE[1:], tf.float32),
    }
    return tf.parse_single_example(example, feature_spec)
def isEnglish(s):
    """Return True when ``s`` contains only ASCII characters.

    Despite the name, this is an ASCII check, not language detection.
    The original probed for non-ASCII by encoding to UTF-8 and decoding
    as ASCII inside a try/except; ``str.isascii`` performs the same test
    directly.
    """
    return s.isascii()
def get_dataset(name, data_dir, size=64, lsun_categories=None, deterministic=False, transform=None):
    """Build ``(dataset, nlabels)`` for the named dataset family.

    Args:
        name: one of 'image', 'webp', 'npy', 'cifar10', 'stacked_mnist',
            'lsun', 'lsun_class'.
        data_dir: dataset root directory.
        size: target image side length.
        lsun_categories: LSUN category spec (defaults to 'train').
        deterministic: disable random flip and dequantization noise.
        transform: optional override for the default transform pipeline.

    Returns:
        (dataset, nlabels)

    Raises:
        NotImplementedError: for an unknown dataset name.
    """
    if transform is None:
        # Default pipeline; the flip and uniform dequantization noise are
        # inserted only in the non-deterministic case (the ``False``
        # short-circuit results are filtered out of the list).
        transform = transforms.Compose([t for t in [
            transforms.Resize(size),
            transforms.CenterCrop(size),
            (not deterministic) and transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            (not deterministic) and transforms.Lambda(
                lambda x: x + (1.0 / 128) * torch.rand(x.size())),
        ] if t is not False])

    if name == 'image':
        print('Using image labels')
        dataset = datasets.ImageFolder(data_dir, transform)
        nlabels = len(dataset.classes)
    elif name == 'webp':
        print('Using no labels from webp')
        dataset = CachedImageFolder(data_dir, transform)
        nlabels = len(dataset.classes)
    elif name == 'npy':
        # npy folders bypass the image transform pipeline.
        dataset = datasets.DatasetFolder(data_dir, npy_loader, ['npy'])
        nlabels = len(dataset.classes)
    elif name == 'cifar10':
        dataset = datasets.CIFAR10(root=data_dir, train=True, download=True, transform=transform)
        nlabels = 10
    elif name == 'stacked_mnist':
        dataset = StackedMNIST(data_dir, transform=transforms.Compose([
            transforms.Resize(size), transforms.CenterCrop(size),
            transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]))
        nlabels = 1000
    elif name == 'lsun':
        if lsun_categories is None:
            lsun_categories = 'train'
        dataset = datasets.LSUN(data_dir, lsun_categories, transform)
        nlabels = len(dataset.classes)
    elif name == 'lsun_class':
        dataset = datasets.LSUNClass(data_dir, transform, target_transform=lambda t: 0)
        nlabels = 1
    else:
        # BUG fix: the original ``raise NotImplemented`` raises a TypeError
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError(f'Unknown dataset: {name}')
    return dataset, nlabels
def ngram_perplexity(eval_details, logbase=10.0):
    """Aggregate per-segment counters and return corpus perplexity.

    Each element of ``eval_details`` is a Counter carrying summed
    ``neglogprob`` and ``num_tokens`` tallies; perplexity is
    ``logbase ** (total_neglogprob / total_tokens)``.
    """
    # Counter addition merges the per-segment tallies.
    totals = sum(eval_details, collections.Counter())
    avg_neglogprob = totals['neglogprob'] / totals['num_tokens']
    return logbase ** avg_neglogprob
def _align_header(header, alignment, width): if (alignment == 'left'): return _padright(width, header) elif (alignment == 'center'): return _padboth(width, header) elif (not alignment): return '{0}'.format(header) else: return _padleft(width, header)
def extract_table(tokens):
    """Reconstruct a table (list of rows of cell strings) from a token
    stream of the form ``<Table> <Tr> <Td ...> word ... </Td> ... </Tr> ... </Table>``.

    A ``colspan`` attribute on a cell tag duplicates the cell text across
    that many columns.  Stray word tokens outside a cell open a fresh
    cell (and row) implicitly.
    """
    data = []
    row = None
    cell = None
    colspan = 1
    # Skip the enclosing <Table> ... </Table> tokens.
    for token in tokens[1:-1]:
        if token == '<Tr>':
            row = []
        elif token == '</Tr>':
            data.append(row)
            row = None
        elif token[:3] in ('<Td', '<Th'):
            cell = []
            # BUG fix: the original called int() on the whole re.findall
            # list, which always raised and was swallowed by a bare
            # except -- so colspan silently stayed 1.  Parse the first
            # captured digit group instead.
            match = re.search(r'colspan=.*?(\d+)', token)
            colspan = int(match.group(1)) if match else 1
        elif token[:4] in ('</Td', '</Th'):
            row += [' '.join(cell)] * colspan
            cell = None
        else:
            # Plain content token; open a cell/row on demand.
            if cell is None:
                cell = []
            if row is None:
                row = []
            cell.append(token)
    return data
class EGO4DChoiceDataset(BaseDataset):
    """Multiple-choice video QA dataset over EGO4D clips.

    Only validation metadata is shipped: every split maps to
    ``mc_val.csv`` and the 'train' split is rejected.
    """

    def __init__(self, *args, split='', **kwargs):
        assert split in ['train', 'val', 'test']
        self.split = split
        if self.split == 'train':
            # BUG fix: the original constructed this exception but never
            # raised it, silently accepting an unsupported split.
            raise ValueError('no train data provided')
        self.metadata = None
        self.ans_lab_dict = None
        if split == 'train':
            names = ['ego4d_choice_train']
        elif split == 'val':
            names = ['ego4d_choice_val']
        elif split == 'test':
            names = ['ego4d_choice_test']
        super().__init__(*args, **kwargs, names=names, text_column_name='unknown', remove_duplicate=False)
        self._load_metadata()

    def _load_metadata(self):
        # All splits share the validation csv -- only val data exists.
        metadata_dir = './meta_data/ego4d'
        split_files = {'train': 'mc_val.csv', 'val': 'mc_val.csv', 'test': 'mc_val.csv'}
        target_split_fp = split_files[self.split]
        self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep=',', header=0, error_bad_lines=False)

    def _get_video_path(self, sample):
        # NOTE: eval() on csv fields assumes trusted metadata files.
        # sample['question'] is a stringified tuple; element 0 is the clip id.
        rel_video_fp = eval(sample['question'])[0] + '.mp4'
        full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp)
        if not os.path.exists(full_video_fp):
            # BUG fix: the original built ``Exception(IOError)`` without
            # raising it; actually signal the missing file.
            raise FileNotFoundError(full_video_fp)
        return full_video_fp, rel_video_fp

    def get_raw_video(self, sample):
        """Decode the frames around the question's frame location."""
        abs_fp, rel_fp = self._get_video_path(sample)
        frame_loc = eval(sample['question'])[1]
        frame_end = get_video_len(abs_fp)
        imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames, mode=self.split)
        if imgs is None:
            raise Exception('Invalid video!', rel_fp)
        return imgs

    def get_text(self, sample):
        """Tokenize every candidate answer; returns [(text, encoding), ...]."""
        texts = []
        for answer in eval(sample['answers']):
            text = answer[-1]
            encoding = self.tokenizer(text, padding='max_length', truncation=True,
                                      max_length=self.max_text_len, return_special_tokens_mask=True)
            texts.append((text, encoding))
        return texts

    def get_answer_label(self, sample):
        """Index of the candidate whose text matches the ground truth
        (0 when no candidate matches)."""
        gt_text = eval(sample['question'])[-1]
        answer_label = 0
        for index, answer in enumerate(eval(sample['answers'])):
            if answer[-1] == gt_text:
                answer_label = index
        return answer_label

    def __getitem__(self, index):
        sample = self.metadata.iloc[index]
        video_tensor = self.get_video(sample)
        answer = self.get_answer_label(sample)
        ret = {'video': video_tensor, 'vid_index': index, 'cap_index': index,
               'raw_index': index, 'answer': answer}
        texts = self.get_text(sample)
        ret['text'] = texts[0]
        # Remaining candidates are exposed as "false" texts.
        for i in range(self.draw_false_text - 1):
            ret.update({f'false_text_{i}': texts[i + 1]})
        return ret

    def __len__(self):
        return len(self.metadata)
def run_reward_conditioning(out_directory: str, parameters: Dict[str, Union[int, float, str, bool]], loaded_policies: Iterable[policies.RvS], attribute_dicts: List[Dict[str, Union[int, float, str]]], env: offline_env.OfflineEnv, trajectory_samples: int = 200, file_tag: str = 'r_target', targets: str = 'of expert', wandb_run: Optional[Run] = None) -> None:
    """Evaluate each policy under a sweep of reward targets and plot the
    resulting cumulative rewards.

    Non-antmaze environments additionally get the demonstrator's reward
    vector as a baseline curve.
    """
    reward_fractions = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2]
    reward_vecs = []
    r_attribute_dicts = []
    for policy, attribute_dict in zip(loaded_policies, attribute_dicts):
        reward_vecs += step.eval_reward_conditioning(
            policy, env, parameters['env_name'], reward_fractions,
            trajectory_samples=trajectory_samples, targets=targets,
            average_reward_to_go=not parameters['cumulative_reward_to_go'])
        # One attribute dict per evaluated target fraction.
        r_attribute_dicts += [{'Policy': 'RCBC', 'Reward Target': fraction, **attribute_dict}
                              for fraction in reward_fractions]
    if 'antmaze' not in parameters['env_name']:
        reward_vecs.append(visualize.get_demonstrator_reward_vec(env))
        r_attribute_dicts.append({'Policy': 'Demonstrator'})
    visualize.visualize_cumulative_reward(
        reward_vecs, r_attribute_dicts, parameters, out_directory,
        x='Reward Target', file_tag=file_tag, wandb_run=wandb_run)
class CenterLoss(nn.Module):
    """Center loss (Wen et al., 2016): pulls each feature toward a
    learnable per-class center.

    Args:
        num_classes: number of identity classes.
        feat_dim: feature dimensionality.
        use_gpu: allocate the centers (and class indices) on CUDA.
    """

    def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu
        centers = torch.randn(self.num_classes, self.feat_dim)
        if self.use_gpu:
            centers = centers.cuda()
        self.centers = nn.Parameter(centers)

    def forward(self, x, labels):
        """Mean squared distance between each feature and its class center.

        Args:
            x: feature matrix, shape (batch_size, feat_dim).
            labels: class indices, shape (batch_size,).
        """
        assert x.size(0) == labels.size(0), 'features.size(0) is not equal to labels.size(0)'
        batch_size = x.size(0)
        # ||x||^2 + ||c||^2 - 2 x.c  ->  squared euclidean distances.
        distmat = (torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes)
                   + torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t())
        # Modern addmm_ signature; the positional (beta, alpha, ...) form
        # used previously is deprecated.
        distmat.addmm_(x.float(), self.centers.t(), beta=1, alpha=-2)
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))
        dist = distmat * mask.float()
        # BUG fix: the original clamped with max=0.0, which forced every
        # distance (hence the loss) to zero; the intended upper bound is
        # a large value (1e+12) with 1e-12 guarding against negatives.
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
        return loss
def compute_loss(preds, labels, criterion):
    """Apply ``criterion`` over positions with non-negative labels.

    Negative labels mark ignored/padded positions; the surviving labels
    are cast to float before being handed to the criterion.
    """
    valid = labels >= 0
    return criterion(preds[valid].flatten(), labels[valid].float())
class CheckboxInput(Input):
    """Render a checkbox input, marked checked from ``field.checked`` when
    that attribute exists, otherwise from the field data's truthiness."""

    input_type = 'checkbox'

    def __call__(self, field, **kwargs):
        checked = getattr(field, 'checked', field.data)
        if checked:
            kwargs['checked'] = True
        return super(CheckboxInput, self).__call__(field, **kwargs)
def test_checks_as_a_list(testdir, openapi3_base_url):
    """``validate_response`` should run a custom check supplied both via
    ``checks`` and via ``additional_checks``."""
    testdir.make_test(f'''
schema.base_url = "{openapi3_base_url}"

def my_check(response, case):
    note("CHECKING!")
    ()

def test(case):
    response = case.call()
    case.validate_response(response, checks=(my_check,), additional_checks=[my_check])
''')
    result = testdir.runpytest('-s')
    result.assert_outcomes(passed=1)
    # The custom check's note must have been emitted during the run.
    assert 'CHECKING!' in result.stdout.str()
def _instance_normalization_v1(x, beta, gamma, channel_axis=1, batch_axis=0, eps=1e-05, output_stat=False):
    """Instance normalization (v1): normalize over all axes except the
    batch and channel axes, with optional affine parameters.

    ``beta``/``gamma`` may be given per-channel; they are broadcast up to
    the adapted (batch..., channel) shape expected by
    ``tensor_normalization``.
    """
    _check_axis(len(x.shape), channel_axis)
    batch_axis = _check_batch_axis_and_force_list(len(x.shape), batch_axis)

    # Shape with singleton dims everywhere except batch and channel axes.
    adapt_shape = [1] * len(x.shape)
    for axis in batch_axis:
        adapt_shape[axis] = x.shape[axis]
    adapt_shape[channel_axis] = x.shape[channel_axis]
    adapt_shape = tuple(adapt_shape)

    if beta is not None and beta.shape != adapt_shape:
        assert beta.shape[channel_axis] == adapt_shape[channel_axis], 'channel size of beta: {} != channel size of x ({}).'.format(beta.shape[channel_axis], adapt_shape[channel_axis])
        beta = broadcast(beta, shape=adapt_shape)
    if gamma is not None and gamma.shape != adapt_shape:
        assert gamma.shape[channel_axis] == adapt_shape[channel_axis], 'channel size of gamma: {} != channel size of x ({}).'.format(gamma.shape[channel_axis], adapt_shape[channel_axis])
        gamma = broadcast(gamma, shape=adapt_shape)

    return tensor_normalization(x, batch_axis + [channel_axis], beta, gamma, eps, output_stat)
def load_from_db(filename, db_type, device_option=None, *args, **kwargs):
    """Load a MetaNetDef protobuf from a predictor DB.

    Optionally rewrites every op's device_option, falling back to the
    current device scope when none is supplied.
    """
    create_db = core.CreateOperator(
        'CreateDB', [], [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        db=filename, db_type=db_type)
    assert workspace.RunOperatorOnce(create_db), 'Failed to create db {}'.format(filename)

    load_meta_net_def = core.CreateOperator(
        'Load',
        [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
        [core.BlobReference(predictor_constants.META_NET_DEF)])
    assert workspace.RunOperatorOnce(load_meta_net_def)

    blob = workspace.FetchBlob(predictor_constants.META_NET_DEF)
    raw = blob if isinstance(blob, bytes) else str(blob).encode('utf-8')
    meta_net_def = serde.deserialize_protobuf_struct(raw, metanet_pb2.MetaNetDef)

    if device_option is None:
        device_option = scope.CurrentDeviceScope()
    if device_option is not None:
        # Stamp the requested device onto every op of every net.
        for kv in meta_net_def.nets:
            for op in kv.value.op:
                op.device_option.CopyFrom(device_option)
    return meta_net_def
# NOTE(review): the two decorators below were garbled in the previous
# revision ("_keyword(color='rgbcolor') (alpha=1, ...)"); reconstructed as
# Sage's @rename_keyword/@options pair, which matches the option keys
# ('rgbcolor', 'thickness', 'fill') the body reads.
@rename_keyword(color='rgbcolor')
@options(alpha=1, fill=False, thickness=1, rgbcolor='blue', zorder=2, linestyle='solid')
def hyperbolic_polygon(pts, model='UHP', resolution=200, **options):
    """Return a hyperbolic polygon through the given points.

    Args:
        pts: polygon vertices, in the coordinates of ``model``.
        model: hyperbolic plane model ('UHP', 'PD', 'KM' or 'HM').
        resolution: sampling resolution for geodesic sides / fill.
        options: plot options (alpha, fill, thickness, rgbcolor, ...).
    """
    from sage.plot.all import Graphics
    g = Graphics()
    g._set_extra_kwds(g._extract_kwds_for_show(options))
    if model == 'HM':
        # Hyperboloid model: draw each geodesic side on the hyperboloid,
        # optionally filling the enclosed surface patch.
        from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicPlane
        from sage.plot.plot3d.implicit_plot3d import implicit_plot3d
        from sage.symbolic.ring import SR
        HM = HyperbolicPlane().HM()
        (x, y, z) = SR.var('x,y,z')
        arc_points = []
        for i in range(0, len(pts) - 1):
            line = HM.get_geodesic(pts[i], pts[i + 1])
            g = g + line.plot(color=options['rgbcolor'], thickness=options['thickness'])
            arc_points = arc_points + line._plot_vertices(resolution)
        # Closing side back to the first vertex.
        line = HM.get_geodesic(pts[-1], pts[0])
        g = g + line.plot(color=options['rgbcolor'], thickness=options['thickness'])
        arc_points = arc_points + line._plot_vertices(resolution)
        if options['fill']:
            xlist = [p[0] for p in pts]
            ylist = [p[1] for p in pts]
            zlist = [p[2] for p in pts]

            def region(x, y, z):
                # Inside the polygon iff the boundary winds around the point.
                return _winding_number(arc_points, (x, y, z)) != 0
            g = g + implicit_plot3d(
                x ** 2 + y ** 2 - z ** 2 == -1,
                (x, min(xlist), max(xlist)), (y, min(ylist), max(ylist)),
                (z, 0, max(zlist)), region=region,
                plot_points=resolution, color=options['rgbcolor'])
    else:
        g.add_primitive(HyperbolicPolygon(pts, model, options))
        if model == 'PD' or model == 'KM':
            # Disc models: also draw the unit-circle boundary.
            g = g + circle((0, 0), 1, rgbcolor='black')
    g.set_aspect_ratio(1)
    return g
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Corpus-level BLEU in the NLTK style.

    Clipped n-gram counts are accumulated over ALL sentence pairs before
    being combined -- this is not the same as averaging sentence BLEUs.

    Args:
        list_of_references: one list of reference token lists per hypothesis.
        hypotheses: list of hypothesis token lists.
        weights: per-order n-gram weights (default: uniform up to 4-grams).
        smoothing_function: callable applied to the precision fractions
            (defaults to ``SmoothingFunction().method1``).
        auto_reweigh: shrink the n-gram order for very short corpora.

    Returns:
        The BLEU score, or 0 when there are no unigram matches.
    """
    p_numerators = Counter()  # clipped n-gram matches, keyed by order i
    p_denominators = Counter()  # total hypothesis n-grams, keyed by order i
    (hyp_lengths, ref_lengths) = (0, 0)
    assert (len(list_of_references) == len(hypotheses)), 'The number of hypotheses and their reference(s) should be the same '
    for (references, hypothesis) in zip(list_of_references, hypotheses):
        for (i, _) in enumerate(weights, start=1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        # Brevity penalty compares against the closest reference length.
        ref_lengths += closest_ref_length(references, hyp_len)
    bp = brevity_penalty(ref_lengths, hyp_lengths)
    if auto_reweigh:
        # With fewer than 4 total tokens, uniform 4-gram weights are
        # meaningless; fall back to uniform weights up to hyp_lengths.
        if ((hyp_lengths < 4) and (weights == (0.25, 0.25, 0.25, 0.25))):
            weights = (((1 / hyp_lengths),) * hyp_lengths)
    # _normalize=False keeps raw numerators/denominators for smoothing.
    # NOTE(review): the private ``_normalize`` argument was removed from
    # Fraction in newer Python versions -- confirm the supported runtime.
    p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False) for (i, _) in enumerate(weights, start=1)]
    if (p_numerators[1] == 0):
        return 0
    if (not smoothing_function):
        smoothing_function = SmoothingFunction().method1
    # NOTE: references/hypothesis here are whatever the LAST loop
    # iteration left bound -- this mirrors the reference implementation.
    p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths)
    s = ((w_i * math.log(p_i)) for (w_i, p_i) in zip(weights, p_n))
    s = (bp * math.exp(math.fsum(s)))
    return s
class FlaxBeitForMaskedImageModeling(metaclass=DummyObject):
    """Placeholder emitted when Flax is unavailable; any use raises an
    informative error via ``requires_backends``."""
    # Backends required for the real implementation.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class EpochBasedTrainer(BaseTrainer):
    """Trainer that organizes optimization into whole epochs.

    Subclasses implement ``train_step``/``val_step`` -- returning an
    ``(output_dict, result_dict)`` pair whose ``result_dict['loss']`` is
    backpropagated -- and may override the ``before_*``/``after_*`` hooks.
    This class drives the loop, logging, snapshotting and LR scheduling.
    """

    def __init__(self, cfg, parser=None, cudnn_deterministic=True, autograd_anomaly_detection=False, save_all_snapshots=True, run_grad_check=False, grad_acc_steps=1):
        """Forward all trainer options to ``BaseTrainer``.

        ``cfg.optim.max_epoch`` bounds the run; ``best_val_loss`` starts
        at float max so the first validation always improves it.
        """
        super().__init__(cfg, parser=parser, cudnn_deterministic=cudnn_deterministic, autograd_anomaly_detection=autograd_anomaly_detection, save_all_snapshots=save_all_snapshots, run_grad_check=run_grad_check, grad_acc_steps=grad_acc_steps)
        self.max_epoch = cfg.optim.max_epoch
        # Lowest validation loss seen so far; drives best-snapshot saving.
        self.best_val_loss = sys.float_info.max

    # ----- subclass hooks; all default to no-ops -------------------------
    def before_train_step(self, epoch, iteration, data_dict) -> None:
        pass

    def before_val_step(self, epoch, iteration, data_dict) -> None:
        pass

    def after_train_step(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
        pass

    def after_val_step(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
        pass

    def before_train_epoch(self, epoch) -> None:
        pass

    def before_val_epoch(self, epoch) -> None:
        pass

    def after_train_epoch(self, epoch) -> None:
        pass

    def after_val_epoch(self, epoch) -> None:
        # NOTE(review): this hook is defined but never invoked by
        # ``inference_epoch`` below -- confirm whether that is intended.
        pass

    def train_step(self, epoch, iteration, data_dict) -> Tuple[(Dict, Dict)]:
        # Must be implemented by subclasses; returns (output_dict, result_dict).
        pass

    def val_step(self, epoch, iteration, data_dict) -> Tuple[(Dict, Dict)]:
        # Must be implemented by subclasses; returns (output_dict, result_dict).
        pass

    def after_backward(self, epoch, iteration, data_dict, output_dict, result_dict) -> None:
        pass

    def check_gradients(self, epoch, iteration, data_dict, output_dict, result_dict):
        """When ``run_grad_check`` is set, dump the batch and model and
        drop into ipdb as soon as invalid gradients are detected."""
        if (not self.run_grad_check):
            return
        if (not self.check_invalid_gradients()):
            self.logger.error('Epoch: {}, iter: {}, invalid gradients.'.format(epoch, iteration))
            torch.save(data_dict, 'data.pth')
            torch.save(self.model, 'model.pth')
            self.logger.error('Data_dict and model snapshot saved.')
            ipdb.set_trace()

    def train_epoch(self):
        """Run one full training epoch: step, backward, optimize, log,
        then advance the scheduler and save an epoch snapshot."""
        if self.distributed:
            # Reshuffle the distributed sampler for this epoch.
            self.train_loader.sampler.set_epoch(self.epoch)
        self.before_train_epoch(self.epoch)
        self.optimizer.zero_grad()
        total_iterations = len(self.train_loader)
        for (iteration, data_dict) in enumerate(self.train_loader):
            self.inner_iteration = (iteration + 1)  # 1-based within the epoch
            self.iteration += 1  # global step across epochs
            data_dict = torch_util.to_cuda(data_dict)
            self.before_train_step(self.epoch, self.inner_iteration, data_dict)
            self.timer.add_prepare_time()
            (output_dict, result_dict) = self.train_step(self.epoch, self.inner_iteration, data_dict)
            # NOTE(review): retain_graph=True on every step keeps graphs
            # alive across iterations -- confirm a subclass needs it.
            result_dict['loss'].backward(retain_graph=True)
            self.after_backward(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
            self.check_gradients(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
            # optimizer_step handles gradient accumulation internally.
            self.optimizer_step(self.inner_iteration)
            self.timer.add_process_time()
            self.after_train_step(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
            result_dict = self.release_tensors(result_dict)
            self.summary_board.update_from_result_dict(result_dict)
            if ((self.inner_iteration % self.log_steps) == 0):
                summary_dict = self.summary_board.summary()
                message = get_log_string(result_dict=summary_dict, epoch=self.epoch, max_epoch=self.max_epoch, iteration=self.inner_iteration, max_iteration=total_iterations, lr=self.get_lr(), timer=self.timer)
                self.logger.info(message)
                self.write_event('train', summary_dict, self.iteration)
            torch.cuda.empty_cache()
        self.after_train_epoch(self.epoch)
        message = get_log_string(self.summary_board.summary(), epoch=self.epoch, timer=self.timer)
        self.logger.critical(message)
        if (self.scheduler is not None):
            # Per-epoch LR schedule.
            self.scheduler.step()
        self.save_snapshot(f'epoch-{self.epoch}.pth.tar')

    def inference_epoch(self):
        """Run one validation pass and save a snapshot when the loss improves."""
        self.set_eval_mode()
        self.before_val_epoch(self.epoch)
        summary_board = SummaryBoard(adaptive=True)
        timer = Timer()
        total_iterations = len(self.val_loader)
        pbar = tqdm.tqdm(enumerate(self.val_loader), total=total_iterations)
        for (iteration, data_dict) in pbar:
            self.inner_iteration = (iteration + 1)
            data_dict = torch_util.to_cuda(data_dict)
            self.before_val_step(self.epoch, self.inner_iteration, data_dict)
            timer.add_prepare_time()
            (output_dict, result_dict) = self.val_step(self.epoch, self.inner_iteration, data_dict)
            # Synchronize so the measured process time includes GPU work.
            torch.cuda.synchronize()
            timer.add_process_time()
            self.after_val_step(self.epoch, self.inner_iteration, data_dict, output_dict, result_dict)
            result_dict = self.release_tensors(result_dict)
            summary_board.update_from_result_dict(result_dict)
            message = get_log_string(result_dict=summary_board.summary(), epoch=self.epoch, iteration=self.inner_iteration, max_iteration=total_iterations, timer=timer)
            pbar.set_description(message)
            torch.cuda.empty_cache()
        summary_dict = summary_board.summary()
        message = ('[Val] ' + get_log_string(summary_dict, epoch=self.epoch, timer=timer))
        # NOTE(review): this is the LAST batch's loss, not the epoch
        # average from summary_board -- confirm before trusting
        # best-snapshot selection.
        val_loss = result_dict['loss']
        if (val_loss < self.best_val_loss):
            self.best_val_loss = val_loss
            self.save_snapshot(f'best_snapshot.pth.tar')
        self.logger.critical(message)
        self.write_event('val', summary_dict, self.epoch)
        self.set_train_mode()

    def set_train_mode(self):
        """Switch the model and autograd back into training mode."""
        self.training = True
        self.model.train()
        torch.set_grad_enabled(True)

    def run(self):
        """Main entry: optionally resume from a snapshot, then alternate
        train and validation epochs until ``max_epoch``."""
        assert (self.train_loader is not None)
        assert (self.val_loader is not None)
        if self.args.resume:
            self.load_snapshot(osp.join(self.snapshot_dir, 'snapshot.pth.tar'))
        elif (self.args.snapshot is not None):
            self.load_snapshot(self.args.snapshot)
        self.set_train_mode()
        while (self.epoch < self.max_epoch):
            self.epoch += 1
            self.train_epoch()
            self.inference_epoch()
class Weibull(TransformedDistribution):
    r"""Two-parameter Weibull distribution, realized as a transform of a
    unit Exponential: ``X = scale * E ** (1 / concentration)``.
    """
    arg_constraints = {'scale': constraints.positive, 'concentration': constraints.positive}
    support = constraints.positive

    def __init__(self, scale, concentration, validate_args=None):
        self.scale, self.concentration = broadcast_all(scale, concentration)
        self.concentration_reciprocal = self.concentration.reciprocal()
        # Unit exponential raised to 1/k, then scaled.
        base = Exponential(torch.ones_like(self.scale))
        chain = [PowerTransform(exponent=self.concentration_reciprocal),
                 AffineTransform(loc=0, scale=self.scale)]
        super(Weibull, self).__init__(base, chain, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Weibull with parameters expanded to ``batch_shape``."""
        new = self._get_checked_instance(Weibull, _instance)
        new.scale = self.scale.expand(batch_shape)
        new.concentration = self.concentration.expand(batch_shape)
        new.concentration_reciprocal = new.concentration.reciprocal()
        base = self.base_dist.expand(batch_shape)
        chain = [PowerTransform(exponent=new.concentration_reciprocal),
                 AffineTransform(loc=0, scale=new.scale)]
        super(Weibull, new).__init__(base, chain, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def mean(self):
        # E[X] = scale * Gamma(1 + 1/k)
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

    def variance(self):
        # Var[X] = scale^2 * (Gamma(1 + 2/k) - Gamma(1 + 1/k)^2)
        return self.scale.pow(2) * (
            torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal))
            - torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

    def entropy(self):
        # H = gamma * (1 - 1/k) + log(scale / k) + 1
        return (euler_constant * (1 - self.concentration_reciprocal)
                + torch.log(self.scale * self.concentration_reciprocal) + 1)
def train_asr_config(config, name, parameter_dict=None):
    """Register a RETURNN ASR training job for ``config`` under ``name``.

    Registers the model directory and learning-rate scores as Sisyphus
    outputs and returns the job.
    """
    job = RETURNNTrainingFromFile(config, parameter_dict=parameter_dict, mem_rqmt=16)
    job.add_alias('asr_training/' + name)
    job.rqmt['time'] = 167  # ~7 days wall-clock budget
    job.rqmt['cpu'] = 8
    tk.register_output('asr_training/' + name + '_model', job.model_dir)
    tk.register_output('asr_training/' + name + '_training-scores', job.learning_rates)
    return job
def _has_probe(args):
    """Return True when any element of ``args`` is a probe."""
    return any(is_probe(arg) for arg in args)
# NOTE(review): the decorator and the GeLU constants were garbled in the
# previous revision (".script" and literal "0." placeholders); restored
# from the standard Megatron-LM fused bias-GeLU backward kernel:
# 0.79788456 = sqrt(2/pi), 0.1070322243 = 3 * 0.044715 * sqrt(2/pi).
@torch.jit.script
def bias_gelu_back(g, y, bias):
    """Backward of the fused bias+GeLU (tanh approximation).

    Args:
        g: upstream gradient w.r.t. the gelu output.
        y: input activations (before the bias add).
        bias: bias vector added to ``y``.

    Returns:
        (grad wrt y, grad wrt bias); the bias gradient is summed over the
        leading dimension and cast to the bias dtype.
    """
    x = bias + y
    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    # d/dx [0.5 * x * (1 + tanh(0.79788456 * x * (1 + 0.044715 * x^2)))]
    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
    grad_y = ff * g
    return (grad_y.to(dtype=y.dtype), grad_y.sum(dim=0, dtype=bias.dtype))
def count_all_paths_with_label_seq_partly_dominated(fsa: Fsa, label_seq_template: str, dom_label: str,
                                                    n: typing.Union[(int, sympy.Symbol)],
                                                    factor: typing.Union[(int, float, sympy.Symbol)],
                                                    fixed_factor_power: typing.Optional[typing.Union[(sympy.Symbol, sympy.Expr)]] = None
                                                    ) -> typing.Dict[(typing.Tuple[(str, str)], typing.Dict[(str, sympy.Expr)])]:
    """Symbolically count frame occupancy over all FSA paths for the template 'BaaB'.

    The FSA is split into parts dominated by `dom_label` (with self-loops) plus the
    remainder; the two `Label1` segments are parameterized by their start/end frames
    and all placements are summed symbolically (sympy) over sequence length 4*n.

    Args:
        fsa: automaton whose paths are analyzed (must match the 'BaaB' template).
        label_seq_template: template of the label sequence; asserted to be 'BaaB'.
        dom_label: the dominating (blank-like) label.
        n: symbolic or concrete length unit; total frames are 4*n.
        factor: per-blank-frame weighting factor (or symbol).
        fixed_factor_power: if given, restrict to placements where the number of
            blank frames attributed to `input_label` equals this power.

    Returns:
        Dict keyed by (input_label, other_label) pairs, each mapping label -> sympy
        expression for the accumulated frame counts.
    """
    labels = fsa.get_labels()
    assert (dom_label in labels)
    input_labels = set(label_seq_template)
    res = {}
    for input_label in input_labels:
        # NOTE(review): count_frames_in_template is computed but never used below.
        count_frames_in_template = 0
        for (i, input_label_) in enumerate(label_seq_template):
            if (input_label_ == input_label):
                count_frames_in_template += 1
        parts = []
        parts_by_state = {}

        class Part():
            """A maximal contiguous state span whose arcs carry `dom_label`."""

            def __init__(self, state: int):
                self.start_state = state
                self.end_state = state
                self.arcs = set()
                # States that have a self-loop arc within this part.
                self.loops_in_states = set()

            def add_arc(self, arc_: Arc):
                # Arcs must start inside the current span; the span grows to the target.
                assert (self.start_state <= arc_.source_state <= self.end_state)
                self.arcs.add(arc_)
                self.end_state = max(self.end_state, arc_.target_state)
                if (arc_.source_state == arc_.target_state):
                    self.loops_in_states.add(arc_.source_state)

            def have_loop(self):
                return bool(self.loops_in_states)

            def __repr__(self):
                return ('FsaPart{%s}' % self.arcs)

        # Partition arcs: dom_label arcs extend/create dominated parts; all other
        # arcs are collected into one remainder part.
        rem_part = None
        for arc in sorted(fsa.arcs, key=(lambda arc_: (arc_.source_state, arc_.target_state, arc_.label))):
            if (arc.label == dom_label):
                if (arc.source_state not in parts_by_state):
                    part = Part(state=arc.source_state)
                    parts.append(part)
                    parts_by_state[arc.source_state] = part
                else:
                    part = parts_by_state[arc.source_state]
                part.add_arc(arc)
                parts_by_state[arc.target_state] = part
            else:
                if (rem_part is None):
                    rem_part = Part(state=arc.source_state)
                rem_part.add_arc(arc)
        # The analysis below is hard-coded for exactly two looping dominated parts
        # (initial and final) — i.e. the 'BaaB' shape.
        assert (len(parts) == 2)
        assert all([part.have_loop() for part in parts])
        assert (parts[0].start_state == 0)
        assert ((parts[(- 1)].start_state in fsa.final_states) and (parts[(- 1)].end_state in fsa.final_states))
        assert (label_seq_template == 'BaaB')
        for input_label_ in input_labels:
            if (input_label_ != input_label):
                res_ = {label: 0 for label in labels}
                res[(input_label, input_label_)] = res_

                def _add():
                    # Accumulate counts for the current placement case. Frame counts
                    # per part (p1, p23, p4) are read from the enclosing scope; they
                    # are (re)assigned before each call below (late-binding closure).
                    if (input_label == BlankLabel):
                        blank_num_frames_input = (blank_num_frames_p1 + blank_num_frames_p4)
                    else:
                        blank_num_frames_input = blank_num_frames_p23
                    if (input_label_ == BlankLabel):
                        blank_num_frames_input_ = (blank_num_frames_p1 + blank_num_frames_p4)
                    else:
                        blank_num_frames_input_ = blank_num_frames_p23
                    label1_num_frames_input_ = ((2 * n) - blank_num_frames_input_)
                    if (fixed_factor_power is not None):
                        # Restrict to placements whose blank count equals fixed_factor_power.
                        blank_num_frames_input = sympy.sympify(blank_num_frames_input)
                        syms = set(blank_num_frames_input.free_symbols)
                        if syms.issubset({n}):
                            # Constant in the placement variables: a plain equality condition.
                            eq = sympy.Eq(fixed_factor_power, blank_num_frames_input)
                            sum_blank = sympy.Sum(sympy.Sum(blank_num_frames_input_, label1_end_frame_range), label1_start_frame_range)
                            sum_label = sympy.Sum(sympy.Sum(label1_num_frames_input_, label1_end_frame_range), label1_start_frame_range)
                            if (eq == sympy.sympify(True)):
                                res_[BlankLabel] += sum_blank
                                res_[Label1] += sum_label
                            else:
                                res_[BlankLabel] += sympy.Piecewise((sum_blank, eq), (0, True))
                                res_[Label1] += sympy.Piecewise((sum_label, eq), (0, True))
                        elif syms.issubset({n, label1_start_frame}):
                            # Solve the constraint for the start frame and substitute it.
                            (label1_start_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_start_frame)
                            in_range = sympy.And(sympy.Ge(label1_start_frame_, label1_start_frame_range[1]),
                                                 sympy.Le(label1_start_frame_, label1_start_frame_range[2])).simplify()
                            sum_blank = sympy.Sum(blank_num_frames_input_, label1_end_frame_range)
                            sum_label = sympy.Sum(label1_num_frames_input_, label1_end_frame_range)
                            sum_blank = sum_blank.subs(label1_start_frame, label1_start_frame_)
                            sum_label = sum_label.subs(label1_start_frame, label1_start_frame_)
                            res_[BlankLabel] += sympy.Piecewise((sum_blank, in_range), (0, True))
                            res_[Label1] += sympy.Piecewise((sum_label, in_range), (0, True))
                        elif syms.issubset({n, label1_end_frame}):
                            # Solve the constraint for the end frame and substitute it.
                            (label1_end_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_end_frame)
                            assert set(label1_end_frame_.free_symbols).issubset({n, fixed_factor_power})
                            in_range = sympy.And(sympy.Ge(label1_end_frame_, label1_end_frame_range[1]),
                                                 sympy.Le(label1_end_frame_, label1_end_frame_range[2])).simplify()
                            assert set(in_range.free_symbols).issubset({n, fixed_factor_power})
                            sum_blank = sympy.Sum(blank_num_frames_input_.subs(label1_end_frame, label1_end_frame_), label1_start_frame_range)
                            sum_label = sympy.Sum(label1_num_frames_input_.subs(label1_end_frame, label1_end_frame_), label1_start_frame_range)
                            res_[BlankLabel] += sympy.Piecewise((sum_blank, in_range), (0, True))
                            res_[Label1] += sympy.Piecewise((sum_label, in_range), (0, True))
                        else:
                            # Constraint couples both placement variables: eliminate the end
                            # frame, then count the valid start frames as an interval length.
                            assert syms.issubset({n, label1_start_frame, label1_end_frame})
                            assert set(sympy.sympify(blank_num_frames_input_).free_symbols).issubset({n})
                            assert set(sympy.sympify(label1_num_frames_input_).free_symbols).issubset({n})
                            (label1_end_frame_,) = sympy.solve((blank_num_frames_input - fixed_factor_power), label1_end_frame)
                            assert set(label1_end_frame_.free_symbols).issubset({n, fixed_factor_power, label1_start_frame})
                            total_cond = True
                            rs = []
                            r1 = sympy.Ge(label1_end_frame_, label1_end_frame_range[1]).simplify()
                            r2 = sympy.Le(label1_end_frame_, label1_end_frame_range[2]).simplify()
                            if (label1_start_frame not in r1.free_symbols):
                                total_cond = sympy.And(total_cond, r1)
                            else:
                                rs.append(sympy.solve_univariate_inequality(r1, label1_start_frame, relational=False))
                            rs.append(sympy.solve_univariate_inequality(r2, label1_start_frame, relational=False))
                            rs.append(sympy.Interval(label1_start_frame_range[1], label1_start_frame_range[2]))
                            # Intersect the intervals: r3 = max of lower bounds, r4 = min of uppers.
                            r3 = 0
                            r4 = ((4 * n) - 1)
                            for r_interval in rs:
                                assert isinstance(r_interval, sympy.Interval)
                                r3 = (- sympy.Min((- r3), (- r_interval.start)))
                                r4 = sympy.Min(r4, r_interval.end)
                            _c = fixed_factor_power
                            c = ((r4 - r3) + 1)
                            # Manual rewrite of a Min pattern that sympy does not simplify itself.
                            c = c.replace(sympy.Min(0, (((- _c) + n) - 1)), (sympy.Min(_c, (n - 1)) - _c))
                            assert isinstance(c, sympy.Expr)
                            assert ((c.count(sympy.Min) == 1) and (c.count(sympy.Max) == 0))
                            (q,) = list(c.find(sympy.Min))
                            assert isinstance(q, sympy.Min)
                            assert (len(q.args) == 2)
                            min_args = list(q.args)
                            # Split the remaining Min into its two ordered cases.
                            case_cond = [sympy.Le(min_args[0], min_args[1]), sympy.Ge(min_args[0], min_args[1])]
                            cases_blank = []
                            cases_label = []
                            for i_ in range(2):
                                c_ = c.replace(q, min_args[i_])
                                assert (c_.count(sympy.Min) == 0)
                                sum_blank = (blank_num_frames_input_ * c_)
                                sum_label = (label1_num_frames_input_ * c_)
                                cond = sympy.And(total_cond, case_cond[i_], sympy.Ge(c_, 0)).simplify()
                                # Hand-simplify conditions sympy leaves in awkward form.
                                cond = cond.replace((((_c - (2 * n)) >= (- 1)) & ((_c - (2 * n)) <= (- 1))),
                                                    sympy.Eq(_c, ((2 * n) - 1)))
                                cond = cond.replace((sympy.Eq(_c, (2 * n)) & sympy.Eq((_c - (2 * n)), (- 1))), False)
                                cond = cond.simplify()
                                if (cond != sympy.sympify(False)):
                                    cases_blank.append((sum_blank, cond))
                                    cases_label.append((sum_label, cond))
                            cases_blank.append((0, True))
                            cases_label.append((0, True))
                            sum_blank = sympy.Piecewise(*cases_blank)
                            sum_label = sympy.Piecewise(*cases_label)
                            res_[BlankLabel] += sum_blank
                            res_[Label1] += sum_label
                    else:
                        # Unconstrained: weight every placement by factor**(num blank frames).
                        factor_ = sympy.Pow(factor, blank_num_frames_input)
                        res_[BlankLabel] += sympy.Sum(sympy.Sum((blank_num_frames_input_ * factor_), label1_end_frame_range), label1_start_frame_range)
                        res_[Label1] += sympy.Sum(sympy.Sum((label1_num_frames_input_ * factor_), label1_end_frame_range), label1_start_frame_range)

                label1_start_frame = sympy.Symbol('label1_start_frame', integer=True)
                label1_end_frame = sympy.Symbol('label1_end_frame', integer=True)
                # Case 1: the label segment starts inside part 1 (frames [0, n)).
                if True:
                    label1_start_frame_range = (label1_start_frame, 0, (n - 1))
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, (n - 1))
                    label1_num_frames_p1 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    blank_num_frames_p23 = (2 * n)
                    blank_num_frames_p4 = n
                    _add()
                    label1_end_frame_range = (label1_end_frame, n, ((3 * n) - 1))
                    label1_num_frames_p1 = (n - label1_start_frame)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    label1_num_frames_p23 = ((label1_end_frame - n) + 1)
                    blank_num_frames_p23 = ((2 * n) - label1_num_frames_p23)
                    blank_num_frames_p4 = n
                    _add()
                    label1_end_frame_range = (label1_end_frame, (3 * n), ((4 * n) - 1))
                    label1_num_frames_p1 = (n - label1_start_frame)
                    blank_num_frames_p1 = (n - label1_num_frames_p1)
                    blank_num_frames_p23 = 0
                    label1_num_frames_p4 = ((label1_end_frame - (3 * n)) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                # Case 2: the label segment starts inside parts 2/3 (frames [n, 3n)).
                if True:
                    label1_start_frame_range = (label1_start_frame, n, ((3 * n) - 1))
                    blank_num_frames_p1 = n
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, ((3 * n) - 1))
                    label1_num_frames_p23 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p23 = ((2 * n) - label1_num_frames_p23)
                    blank_num_frames_p4 = n
                    _add()
                    label1_end_frame_range = (label1_end_frame, (3 * n), ((4 * n) - 1))
                    blank_num_frames_p23 = (label1_start_frame - n)
                    label1_num_frames_p4 = ((label1_end_frame - (3 * n)) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                # Case 3: the label segment starts inside part 4 (frames [3n, 4n)).
                if True:
                    label1_start_frame_range = (label1_start_frame, (3 * n), ((4 * n) - 1))
                    blank_num_frames_p1 = n
                    blank_num_frames_p23 = (2 * n)
                    label1_end_frame_range = (label1_end_frame, label1_start_frame, ((4 * n) - 1))
                    label1_num_frames_p4 = ((label1_end_frame - label1_start_frame) + 1)
                    blank_num_frames_p4 = (n - label1_num_frames_p4)
                    _add()
                if isinstance(factor, int):
                    # Multiple simplify passes: sympy.simplify is not idempotent on
                    # these nested Piecewise/Sum expressions.
                    for _ in range(5):
                        for label in labels:
                            x = res_[label]
                            if (fixed_factor_power is not None):
                                x = sympy_utils.simplify_and(x)
                            x = x.simplify()
                            res_[label] = x
    return res
def validate_nl_brin(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Dutch BRIN codes in `df`.

    Accepts a single string, a (pandas or dask) Series, or a DataFrame.
    For a DataFrame, `column` selects one column; when empty, every cell
    is validated element-wise.
    """
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(brin.is_valid)
        # No column given: validate every cell of the frame.
        return df.applymap(brin.is_valid)
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(brin.is_valid)
    # Plain scalar (string): validate the single value.
    return brin.is_valid(df)
def match_ckpts_and_vaes(search_space):
    """Pair checkpoints with VAE paths in `search_space`, in place.

    When 'rl_variant.num_rl_seeds_per_vae' is set, each VAE path is paired
    with that many consecutive checkpoints into
    'rl_variant.ckpt_and_vae_path'; the separate 'rl_variant.ckpt' and
    'rl_variant.vae_path' entries are then removed.
    """
    seeds_cfg = search_space.get('rl_variant.num_rl_seeds_per_vae', None)
    if seeds_cfg is not None:
        seeds_per_vae = search_space['rl_variant.num_rl_seeds_per_vae'][0]
        pairs = []
        search_space['rl_variant.ckpt_and_vae_path'] = pairs
        idx = 0
        for vae_path in search_space['rl_variant.vae_path']:
            for _ in range(seeds_per_vae):
                pairs.append([search_space['rl_variant.ckpt'][idx], vae_path])
                idx += 1
    # Drop the now-redundant separate lists once the pairing exists.
    if 'rl_variant.ckpt_and_vae_path' in search_space:
        del search_space['rl_variant.ckpt']
        del search_space['rl_variant.vae_path']
def sqrt(x, *args, **kwds):
    """Square root dispatching on the type of ``x``.

    Plain floats use ``math.sqrt``; numpy objects use numpy's elementwise
    sqrt; objects with their own ``sqrt`` method are deferred to; anything
    else falls back to ``_do_sqrt``.
    """
    if isinstance(x, float):
        # Fast path for Python floats.
        return math.sqrt(x)
    if (type(x).__module__ == 'numpy'):
        # numpy arrays / scalars.
        from numpy import sqrt
        return sqrt(x)
    try:
        # Objects that implement their own square root.
        return x.sqrt(*args, **kwds)
    except (AttributeError, TypeError):
        pass
    # Generic symbolic/fallback implementation.
    return _do_sqrt(x, *args, **kwds)
def flatten_hessian(cost, wrt, consider_constant=None, disconnected_inputs='raise', block_diagonal=True):
    """Theano Hessian of a scalar `cost` with flattened `wrt` variables.

    With block_diagonal=True (default), returns one Hessian per `wrt`
    variable (each variable flattened), formatted like the input container.
    With block_diagonal=False, all first derivatives are concatenated into a
    single flat gradient first, so the result includes the cross-variable
    second derivatives, concatenated along axis 1.

    Args:
        cost: 0-dimensional theano Variable.
        wrt: Variable or list/tuple of Variables to differentiate w.r.t.
        consider_constant: passed through to theano.grad.
        disconnected_inputs: passed through to theano.grad ('raise'/'warn'/'ignore').
        block_diagonal: see above.
    """
    import theano
    from theano.tensor import arange
    import theano.tensor as TT
    from theano import Variable
    from theano import grad
    assert isinstance(cost, Variable), 'tensor.hessian expects a Variable as `cost`'
    assert (cost.ndim == 0), 'tensor.hessian expects a 0 dimensional variable as `cost`'
    # Remember the input container type so the result can be formatted the same way.
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    if isinstance(wrt, (list, tuple)):
        wrt = list(wrt)
    else:
        wrt = [wrt]
    hessians = []
    if (not block_diagonal):
        # Full Hessian: one flat gradient over all variables, differentiated below
        # w.r.t. each variable in turn.
        expr = TT.concatenate([grad(cost, input, consider_constant=consider_constant,
                                    disconnected_inputs=disconnected_inputs).flatten() for input in wrt])
    for input in wrt:
        assert isinstance(input, Variable), 'tensor.hessian expects a (list of) Variable as `wrt`'
        if block_diagonal:
            # Per-variable gradient: yields only this variable's diagonal block.
            expr = grad(cost, input, consider_constant=consider_constant,
                        disconnected_inputs=disconnected_inputs).flatten()
        # Row-by-row second derivative via scan: d(expr[i])/d(input), flattened.
        (hess, updates) = theano.scan((lambda i, y, x: grad(y[i], x, consider_constant=consider_constant,
                                                            disconnected_inputs='ignore').flatten()),
                                      sequences=arange(expr.shape[0]), non_sequences=[expr, input])
        assert (not updates), 'Scan has returned a list of updates. This should not happen! Report this to theano-users (also include the script that generated the error)'
        hessians.append(hess)
    if block_diagonal:
        from theano.gradient import format_as
        return format_as(using_list, using_tuple, hessians)
    else:
        return TT.concatenate(hessians, axis=1)
def uppercase_range(code1, code2):
    """Map the half-open code-point range [code1, code2) to uppercase.

    Intersects the range with 'a'..'z' and shifts it to 'A'..'Z'.
    Returns the shifted (start, stop) pair, or None when the intersection
    is empty.
    """
    lo = max(code1, ord('a'))
    hi = min(code2, ord('z') + 1)
    if lo >= hi:
        # No lowercase letters inside the given range.
        return None
    shift = ord('A') - ord('a')
    return (lo + shift, hi + shift)
@app.route('/api/stream/<string:model_name>', methods=['GET', 'POST'])
def stream(model_name):
    """Stream-data endpoint for a model.

    GET returns the stream for the requested data split; POST additionally
    applies the filter spec from the JSON request body. Responds 404 when
    no stream data is found.

    Query params:
        data: data split to use (default 'train').
        conditional: 'true'/'false' flag (default 'true').
        bins: histogram bin count (default 20).

    NOTE(review): the route decorator was truncated in this file to a bare
    tuple (a syntax error); restored as ``@app.route`` — confirm the actual
    Flask app/blueprint object name used in this module.
    """
    data_type = request.args.get('data', 'train')
    conditional = (request.args.get('conditional', 'true') == 'true')
    bins = int(request.args.get('bins', '20'))
    if (request.method == 'GET'):
        ret_json = get_stream(model_name, data_type, conditional=conditional, bins=bins)
    else:
        # POST: request body carries the filter specification.
        filters = parse_filter(request.get_json())
        ret_json = get_stream(model_name, data_type, conditional=conditional, bins=bins, filters=filters)
    if (ret_json is None):
        abort(404)
    else:
        return ret_json
class DDIMSampler(object):
    """DDIM sampler for a latent-diffusion model.

    Wraps a trained DDPM-style `model` and exposes deterministic/stochastic
    DDIM sampling, encoding (inversion) and decoding on a subsequence of the
    model's timesteps.

    NOTE(review): the bare ``_grad()`` markers in the original file are
    truncated ``@torch.no_grad()`` decorators (consistent with upstream
    latent-diffusion); restored as such here.
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Unlike nn.Module.register_buffer this just stores the attribute on
        # self, moving tensors to CUDA first.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Precompute the DDIM timestep subsequence and its alpha/sigma tables."""
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize,
                                                  num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # Quantities for diffusion q(x_t | x_0) and posterior computation.
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(
            alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt(
            (((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) *
             (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None,
               img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0,
               noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None,
               log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
               dynamic_threshold=None, ucg_schedule=None, **kwargs):
        """Run DDIM sampling for `S` steps; returns (samples, intermediates)."""
        if (conditioning is not None):
            # Sanity-check that the conditioning batch matches batch_size.
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if (ctmp.shape[0] != batch_size):
                        # Fix: `cbs` was undefined in this branch (NameError when the
                        # warning fired); use the actual batch size of this entry.
                        print(f'Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')
        (samples, intermediates) = self.ddim_sampling(
            conditioning, size, callback=callback, img_callback=img_callback,
            quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False,
            noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector,
            corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t,
            unconditional_guidance_scale=unconditional_guidance_scale,
            unconditional_conditioning=unconditional_conditioning,
            dynamic_threshold=dynamic_threshold, ucg_schedule=ucg_schedule)
        return (samples, intermediates)

    @torch.no_grad()
    def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None,
                      timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None,
                      log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0,
                      unconditional_conditioning=None, dynamic_threshold=None, ucg_schedule=None):
        """Iterate `p_sample_ddim` over the reversed timestep sequence."""
        device = self.model.betas.device
        b = shape[0]
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # Truncate the precomputed DDIM schedule to the requested fraction.
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) *
                               self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = (reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            if (mask is not None):
                # Inpainting: keep the known region at the correct noise level.
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            if (ucg_schedule is not None):
                # Per-step unconditional guidance scale.
                assert (len(ucg_schedule) == len(time_range))
                unconditional_guidance_scale = ucg_schedule[i]
            outs = self.p_sample_ddim(img, cond, ts, index=index,
                                      use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            (img, pred_x0) = outs
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False,
                      quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None,
                      corrector_kwargs=None, unconditional_guidance_scale=1.0,
                      unconditional_conditioning=None, dynamic_threshold=None):
        """Single DDIM denoising step from x_t; returns (x_prev, pred_x0)."""
        (b, *_, device) = (*x.shape, x.device)
        # NOTE(review): this variant assumes dict-style conditioning carrying a
        # 'task' entry — confirm against the model's apply_model signature.
        task_name = c['task']
        if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
            model_output = self.model.apply_model(x, t, c)
        else:
            # Classifier-free guidance: run conditional + unconditional in one batch.
            x_in = torch.cat(([x] * 2))
            t_in = torch.cat(([t] * 2))
            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if (k == 'task'):
                        continue  # metadata, not a tensor to concatenate
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([unconditional_conditioning[k][i], c[k][i]])
                                   for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
            elif isinstance(c, list):
                c_in = list()
                assert isinstance(unconditional_conditioning, list)
                for i in range(len(c)):
                    c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
            else:
                c_in = torch.cat([unconditional_conditioning, c])
            # NOTE(review): only valid when c_in is a dict (see 'task' assumption above).
            c_in['task'] = task_name
            (model_uncond, model_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            model_output = (model_uncond + (unconditional_guidance_scale * (model_t - model_uncond)))
        if (self.model.parameterization == 'v'):
            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
        else:
            e_t = model_output
        if (score_corrector is not None):
            assert (self.model.parameterization == 'eps'), 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
        # Pick schedule tables for either the full DDPM steps or the DDIM subset.
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps
                                 else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
        if (self.model.parameterization != 'v'):
            # Predicted x0 from the eps parameterization.
            pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
        if quantize_denoised:
            (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
        if (dynamic_threshold is not None):
            raise NotImplementedError()
        # DDIM update: direction to x_t plus (optional) stochastic noise.
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
        if (noise_dropout > 0.0):
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return (x_prev, pred_x0)

    @torch.no_grad()
    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
        """DDIM-invert `x0` forward for `t_enc` steps; returns (x_next, info)."""
        num_reference_steps = (self.ddpm_num_timesteps if use_original_steps
                               else self.ddim_timesteps.shape[0])
        assert (t_enc <= num_reference_steps)
        num_steps = t_enc
        if use_original_steps:
            alphas_next = self.alphas_cumprod[:num_steps]
            alphas = self.alphas_cumprod_prev[:num_steps]
        else:
            alphas_next = self.ddim_alphas[:num_steps]
            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
        x_next = x0
        intermediates = []
        inter_steps = []
        for i in tqdm(range(num_steps), desc='Encoding Image'):
            t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
            if (unconditional_guidance_scale == 1.0):
                noise_pred = self.model.apply_model(x_next, t, c)
            else:
                assert (unconditional_conditioning is not None)
                (e_t_uncond, noise_pred) = torch.chunk(
                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
                                           torch.cat((unconditional_conditioning, c))), 2)
                noise_pred = (e_t_uncond + (unconditional_guidance_scale * (noise_pred - e_t_uncond)))
            # Deterministic DDIM inversion step.
            xt_weighted = ((alphas_next[i] / alphas[i]).sqrt() * x_next)
            weighted_noise_pred = ((alphas_next[i].sqrt() *
                                    (((1 / alphas_next[i]) - 1).sqrt() - ((1 / alphas[i]) - 1).sqrt())) * noise_pred)
            x_next = (xt_weighted + weighted_noise_pred)
            if (return_intermediates and ((i % (num_steps // return_intermediates)) == 0) and
                    (i < (num_steps - 1))):
                intermediates.append(x_next)
                inter_steps.append(i)
            elif (return_intermediates and (i >= (num_steps - 2))):
                intermediates.append(x_next)
                inter_steps.append(i)
            if callback:
                callback(i)
        out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
        if return_intermediates:
            out.update({'intermediates': intermediates})
        return (x_next, out)

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        """Diffuse `x0` to timestep `t` using (optionally provided) Gaussian noise."""
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
        if (noise is None):
            noise = torch.randn_like(x0)
        return ((extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0) +
                (extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise))

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0,
               unconditional_conditioning=None, use_original_steps=False, callback=None):
        """Denoise a latent from step `t_start` back toward x0."""
        timesteps = (np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps)
        timesteps = timesteps[:t_start]
        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            (x_dec, _) = self.p_sample_ddim(x_dec, cond, ts, index=index,
                                            use_original_steps=use_original_steps,
                                            unconditional_guidance_scale=unconditional_guidance_scale,
                                            unconditional_conditioning=unconditional_conditioning)
            if callback:
                callback(i)
        return x_dec
class BaseTrainer():
    """Generic training-loop wrapper around a torch model/optimizer/loss.

    Handles CUDA placement, optional deterministic seeding, checkpointing
    into a local ``checkpoints/`` directory, and both supervised losses
    (target from ``data['output']``) and auto-encoding losses (target is
    the input itself).
    """

    def __init__(self, model, optimizer, training_loss, deterministic, scheduler=None, supervised=True):
        self.deterministic = deterministic
        if deterministic:
            self.__set_deterministic()
        self.model = model
        if torch.cuda.is_available():
            self.model.cuda()
        self.optimizer = optimizer
        self.training_loss = training_loss
        self.scheduler = scheduler
        # supervised=True: loss(output, data['output']); False: loss(output, input).
        self.supervised = supervised
        if (not os.path.exists('checkpoints')):
            os.makedirs('checkpoints')

    def create_optimizer(self, net, optimizer_params):
        """Build an optimizer from a config dict.

        Recognizes ``algorithm`` in {'sgd', 'adam'} with optional
        ``learning_rate``, ``momentum``, ``weight_decay``; anything else
        falls back to SGD(lr=0.001, momentum=0.9, weight_decay=0.0005).
        Only parameters with requires_grad are optimized.
        """
        if (optimizer_params['algorithm'] == 'sgd'):
            return optim.SGD(filter((lambda p: p.requires_grad), net.parameters()),
                             lr=(optimizer_params['learning_rate'] if ('learning_rate' in optimizer_params) else 0.001),
                             momentum=(optimizer_params['momentum'] if ('momentum' in optimizer_params) else 0.9),
                             weight_decay=(optimizer_params['weight_decay'] if ('weight_decay' in optimizer_params) else 0.0005))
        elif (optimizer_params['algorithm'] == 'adam'):
            return optim.Adam(filter((lambda p: p.requires_grad), net.parameters()),
                              lr=(optimizer_params['learning_rate'] if ('learning_rate' in optimizer_params) else 0.001),
                              weight_decay=(optimizer_params['weight_decay'] if ('weight_decay' in optimizer_params) else 0.0005))
        else:
            return optim.SGD(filter((lambda p: p.requires_grad), net.parameters()),
                             lr=0.001, momentum=0.9, weight_decay=0.0005)

    def __set_deterministic(self):
        # Fixed seeds plus deterministic cuDNN for reproducible runs.
        import random
        import numpy as np
        torch.manual_seed(42)
        torch.cuda.manual_seed_all(42)
        random.seed(42)
        np.random.seed(42)
        torch.backends.cudnn.deterministic = True

    def set_supervised(self, supervised):
        self.supervised = supervised

    def set_training_loss(self, training_loss):
        self.training_loss = training_loss

    def train(self, dataloader, epochs, print_frequency=50, max_gradient=None, checkpoint_file=None,
              checkpoint_frequency=50):
        """Run the training loop.

        Args:
            dataloader: yields dicts with 'input' (and 'output' when supervised);
                values may be tensors or lists of tensors.
            epochs: number of passes over the dataloader.
            print_frequency: print the running loss every N batches.
            max_gradient: if set, clip gradients to this L2 norm.
            checkpoint_file: if set, save checkpoints under checkpoints/.
            checkpoint_frequency: save every N epochs (plus once at the end).

        Returns:
            List of mean losses, one per epoch.
        """
        self.model.train()
        epoch_losses = []
        for epoch in range(epochs):
            epoch_loss = 0.0
            running_loss = 0.0
            for (i, data) in enumerate(dataloader):
                self.optimizer.zero_grad()
                inputs = data['input']
                # Move inputs to GPU; inputs may be a list of tensors.
                if torch.cuda.is_available():
                    if isinstance(inputs, list):
                        inputs = [element.cuda() for element in inputs]
                    else:
                        inputs = inputs.cuda()
                if isinstance(inputs, list):
                    output_approx = self.model(*inputs)
                else:
                    output_approx = self.model(inputs)
                if self.supervised:
                    outputs = data['output']
                    if torch.cuda.is_available():
                        if isinstance(outputs, list):
                            outputs = [element.cuda() for element in outputs]
                        else:
                            outputs = outputs.cuda()
                    if isinstance(outputs, list):
                        loss = self.training_loss(*output_approx, *outputs)
                    else:
                        loss = self.training_loss(output_approx, outputs)
                # Unsupervised (auto-encoding): compare reconstruction to the input.
                elif isinstance(inputs, list):
                    loss = self.training_loss(*output_approx, *inputs)
                else:
                    loss = self.training_loss(output_approx, inputs)
                loss.backward()
                if (max_gradient is not None):
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_gradient, norm_type=2)
                self.optimizer.step()
                epoch_loss += loss.item()
                running_loss += loss.item()
                if ((i % print_frequency) == (print_frequency - 1)):
                    print((('[%d, %5d] loss: ' % ((epoch + 1), (i + 1))) + str((running_loss / print_frequency))))
                    running_loss = 0.0
            # Periodic checkpoint at the end of qualifying epochs.
            if ((checkpoint_file is not None) and ((epoch % checkpoint_frequency) == (checkpoint_frequency - 1))):
                self.save_checkpoint(checkpoint_file, epoch=(epoch + 1), save_optimizer=True)
            epoch_losses += [(epoch_loss / len(dataloader))]
            if (self.scheduler is not None):
                self.scheduler.step()
        if (checkpoint_file is not None):
            self.save_checkpoint(checkpoint_file, epoch=epochs, save_optimizer=True)
        print('Training finished')
        return epoch_losses

    def evaluate(self, inputs):
        """Forward `inputs` through the model in eval mode (no grad tracking on inputs)."""
        self.model.eval()
        inputs.requires_grad = False
        return self.model(inputs)

    def save_checkpoint(self, filename, epoch=None, save_optimizer=True):
        """Serialize model (and optionally optimizer/epoch) to checkpoints/<filename>."""
        state = {'state_dict': self.model.state_dict()}
        if save_optimizer:
            state['optimizer'] = self.optimizer.state_dict()
        if (epoch is not None):
            state['epoch'] = epoch
        torch.save(state, ('checkpoints/' + filename))

    def load_checkpoint(self, filename, load_optimizer=True, load_scheduler=True):
        """Restore model (and optionally optimizer/scheduler state) from `filename`.

        Returns the stored epoch number if the checkpoint contains one.
        """
        # map_location keeps tensors on CPU; moved to GPU below if available.
        checkpoint = torch.load(filename, map_location=(lambda storage, location: storage))
        self.model.load_state_dict(checkpoint['state_dict'])
        if torch.cuda.is_available():
            self.model.cuda()
        if load_optimizer:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        if (load_scheduler and (self.scheduler is not None) and ('epoch' in checkpoint)):
            self.scheduler.last_epoch = checkpoint['epoch']
        if ('epoch' in checkpoint):
            return checkpoint['epoch']
class TextVideoDataset(Dataset):
    """Base dataset pairing video clips with text captions.

    Subclasses must implement `_load_metadata` (populating ``self.metadata``),
    `_get_video_path` and `_get_caption`. Supports sliding-window frame
    sampling at test time and a lenient video-loading mode that substitutes
    a black frame on read failure.
    """

    def __init__(self, dataset_name, text_params, video_params, data_dir, meta_dir=None, split='train',
                 tsfms=None, cut=None, subsample=1, sliding_window_stride=(- 1), reader='decord',
                 neg_param=None):
        self.dataset_name = dataset_name
        self.text_params = text_params
        self.video_params = video_params
        # Expand environment variables in the configured paths.
        self.data_dir = os.path.expandvars(data_dir)
        if (meta_dir is not None):
            self.meta_dir = os.path.expandvars(meta_dir)
        else:
            self.meta_dir = self.data_dir
        self.split = split
        self.transforms = tsfms
        self.cut = cut
        self.subsample = subsample
        self.sliding_window_stride = sliding_window_stride
        # `video_reader` is a module-level registry mapping reader name -> reader fn.
        self.video_reader = video_reader[reader]
        self.label_type = 'caption'
        self.neg_param = neg_param
        self._load_metadata()
        if (self.sliding_window_stride != (- 1)):
            # Fixed sliding-window sampling is a test-time-only feature.
            if (self.split != 'test'):
                raise ValueError('Fixing frame sampling is for test time only. can remove but...')
            self._fix_temporal_samples()

    def _load_metadata(self):
        # Subclass hook: must populate self.metadata (a pandas DataFrame).
        raise NotImplementedError('Metadata loading must be implemented by subclass')

    def _get_video_path(self, sample):
        # Subclass hook: returns (absolute_path, relative_path) for a metadata row.
        raise NotImplementedError('Get video path function must be implemented by subclass')

    def _get_caption(self, sample):
        # Subclass hook: returns the caption text for a metadata row.
        raise NotImplementedError('Get caption function must be implemented by subclass')

    def _get_video_lens(self):
        # Frame count for every metadata row, in row order.
        vlen_li = []
        for (idx, row) in self.metadata.iterrows():
            video_path = self._get_video_path(row)[0]
            vlen_li.append(get_video_len(video_path))
        return vlen_li

    def _fix_temporal_samples(self):
        """Precompute fixed window start offsets for sliding-window test sampling.

        Each metadata row is exploded into one row per window start.
        """
        self.metadata['vlen'] = self._get_video_lens()
        self.metadata['frame_intervals'] = self.metadata['vlen'].apply(
            (lambda x: np.linspace(start=0, stop=x, num=(min(x, self.video_params['num_frames']) + 1)).astype(int)))
        # NOTE(review): `len((x - 1))` is the length of the array `x - 1`, i.e. just
        # len(x); this looks like it was meant to be `(len(x) - 1)` — confirm intent
        # before changing (the same expression exists upstream).
        self.metadata['fix_start'] = self.metadata['frame_intervals'].apply(
            (lambda x: np.arange(0, int((x[(- 1)] / len((x - 1)))), self.sliding_window_stride)))
        self.metadata = self.metadata.explode('fix_start')

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, item):
        # Wrap the index so out-of-range indices still map to a valid row.
        item = (item % len(self.metadata))
        sample = self.metadata.iloc[item]
        (video_fp, rel_fp) = self._get_video_path(sample)
        caption = self._get_caption(sample)
        video_loading = self.video_params.get('loading', 'strict')
        frame_sample = 'rand'
        fix_start = None
        if (self.split == 'test'):
            frame_sample = 'uniform'
        if (self.sliding_window_stride != (- 1)):
            fix_start = sample['fix_start']
        try:
            if os.path.isfile(video_fp):
                (imgs, idxs) = self.video_reader(video_fp, self.video_params['num_frames'], frame_sample,
                                                 fix_start=fix_start)
            else:
                print(f'Warning: missing video file {video_fp}.')
                assert False
        except Exception as e:
            if (video_loading == 'strict'):
                raise ValueError(f'Video loading failed for {video_fp}, video loading for this dataset is strict.') from e
            else:
                # Lenient mode: substitute a single black frame so iteration continues.
                imgs = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))
                imgs = transforms.ToTensor()(imgs).unsqueeze(0)
        if (self.transforms is not None):
            if (self.video_params['num_frames'] > 1):
                # Video transforms expect (C, T, H, W); swap back afterwards.
                imgs = imgs.transpose(0, 1)
                imgs = self.transforms(imgs)
                imgs = imgs.transpose(0, 1)
            else:
                imgs = self.transforms(imgs)
        # Zero-pad along time so every item has exactly num_frames frames.
        final = torch.zeros([self.video_params['num_frames'], 3, self.video_params['input_res'],
                             self.video_params['input_res']])
        final[:imgs.shape[0]] = imgs
        meta_arr = {'raw_captions': caption, 'paths': rel_fp, 'dataset': self.dataset_name}
        data = {'video': final, 'text': caption, 'meta': meta_arr}
        return data
class JSONTag(object):
    """Base class for tagged-JSON serialization hooks.

    A tag knows how to recognise a Python value (`check`), convert it to a
    JSON-safe representation (`to_json`) and back (`to_python`), and wrap
    the converted value in a single-key dict under its `key`.
    """

    __slots__ = ('serializer',)

    # Unique string key identifying this tag in serialized output.
    key = None

    def __init__(self, serializer):
        # The owning serializer, kept for subclasses that need round-trips.
        self.serializer = serializer

    def check(self, value):
        """Return True if this tag handles *value*. Subclasses must override."""
        raise NotImplementedError

    def to_json(self, value):
        """Convert *value* to a JSON-serializable form. Subclasses must override."""
        raise NotImplementedError

    def to_python(self, value):
        """Reverse `to_json`. Subclasses must override."""
        raise NotImplementedError

    def tag(self, value):
        """Wrap the serialized *value* under this tag's `key`."""
        return {self.key: self.to_json(value)}
class ACW_loss(nn.Module):
    """Adaptive Class Weighting (ACW) segmentation loss.

    Combines a per-pixel squared-error/log-ratio term, weighted by running
    frequency-based class weights, with a soft Dice term computed over the
    whole batch. The module is stateful: a cumulative class-frequency
    estimate is updated on every forward pass.
    """

    def __init__(self, ini_weight=0, ini_iteration=0, eps=1e-05, ignore_index=255):
        super(ACW_loss, self).__init__()
        self.ignore_index = ignore_index  # target label treated as "ignore"
        self.weight = ini_weight          # running per-class frequency estimate
        self.itr = ini_iteration          # batches folded into `weight` so far
        self.eps = eps                    # numerical-stability constant

    def forward(self, prediction, target):
        # prediction: raw logits (N, C, H, W); target: integer labels (N, H, W)
        # — inferred from the softmax over dim 1 and the scatter_ below;
        # TODO confirm against callers.
        pred = F.softmax(prediction, 1)
        (one_hot_label, mask) = self.encode_one_hot_label(pred, target)
        acw = self.adaptive_class_weight(pred, one_hot_label, mask)
        # Squared error plus a log-ratio penalty term.
        err = torch.pow((one_hot_label - pred), 2)
        pnc = (err - (((1.0 - err) + self.eps) / ((1.0 + err) + self.eps)).log())
        loss_pnc = torch.sum((acw * pnc), 1)
        # Soft Dice over batch + spatial dims; ignored pixels zeroed out of
        # the union so they cannot dilute the score.
        intersection = ((2 * torch.sum((pred * one_hot_label), dim=(0, 2, 3))) + self.eps)
        union = (pred + one_hot_label)
        if (mask is not None):
            union[mask] = 0
        union = (torch.sum(union, dim=(0, 2, 3)) + self.eps)
        dice = (intersection / union)
        return (loss_pnc.mean() - dice.mean().log())

    def adaptive_class_weight(self, pred, one_hot_label, mask=None):
        """Update the running class frequencies and return per-pixel weights."""
        self.itr += 1
        # Batch class frequencies, normalized to sum to 1.
        sum_class = torch.sum(one_hot_label, dim=(0, 2, 3))
        sum_norm = (sum_class / sum_class.sum())
        # Cumulative moving average over all batches seen so far.
        self.weight = (((self.weight * (self.itr - 1)) + sum_norm) / self.itr)
        # Frequency-balancing weights: rarer classes get larger mfb values,
        # renormalized and clamped for stability.
        mfb = (self.weight.mean() / (self.weight + self.eps))
        mfb = (mfb / mfb.sum())
        mfb = torch.clamp(mfb, min=0.001, max=1.0)
        # Per-pixel weight combining prediction and label activation with
        # the class balance factor (broadcast over H, W).
        acw = (((1.0 + pred) + one_hot_label) * mfb.unsqueeze((- 1)).unsqueeze((- 1)))
        if (mask is not None):
            acw[mask] = 0  # ignored pixels contribute nothing
        return acw

    def encode_one_hot_label(self, pred, target):
        """One-hot encode *target*; return (one_hot, ignore_mask or None)."""
        # Zeros with pred's shape/dtype/device, detached from the graph.
        one_hot_label = (pred.detach() * 0)
        if (self.ignore_index is not None):
            mask = (target == self.ignore_index)
            target = target.clone()  # avoid mutating the caller's tensor
            target[mask] = 0         # temporary valid label so scatter_ is legal
            one_hot_label.scatter_(1, target.unsqueeze(1), 1)
            mask = mask.unsqueeze(1).expand_as(one_hot_label)
            one_hot_label[mask] = 0  # zero out the ignored positions again
            return (one_hot_label, mask)
        else:
            one_hot_label.scatter_(1, target.unsqueeze(1), 1)
            return (one_hot_label, None)
def test_htc_aug_test():
    """Smoke-test multi-scale augmented testing for the HTC R50-FPN config."""
    results = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py')
    first_image = results[0]
    # Expect (bbox, segm) result pairs, each with one entry per COCO class.
    assert len(first_image) == 2
    for per_class in first_image:
        assert len(per_class) == 80
def create_libri3mix_csv(datapath, savepath, addnoise=False, version='wav8k/min/', set_types=('train-360', 'dev', 'test')):
    """Write one CSV per Libri3Mix split listing mixture/source/noise wav paths.

    Args:
        datapath: root of the Libri3Mix data tree.
        savepath: directory where ``libri3mix_<split>.csv`` files are written.
        addnoise: if True, index the noisy mixtures (``mix_both``) instead of
            the clean ones (``mix_clean``).
        version: sample-rate/mode subdirectory inside ``datapath``.
        set_types: split names to process. NOTE: changed from a mutable list
            default to a tuple (same values) to avoid the shared-mutable-default
            pitfall; callers may still pass any iterable.

    Note:
        ``duration`` is a placeholder (1.0); real durations are not read here.
    """
    csv_columns = ['ID', 'duration', 'mix_wav', 'mix_wav_format', 'mix_wav_opts',
                   's1_wav', 's1_wav_format', 's1_wav_opts',
                   's2_wav', 's2_wav_format', 's2_wav_opts',
                   's3_wav', 's3_wav_format', 's3_wav_opts',
                   'noise_wav', 'noise_wav_format', 'noise_wav_opts']
    for set_type in set_types:
        mix_dir = 'mix_both/' if addnoise else 'mix_clean/'
        mix_path = os.path.join(datapath, version, set_type, mix_dir)
        s1_path = os.path.join(datapath, version, set_type, 's1/')
        s2_path = os.path.join(datapath, version, set_type, 's2/')
        s3_path = os.path.join(datapath, version, set_type, 's3/')
        noise_path = os.path.join(datapath, version, set_type, 'noise/')
        # The mixture directory defines the file list; source and noise paths
        # are derived from the same file names (files need not be checked).
        files = os.listdir(mix_path)
        out_csv = os.path.join(savepath, 'libri3mix_' + set_type + '.csv')
        # newline='' avoids blank lines on Windows (csv module requirement).
        with open(out_csv, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for (i, fl) in enumerate(files):
                row = {'ID': i, 'duration': 1.0,
                       'mix_wav': (mix_path + fl), 'mix_wav_format': 'wav', 'mix_wav_opts': None,
                       's1_wav': (s1_path + fl), 's1_wav_format': 'wav', 's1_wav_opts': None,
                       's2_wav': (s2_path + fl), 's2_wav_format': 'wav', 's2_wav_opts': None,
                       's3_wav': (s3_path + fl), 's3_wav_format': 'wav', 's3_wav_opts': None,
                       'noise_wav': (noise_path + fl), 'noise_wav_format': 'wav', 'noise_wav_opts': None}
                writer.writerow(row)
def is_jit_tracing() -> bool:
    """Return True if TorchScript tracing is currently active.

    Falls back to False (with a warning) on parrots builds and on PyTorch
    versions older than 1.6.0, where `torch.jit.is_tracing` is unreliable.
    """
    unsupported = (torch.__version__ == 'parrots'
                   or digit_version(torch.__version__) < digit_version('1.6.0'))
    if unsupported:
        warnings.warn('torch.jit.is_tracing is only supported after v1.6.0. Therefore is_tracing returns False automatically. Please set on_trace manually if you are using trace.', UserWarning)
        return False
    on_trace = torch.jit.is_tracing()
    # Some releases return a non-bool here; fall through to the private API.
    return on_trace if isinstance(on_trace, bool) else torch._C._is_tracing()
def generate_token_in_non_aggregation_expression(token, columns, result_value_name, group_by, data_str, index_str):
    """Translate *token* for use in the non-aggregation part of an expression.

    Aggregation-function and comparison tokens are converted via their
    helpers; column references become indexed data lookups; anything else is
    passed through unchanged.

    Raises:
        ValueError: if the token is the result variable, or a column is
            referenced without a GROUP BY clause.
    """
    agg = try_convert_to_aggregation_function(token)
    if agg:
        return agg
    cmp_token = try_convert_comparision_token(token)
    if cmp_token:
        return cmp_token
    if token == result_value_name:
        # BUGFIX: the message was passed as ('...%s...', name) — ValueError
        # does not %-format its arguments, so the name was never substituted.
        raise ValueError('invalid expression: result variable %s should not occur in the non-aggregation part of objective or constraint' % result_value_name)
    if token in columns:
        if not group_by:
            # BUGFIX: same unformatted-message issue as above.
            raise ValueError('invalid expression: column %s should not occur in the non-aggregation part of objective or constraint without GROUP BY' % token)
        return '%s["%s"][%s]' % (data_str, token, index_str)
    return token
def dependency_parse(filepath, cp='', tokenize=True):
    """Run the Java DependencyParse tool on *filepath*.

    Writes `<name>.toks`, `<name>.parents` and `<name>.rels` next to the
    input file. `cp` is the Java classpath for the parser.
    """
    print(('\nDependency parsing ' + filepath))
    base_dir = os.path.dirname(filepath)
    stem = os.path.splitext(os.path.basename(filepath))[0]
    tok_file = os.path.join(base_dir, (stem + '.toks'))
    parent_file = os.path.join(base_dir, (stem + '.parents'))
    rel_file = os.path.join(base_dir, (stem + '.rels'))
    tokenize_flag = '-tokenize - ' if tokenize else ''
    # NOTE(review): the command is assembled by string interpolation and run
    # through a shell; paths containing spaces or shell metacharacters will
    # break or be interpreted by the shell. Consider subprocess.run([...]).
    cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s'
           % (cp, tok_file, parent_file, rel_file, tokenize_flag, filepath))
    os.system(cmd)
class Moebius():
    """The Möbius function mu(n) as a callable object (Sage-style).

    mu(n) is 0 if n has a squared prime factor, otherwise (-1)^k where k is
    the number of distinct prime factors; this implementation returns 0 for
    n == 0 by convention.
    """

    def __call__(self, n):
        # Coerce plain Python scalars into Sage elements first.
        n = py_scalar_to_element(n)
        if (not isinstance(n, Integer)):
            # Non-Integer Sage elements are handled by explicit
            # factorization. NOTE(review): this branch runs for *every*
            # non-Integer input — confirm the intended semantics for rings
            # where `factor` differs from integer factorization.
            if (n < 0):
                n = (- n)  # mu is even: mu(-n) == mu(n)
            F = factor(n)
            for (_, e) in F:
                if (e >= 2):
                    return 0  # squared prime factor => mu = 0
            return ((- 1) ** len(F))
        if (n == 0):
            return ZZ.zero()  # convention: mu(0) = 0
        # Fast path: delegate Integer inputs to PARI.
        from sage.libs.pari.all import pari
        return ZZ(pari(n).moebius())

    def __repr__(self):
        return 'The Moebius function'

    def plot(self, xmin=0, xmax=50, pointsize=30, rgbcolor=(0, 0, 1), join=True, **kwds):
        """Plot mu(n) for n in [xmin, xmax] as points, optionally joined."""
        values = self.range(xmin, (xmax + 1))
        v = [(n, values[(n - xmin)]) for n in range(xmin, (xmax + 1))]
        from sage.plot.all import list_plot
        P = list_plot(v, pointsize=pointsize, rgbcolor=rgbcolor, **kwds)
        if join:
            # Overlay a light grey connecting line under the points.
            P += list_plot(v, plotjoined=True, rgbcolor=(0.7, 0.7, 0.7), **kwds)
        return P

    def range(self, start, stop=None, step=None):
        """Return [mu(start), mu(start+step), ...] up to (exclusive) *stop*.

        Mirrors the builtin range() calling conventions; computed in bulk via
        a single PARI `vector(...)` call for speed.
        """
        if (stop is None):
            (start, stop) = (1, int(start))
        else:
            start = int(start)
            stop = int(stop)
        if (step is None):
            step = 1
        else:
            step = int(step)
        if ((start <= 0 < stop) and ((start % step) == 0)):
            # PARI's moebius() is not defined at 0, so splice in mu(0) = 0
            # by recursing on the negative and positive halves.
            return ((self.range(start, 0, step) + [ZZ.zero()]) + self.range(step, stop, step))
        from sage.libs.pari.all import pari
        if (step == 1):
            v = pari(('vector(%s, i, moebius(i-1+%s))' % ((stop - start), start)))
        else:
            n = len(range(start, stop, step))
            v = pari(('vector(%s, i, moebius(%s*(i-1) + %s))' % (n, step, start)))
        return [Integer(x) for x in v]
def train(model, dataset_paths, save_every, steps, save_path, bsize):
    """Fine-tune *model* on pickled QA datapoints with AdamW + linear warmup.

    Args:
        model: wrapper exposing `.model` (the underlying network),
            `.get_logits_from_qc_` and `.save`.
        dataset_paths: pickle files, each containing a list of dicts with at
            least an 'a' (gold answer index) key.
        save_every: checkpoint interval in steps (step 0 is never saved).
        steps: total optimization steps.
        save_path: checkpoint path prefix; the step count is appended.
        bsize: batch size.
    """
    data_dicts = []
    for d_path in dataset_paths:
        # BUGFIX: use a context manager so each file handle is closed
        # (previously `pkl.load(open(...))` leaked the descriptor).
        with open(d_path, 'rb') as fh:
            data_dicts.extend(pkl.load(fh))
    print(('%d datapoints' % len(data_dicts)))
    random.shuffle(data_dicts)
    # Standard transformer recipe: no weight decay on biases / LayerNorm.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for (n, p) in model.model.named_parameters()
                    if (not any(((nd in n) for nd in no_decay)))],
         'weight_decay': 0.01},
        {'params': [p for (n, p) in model.model.named_parameters()
                    if any(((nd in n) for nd in no_decay))],
         'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=1e-05)
    scheduler = get_linear_schedule_with_warmup(optimizer, 100, steps)
    loss_func = torch.nn.NLLLoss()
    for i in trange(steps):
        # Cycle through the shuffled data, wrapping around with modulo.
        ds = [data_dicts[(j % len(data_dicts))] for j in range((i * bsize), ((i + 1) * bsize))]
        logits = model.get_logits_from_qc_(ds)
        gold = torch.tensor([d['a'] for d in ds]).to(device)
        loss = loss_func(logits, gold)
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        if (((i % save_every) == 0) and (i != 0)):
            model.save((save_path + str(i)))
class LibraryTrigger(Trigger):
    """Invoke an OpenWhisk action synchronously or asynchronously via the wsk CLI.

    NOTE(review): the decorators were garbled in this copy (a bare
    `_cmd.setter` token, accessor methods without decorators). They are
    restored here from internal usage: `sync_invoke` reads `self.wsk_cmd`
    without calling it (so it is a property), `trigger_type`/`get_command`/
    `deserialize`/`typename` take no `self` (so they are static). Confirm
    against the upstream source.
    """

    def __init__(self, fname: str, wsk_cmd: Optional[List[str]] = None):
        super().__init__()
        self.fname = fname
        # _wsk_cmd stays unset when no CLI prefix is given; the property
        # asserts on access.
        if wsk_cmd:
            self._wsk_cmd = [*wsk_cmd, 'action', 'invoke', '--result', self.fname]

    @staticmethod
    def trigger_type() -> 'Trigger.TriggerType':
        return Trigger.TriggerType.LIBRARY

    @property
    def wsk_cmd(self) -> List[str]:
        assert self._wsk_cmd
        return self._wsk_cmd

    @wsk_cmd.setter
    def wsk_cmd(self, wsk_cmd: List[str]):
        self._wsk_cmd = [*wsk_cmd, 'action', 'invoke', '--result', self.fname]

    @staticmethod
    def get_command(payload: dict) -> List[str]:
        """Render *payload* as repeated `--param <key> <json-value>` CLI args."""
        params = []
        for (key, value) in payload.items():
            params.append('--param')
            params.append(key)
            params.append(json.dumps(value))
        return params

    def sync_invoke(self, payload: dict) -> ExecutionResult:
        """Run the wsk CLI synchronously and parse the benchmark output."""
        command = (self.wsk_cmd + self.get_command(payload))
        error = None
        try:
            begin = datetime.datetime.now()
            response = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True)
            end = datetime.datetime.now()
            parsed_response = response.stdout.decode('utf-8')
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            end = datetime.datetime.now()
            error = e
        openwhisk_result = ExecutionResult.from_times(begin, end)
        if (error is not None):
            self.logging.error('Invocation of {} failed!'.format(self.fname))
            openwhisk_result.stats.failure = True
            return openwhisk_result
        return_content = json.loads(parsed_response)
        openwhisk_result.parse_benchmark_output(return_content)
        return openwhisk_result

    def async_invoke(self, payload: dict) -> concurrent.futures.Future:
        """Run sync_invoke on a fresh single-use thread pool; return the future."""
        pool = concurrent.futures.ThreadPoolExecutor()
        fut = pool.submit(self.sync_invoke, payload)
        return fut

    def serialize(self) -> dict:
        return {'type': 'Library', 'name': self.fname}

    @staticmethod
    def deserialize(obj: dict) -> Trigger:
        return LibraryTrigger(obj['name'])

    @staticmethod
    def typename() -> str:
        return 'OpenWhisk.LibraryTrigger'
def make_mask(labels, n_cls, device):
    """Build an (n_cls, n_samples) 0/1 class-membership mask from *labels*.

    Row c has a 1 in every column whose label equals c. Returned as a long
    tensor on *device*.
    """
    label_arr = labels.detach().cpu().numpy()
    num_samples = label_arr.shape[0]
    membership = np.zeros([n_cls, num_samples])
    for cls_idx in range(n_cls):
        hits = np.where(label_arr == cls_idx)
        membership[cls_idx, hits] = 1
    return torch.tensor(membership).type(torch.long).to(device)
class LightSwitchEnv(hello_env.HelloEnv):
    """Hello-robot light-switch manipulation environment.

    Tracks accumulated end-effector displacement in `GLOBAL_XYZ` and clips
    action deltas so the arm stays inside a fixed y/z work box. Resets are
    semi-manual: the operator confirms each stage on the console.
    """

    def __init__(self):
        # Work-space limits in meters relative to the home pose —
        # presumably tuned for the physical switch fixture; TODO confirm.
        self.reset_y_lims = [(- 0.11), 0.0]
        self.MIN_Z = (- 0.28)
        self.MAX_Z = 0
        self.MAX_Y = 0
        self.MIN_Y = (- 0.14)
        self.GLOBAL_XYZ = [0, 0, 0]  # accumulated (x, y, z) displacement
        super(LightSwitchEnv, self).__init__()
        self.action_space = spaces.Box(low=np.array(([(- 1)] * 3), dtype=np.float32), high=np.array(([1] * 3), dtype=np.float32), dtype=np.float32)
        # Per-dimension action scaling. NOTE(review): amax/amin are all zero
        # here, which makes `diff` zero and every scaled action zero —
        # confirm these are meant to be filled in per-robot.
        self.amax = np.array([0., 0., 0.])
        self.amin = np.array([(- 0.), (- 0.), (- 0.)])
        self.diff = (self.amax - self.amin)

    def _publish_home(self):
        """Publish a 'go home' message tagged with this env's uid."""
        home_publisher_list = Float64MultiArray()
        home_publisher_list.layout.data_offset = self.uid
        home_publisher_list.data = [0]
        self.home_publisher.publish(home_publisher_list)

    def reset(self):
        """Operator-assisted reset: retract the arm, re-home, randomize start y."""
        x = input('Retracting the arm... Press Enter to continue')
        if (self.GLOBAL_XYZ[2] != 0):
            # Stop residual motion, then retract up/back out of the fixture.
            self._publish_instruction([0, 0, 0, 0.0])
            time.sleep(1)
            self._publish_instruction([(0.2 * 0.26), 0, 0.2, 0.0])
            time.sleep(1)
            self.GLOBAL_XYZ[2] += 0.2
            self.GLOBAL_XYZ[0] += (0.2 * 0.26)
        for i in range(3):
            self._publish_home()
            time.sleep(1)
        obs = {}
        x = input('Press Enter to continue... after reseting env')
        if (x == 'c'):
            # Operator typed 'c': also re-home the robot and zero the
            # tracked displacement. NOTE(review): this prompt text and the
            # extent of this branch were re-joined across a chunk break in
            # this copy — verify both against the original file.
            x = input('Press Enter to continue... after reseting robot')
            for _ in range(3):
                self._publish_home()
                time.sleep(1)
            self.GLOBAL_XYZ = [0, 0, 0]
        # Randomize the starting y position within the reset limits.
        new_y = np.random.uniform(self.reset_y_lims[0], self.reset_y_lims[1])
        reset_y_action = np.array([0, (new_y - self.GLOBAL_XYZ[1]), 0, 0])
        self._publish_instruction(reset_y_action)
        time.sleep(4)
        self.GLOBAL_XYZ = [0, new_y, 0]
        print('New Y:', new_y)
        obs['feature'] = None
        obs['pixels'] = self.get_obs()
        return obs

    def step(self, action):
        """Scale, clip to the work box, publish, and return the new observation."""
        action = ((action * self.diff) + self.amin)
        # Clip the *delta* so the accumulated position stays inside [MIN, MAX].
        action[1] = np.clip(action[1], (self.MIN_Y - self.GLOBAL_XYZ[1]), (self.MAX_Y - self.GLOBAL_XYZ[1]))
        action[2] = np.clip(action[2], (self.MIN_Z - self.GLOBAL_XYZ[2]), (self.MAX_Z - self.GLOBAL_XYZ[2]))
        self.GLOBAL_XYZ[1] += action[1]
        self.GLOBAL_XYZ[2] += action[2]
        # Wait for a fresh uid from the robot before issuing the next command.
        while (self.prev_uid == self.uid):
            time.sleep(0.3)
        self.prev_uid = copy(self.uid)
        # x is slaved to z with a fixed 0.26 ratio — presumably from the arm
        # kinematics; TODO confirm.
        action[0] = (0.26 * action[2])
        action = np.append(action, 0)
        self._publish_instruction(action)
        obs = {}
        obs['feature'] = None
        obs['pixels'] = self.get_obs()
        return (obs, 0, False, {'is_success': False})
class PowerSchedule(object):
    """Polynomial interpolation schedule from `initial_p` down to `final_p`.

    value(t) = final_p + (initial_p - final_p) * (1 - t/T)**power, with t
    clamped to T = schedule_timesteps. power=1 gives linear decay; the value
    stays at final_p for all t >= T.
    """

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps  # T: steps to reach final_p
        self.final_p = final_p      # value at and after t >= T
        self.initial_p = initial_p  # value at t = 0

    def value(self, t, power: float = 1.0):
        """Return the scheduled value at step *t*.

        BUGFIX: the annotation was `power: int = 1.0`, contradicting its own
        float default; corrected to `float` (ints remain valid arguments).
        """
        decay_steps = min(t, self.schedule_timesteps)
        fraction = (1 - (decay_steps / self.schedule_timesteps)) ** power
        return self.final_p + (self.initial_p - self.final_p) * fraction
def verifyJWTToken(token):
    """Decode *token* and return its payload, or an error string on failure.

    NOTE(review): the signing secret is the empty string and errors are
    returned as strings instead of raised — callers must string-match to
    detect failure. Flagged for a future interface/security cleanup.
    """
    try:
        payload = jwt.decode(token, '')
    except jwt.ExpiredSignatureError:
        return 'Token has expired'
    except jwt.InvalidTokenError:
        return 'Invalid token'
    except Exception as e:
        return ('Error: ' + str(e))
    return payload
def GetHits_PUndirNet(Graph, NIdHubH, NIdAuthH, MaxIter=20):
    """Delegate HITS computation on an undirected network to the SNAP core.

    Presumably hub and authority scores are written into the NIdHubH /
    NIdAuthH hash tables in place (SWIG output-parameter convention — TODO
    confirm); MaxIter bounds the iteration count.
    """
    return _snap.GetHits_PUndirNet(Graph, NIdHubH, NIdAuthH, MaxIter)
class SentenceWrapper(BaseInputExample):
    """Adapt a spaCy sentence Span to the parser's input-example interface.

    NOTE(review): in this copy the non-ASCII keys of the normalization table
    had been mojibake-stripped to empty strings (collapsing the dict), and
    the `@property` decorators were lost (callers iterate `self.words`
    without calling it). Both are restored here; confirm the unicode keys
    against the upstream source.
    """

    # Map typographic quotes/dashes to the ASCII forms the parser expects.
    TEXT_NORMALIZATION_MAPPING = {
        '`': "'",
        '\u00ab': '"',   # left guillemet
        '\u00bb': '"',   # right guillemet
        '\u2018': "'",   # left single quote
        '\u2019': "'",   # right single quote
        '\u201c': '"',   # left double quote
        '\u201d': '"',   # right double quote
        '\u201e': '"',   # low double quote
        '\u2039': "'",   # left single guillemet
        '\u203a': "'",   # right single guillemet
        '\u2014': '--',  # em dash
    }

    def __init__(self, spacy_sent):
        self.sent = spacy_sent

    @property
    def words(self):
        """Normalized token texts."""
        return [self.TEXT_NORMALIZATION_MAPPING.get(token.text, token.text)
                for token in self.sent]

    @property
    def space_after(self):
        """Whether each token is followed by whitespace in the raw text."""
        return [bool(token.whitespace_) for token in self.sent]

    @property
    def tree(self):
        # No gold parse tree is available for raw spaCy input.
        return None

    def leaves(self):
        return self.words

    def pos(self):
        # POS tags are unknown at this stage; the parser fills them in later.
        return [(word, 'UNK') for word in self.words]
def distort_image_with_randaugment(image, num_layers, magnitude, fill_value=(128, 128, 128)):
    """Apply RandAugment to *image* inside the TensorFlow graph.

    Args:
        image: image tensor — presumably HWC uint8; TODO confirm against the
            op implementations behind `_parse_policy_info`.
        num_layers: number of augmentation ops applied in sequence.
        magnitude: shared severity level passed to every op.
        fill_value: RGB constant handed to the ops via hparams.

    Returns:
        The augmented image tensor.
    """
    augmentation_hparams = dict(translate_rel=0.4, translate_const=100, fill_value=fill_value)
    available_ops = ['AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'SolarizeAdd']
    for layer_num in range(num_layers):
        # Pick one op uniformly at random, in-graph.
        op_to_select = tf.random.uniform([], maxval=len(available_ops), dtype=tf.int32)
        random_magnitude = float(magnitude)
        with tf.name_scope('randaug_layer_{}'.format(layer_num)):
            for (i, op_name) in enumerate(available_ops):
                # Each op also draws a random application probability in [0.2, 0.8].
                prob = tf.random.uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
                (func, _, args) = _parse_policy_info(op_name, prob, random_magnitude, augmentation_hparams)
                # Only the branch matching op_to_select transforms the image.
                # Default arguments bind func/args per iteration, avoiding the
                # late-binding-closure pitfall inside tf.cond.
                image = tf.cond(tf.equal(i, op_to_select), (lambda selected_func=func, selected_args=args: selected_func(image, *selected_args)), (lambda : image))
    return image
class ERuleReader(object):
    """Mine relation-path rules from DocRED-style data and cache them to disk.

    Reads split JSON files plus `meta/rel2id.json` / `meta/ner2id.json`,
    enumerates meta-paths between entity pairs up to `max_step` hops, and
    counts which paths co-occur with each (relation, head-type, tail-type)
    triple. Results are cached as torch `.pth` files under `save_dir`.
    """

    def __init__(self, data_dir, save_dir, max_step=3) -> None:
        self.data_dir = data_dir
        self.save_dir = save_dir
        if (not os.path.exists(self.save_dir)):
            os.makedirs(self.save_dir)
        # Shift every relation id down by one — presumably drops the NA
        # ("no relation") slot at id 0; TODO confirm against the meta file.
        self.rel2id = {k: (v - 1) for (k, v) in json.load(open(os.path.join(data_dir, 'meta/rel2id.json'))).items()}
        self.id2rel = {k: v for (v, k) in self.rel2id.items()}
        # R = number of real relations (one entry excluded).
        self.R = (len(self.rel2id) - 1)
        self.type2id = json.load(open(os.path.join(data_dir, 'meta/ner2id.json')))
        self.id2type = {k: v for (v, k) in self.type2id.items()}
        # Raw annotation files per split name.
        self.data_paths = {'rtrain': os.path.join(data_dir, 'rtrain.json'), 'train': os.path.join(data_dir, 'train_annotated.json'), 'dist': os.path.join(data_dir, 'train_distant.json'), 'dev': os.path.join(data_dir, 'dev.json'), 'test': os.path.join(data_dir, 'test.json')}
        # Cached co-occurrence features per split.
        self.bin_paths = {'rtrain': os.path.join(save_dir, 'cooccur-rtrain.pth'), 'train': os.path.join(save_dir, 'cooccur-train.pth'), 'dist': os.path.join(save_dir, 'cooccur-dist.pth'), 'dev': os.path.join(save_dir, 'cooccur-dev.pth'), 'test': os.path.join(save_dir, 'cooccur-test.pth')}
        self.max_step = max_step  # maximum meta-path length in hops

    def read(self, split='train'):
        """Return cached features for *split*, computing and caching on miss."""
        bin_path = self.bin_paths[split]
        if os.path.exists(bin_path):
            return torch.load(bin_path)
        else:
            features = self.read_raw(split)
            torch.save(features, bin_path)
            return features

    def read_raw(self, split='train'):
        """Mine rule statistics for *split*.

        Returns a dict with:
          - 'triples': flat list of (relation, head_type, tail_type) keys,
          - 'sections': number of type pairs per relation,
          - 'triple2rules' / 'triple2probs': per triple, the meta-paths seen
            between matching entity pairs and their empirical probabilities.
        """
        max_step = self.max_step
        r2epair = self.get_r2epair()
        # One Counter of meta-paths per (relation, head_type, tail_type).
        rule_counter = {(i, h, t): Counter() for i in range(self.R) for (h, t) in r2epair[i]}
        with open(self.data_paths[split]) as fp:
            data = json.load(fp)
        for item in tqdm(data, desc='reading raw data'):
            entities = item['vertexSet']
            # Entity type taken from the first mention of each entity.
            entity_types = [self.type2id[e[0]['type']] for e in entities]
            # meta_paths[s][h][t] = list of (relation_chain, node_chain)
            # paths of length s from entity h to entity t.
            paths = {}
            meta_paths = {1: paths}
            for fact in item['labels']:
                (h, t, r) = (fact['h'], fact['t'], self.rel2id[fact['r']])
                # Forward edge h -> t labelled r.
                if (h not in paths):
                    paths[h] = {t: [([r], [t])]}
                elif (t not in paths[h]):
                    paths[h][t] = [([r], [t])]
                else:
                    paths[h][t].append(([r], [t]))
                # Inverse edge t -> h labelled r + R (inverse-relation ids
                # live in the upper half of the id space).
                if (t not in paths):
                    paths[t] = {h: [([(r + self.R)], [h])]}
                elif (h not in paths[t]):
                    paths[t][h] = [([(r + self.R)], [h])]
                else:
                    paths[t][h].append(([(r + self.R)], [h]))
            # Grow paths one hop at a time by joining (step-1)-paths with
            # 1-hop edges; self-loops are skipped and `append_chain` may
            # reject a combination by returning a falsy value.
            for step in range(2, (max_step + 1)):
                prev_paths = meta_paths[(step - 1)]
                paths = {}
                for h in prev_paths:
                    for (inode, prev_chain) in prev_paths[h].items():
                        if (inode in meta_paths[1]):
                            for (t, rs) in meta_paths[1][inode].items():
                                if (h == t):
                                    continue
                                new_chain = append_chain(prev_chain, rs)
                                if (not new_chain):
                                    continue
                                if (h not in paths):
                                    paths[h] = {t: new_chain}
                                elif (t not in paths[h]):
                                    paths[h][t] = new_chain
                                else:
                                    paths[h][t].extend(new_chain)
                meta_paths[step] = paths
            # For every labelled edge, record the set of distinct relation
            # chains (any length) connecting the same pair as rule evidence.
            for h in meta_paths[1]:
                for (t, rs) in meta_paths[1][h].items():
                    c_meta_paths = set()
                    for step in range(1, (max_step + 1)):
                        if ((h in meta_paths[step]) and (t in meta_paths[step][h])):
                            for path in meta_paths[step][h][t]:
                                c_meta_paths.add(tuple(path[0]))
                    for r in rs:
                        if (r[0][0] >= self.R):
                            continue  # skip inverse edges: only count each fact once
                        triple = (r[0][0], entity_types[h], entity_types[t])
                        rule_counter[triple].update(c_meta_paths)
        # Flatten counters into parallel lists plus per-relation sections.
        triples = []
        triple2rules = {}
        triple2probs = {}
        lens = [len(epair) for epair in r2epair]
        for (ri, epairs) in enumerate(r2epair):
            for epair in epairs:
                triple = (ri, epair[0], epair[1])
                total = sum(rule_counter[triple].values())
                (rules, probs) = ([], [])
                for rule in rule_counter[triple]:
                    rules.append(rule)
                    probs.append((rule_counter[triple][rule] / total))
                triples.append(triple)
                triple2rules[triple] = rules
                triple2probs[triple] = probs
        features = {'triples': triples, 'sections': lens, 'triple2rules': triple2rules, 'triple2probs': triple2probs}
        return features

    def get_r2epair(self):
        """Per relation, list the (head_type, tail_type) pairs seen in train."""
        r2epair = [[] for _ in range((len(self.rel2id) - 1))]
        with open(self.data_paths['train']) as fp:
            data = json.load(fp)
        for item in data:
            entities = item['vertexSet']
            entity_types = [self.type2id[e[0]['type']] for e in entities]
            for fact in item['labels']:
                (h, t, r) = (entity_types[fact['h']], entity_types[fact['t']], self.rel2id[fact['r']])
                if ((h, t) not in r2epair[r]):
                    r2epair[r].append((h, t))
        return r2epair

    def get_epair2r(self):
        """Boolean (T, T, R) tensor marking (head_type, tail_type, rel) seen in train."""
        e_pair2r = torch.zeros(len(self.type2id), len(self.type2id), (len(self.rel2id) - 1)).bool()
        with open(self.data_paths['train']) as fp:
            data = json.load(fp)
        for item in data:
            entities = item['vertexSet']
            entity_types = [self.type2id[e[0]['type']] for e in entities]
            for fact in item['labels']:
                (h, t, r) = (fact['h'], fact['t'], self.rel2id[fact['r']])
                e_pair2r[(entity_types[h], entity_types[t], r)] = 1
        print(e_pair2r.size(), e_pair2r.sum())
        return e_pair2r

    def get_type_mask(self, triples, sections, split='train'):
        """Per-document (N, N, R) tensors mapping entity-type pairs to rule-slot ids."""
        ntypes = len(self.type2id)
        # rpair2id[r][(h_type, t_type)] -> slot index within relation r's section.
        rpair2id = [{} for _ in sections]
        tid = 0
        for section in sections:
            for sid in range(section):
                (r, e1, e2) = triples[tid]
                rpair2id[r][(e1, e2)] = sid
                tid += 1
        # Dense lookup table; -1 marks triples never seen in training.
        triple2sid = torch.CharTensor(ntypes, ntypes, self.R).fill_((- 1))
        for ei in range(ntypes):
            for ej in range(ntypes):
                for r in range(self.R):
                    triple2sid[(ei, ej, r)] = rpair2id[r].get((ei, ej), (- 1))
        with open(self.data_paths[split]) as fp:
            data = json.load(fp)
        type_masks = []
        for item in data:
            entities = item['vertexSet']
            N = len(entities)
            entity_types = torch.tensor([self.type2id[e[0]['type']] for e in entities])
            # Broadcast head types down rows and tail types across columns.
            type_indices = (entity_types.unsqueeze(1).repeat(1, N), entity_types.unsqueeze(0).repeat(N, 1))
            type_mask = triple2sid[(type_indices[0], type_indices[1])]
            type_masks.append(type_mask)
        return type_masks

    def get_dist(self, split='train'):
        """Per-document (N, N) matrices of token distances between entity spans.

        Uses only the first mention of each entity; the distance is the
        smaller absolute gap between the two mentions' [start, end) bounds.
        """
        with open(self.data_paths[split]) as fp:
            data = json.load(fp)
        dists = []
        for item in tqdm(data, desc='reading raw data'):
            entities = item['vertexSet']
            N = len(entities)
            entities_pos = []
            for entity in entities:
                s = entity[0]['pos'][0]
                e = entity[0]['pos'][1]
                entities_pos.append([s, e])
            dist = torch.zeros(N, N)
            for h in range(N):
                for t in range(N):
                    (sh, eh) = entities_pos[h]
                    (st, et) = entities_pos[t]
                    dist[(h, t)] = min(abs((sh - et)), abs((st - eh)))
            dists.append(dist)
        return dists
def preprocess_data():
    """Run the CSQA preprocessing pipeline and index QA files under data/BFS/.

    Steps: build the taxonomy child->parent list, tokenize wikidata items,
    build the inverted token index, then copy each non-empty CSQA
    train/valid/test JSON file into data/BFS/{train,dev,test}/QA<i>.json.
    """
    print('Preprocessing data...')
    print('build_child2parent_list')
    build_child2parent_list()
    print('tokenize_items_wikidata_n')
    tokenize_items_wikidata_n()
    print('build_inverse_index_spacy_token')
    build_inverse_index_spacy_token()
    print('index the raw qa files into data/BFS/train, data/BFS/dev and data/BFS/test')
    train = []
    dev = []
    test = []
    # Bucket every CSQA json file by the split its path name implies.
    # BUGFIX: renamed the inner loop variable (was `file`/`files`, the latter
    # shadowing os.walk's `files`).
    for (root, dirs, file_names) in os.walk('data/CSQA'):
        for file_name in file_names:
            path = os.path.join(root, file_name)
            if ('.json' not in path):
                continue
            if ('train' in path):
                train.append(path)
            elif ('valid' in path):
                dev.append(path)
            elif ('test' in path):
                test.append(path)
    for (split_name, split_files) in [('train', train), ('dev', dev), ('test', test)]:
        print('doing for {}'.format(split_name))
        out_dir = ('data/BFS/' + split_name)
        cont = 0
        for f in tqdm(split_files, total=len(split_files)):
            # BUGFIX: close file handles with context managers (previously
            # json.load(open(...)) / json.dump(..., open(...)) leaked
            # descriptors and risked unflushed output).
            with open(f, 'r') as fp:
                dicts = json.load(fp)
            # Skip files with no dialogue turns (every dict is expected to
            # carry an 'utterance' key, as in the original).
            sentences = [d['utterance'] for d in dicts]
            if not sentences:
                continue
            if (not os.path.exists(out_dir)):
                os.makedirs(out_dir)
            with open((out_dir + '/QA' + str(cont) + '.json'), 'w') as fp:
                json.dump(dicts, fp)
            cont += 1
    print('All done!')
class MockReplayPool(object):
    """In-memory replay pool over whole numpy batches (testing helper).

    Stores observation/next-observation/action arrays plus `num_extras`
    parallel extra arrays, keeping at most `desired_max_size` samples either
    by FIFO truncation or by random subsampling of each incoming batch. A
    max size of None means "the pool is exactly the most recent batch".
    """

    def __init__(self, desired_max_size, num_extras=0, rng=None, strategy='fifo'):
        assert (strategy in ['fifo', 'subsampled_batch'])
        self.max_size = desired_max_size
        self.num_extras = num_extras
        self._rng = rng
        self.strategy = strategy
        self.reset()

    def pool_is_batch(self):
        """True when the pool always equals the most recent batch."""
        return (self.max_size is None)

    @property
    def rng(self):
        # BUGFIX: `rng` was declared as a plain method, but add_samples uses
        # it as an attribute (`self.rng.choice(...)`), which would call
        # `.choice` on the bound method and fail; the @property decorator is
        # restored so the accessor returns the generator itself.
        if (self._rng is None):
            return np.random
        return self._rng

    def reset(self):
        """Drop all stored samples."""
        self.observations = None
        self.next_observations = None
        self.actions = None
        self.extras = [None for _ in range(self.num_extras)]

    def add_samples(self, *, obses, next_obses, actions, extras):
        """Replace pool contents with (a truncation/subsample of) this batch."""
        num_samples = len(obses)
        if ((num_samples == self.max_size) or (self.max_size is None)):
            self.observations = obses
            self.next_observations = next_obses
            self.actions = actions
            self.extras = extras
        elif (self.strategy == 'fifo'):
            # Keep the newest max_size samples.
            self.observations = obses[(- self.max_size):]
            self.next_observations = next_obses[(- self.max_size):]
            self.actions = actions[(- self.max_size):]
            self.extras = [e[(- self.max_size):] for e in extras]
        elif (self.strategy == 'subsampled_batch'):
            # Uniform random subsample without replacement.
            indices = self.rng.choice(num_samples, self.max_size, replace=False)
            self.observations = obses[indices]
            self.next_observations = next_obses[indices]
            self.actions = actions[indices]
            self.extras = [e[indices] for e in extras]

    def __len__(self):
        # NOTE(review): reports max_size whenever anything is stored, even if
        # the stored batch was smaller — confirm callers rely on this.
        if (self.observations is None):
            return 0
        return self.max_size

    def get_data(self, index=None, ordered=None):
        """Return (next_obses, obses, actions), or one of them by *index*.

        When *ordered* is requested (the default for fifo), asserts that the
        storage strategy actually preserves insertion order.
        """
        if (ordered is None):
            ordered = (self.strategy == 'fifo')
        if ordered:
            assert ((self.strategy == 'fifo') or (self.max_size is None))
        data = (self.next_observations, self.observations, self.actions)
        if (index is not None):
            return data[index]
        return data

    def get_extra(self, index, ordered=None):
        """Return the index-th extra array (must have been populated)."""
        if (ordered is None):
            ordered = (self.strategy == 'fifo')
        if ordered:
            assert ((self.strategy == 'fifo') or (self.max_size is None))
        assert (self.extras[index] is not None)
        return self.extras[index]