code
stringlengths
101
5.91M
@prettytensor.Register(assign_defaults=('activation_fn', 'l2loss', 'stddev', 'batch_normalize'))
class deconv2d(prettytensor.VarStoreMethod):
    # NOTE(review): the decorator name was lost in extraction; PrettyTensor
    # registers methods with @prettytensor.Register — confirm upstream.
    """Adds a transposed 2-D convolution (deconvolution) to the input layer."""

    def __call__(self, input_layer, kernel, depth, name=PROVIDED, stride=None,
                 activation_fn=None, l2loss=None, init=None, stddev=None,
                 bias=True, edges=PAD_SAME, batch_normalize=False,
                 phase=Phase.train):
        """Apply conv2d_transpose producing `depth` output channels.

        Raises:
            ValueError: if the input is not rank 4, its depth is unknown,
                or both `init` and `stddev` are given.
        """
        if len(input_layer.shape) != 4:
            # BUG FIX: the message previously said "conv2d" inside deconv2d.
            raise ValueError('Cannot perform deconv2d on tensor with shape %s' % input_layer.shape)
        if input_layer.shape[3] is None:
            raise ValueError('Input depth must be known')
        kernel = _kernel(kernel)
        stride = _stride(stride)
        # conv2d_transpose filter layout: [h, w, out_channels, in_channels].
        size = [kernel[0], kernel[1], depth, input_layer.shape[3]]
        books = input_layer.bookkeeper
        if init is None:
            if stddev is None:
                patch_size = size[0] * size[1]
                init = layers.xavier_init(size[2] * patch_size, size[3] * patch_size)
            elif stddev:
                init = tf.truncated_normal_initializer(stddev=stddev)
            else:
                init = tf.zeros_initializer
        elif stddev is not None:
            raise ValueError('Do not set both init and stddev.')
        dtype = input_layer.tensor.dtype
        params = self.variable('weights', size, init, dt=dtype)
        input_height = input_layer.shape[1]
        input_width = input_layer.shape[2]
        filter_height = kernel[0]
        filter_width = kernel[1]
        row_stride = stride[1]
        col_stride = stride[2]
        (out_rows, out_cols) = get2d_deconv_output_size(input_height, input_width,
                                                        filter_height, filter_width,
                                                        row_stride, col_stride, edges)
        output_shape = [input_layer.shape[0], out_rows, out_cols, depth]
        y = tf.nn.conv2d_transpose(input_layer, params, output_shape, stride, edges)
        layers.add_l2loss(books, params, l2loss)
        if bias:
            # size[-2] == depth: one bias per output channel.
            y += self.variable('bias', [size[-2]], tf.zeros_initializer, dt=dtype)
        books.add_scalar_summary(tf.reduce_mean(layers.spatial_slice_zeros(y)),
                                 '%s/zeros_spatial' % y.op.name)
        if batch_normalize:
            y = input_layer.with_tensor(y).batch_normalize(phase=phase)
        if activation_fn is not None:
            if not isinstance(activation_fn, collections.Sequence):
                activation_fn = (activation_fn,)
            y = layers.apply_activation(books, y, activation_fn[0],
                                        activation_args=activation_fn[1:])
        return input_layer.with_tensor(y)
class CodeCompletionResult(Structure):
    """ctypes mapping of a single libclang CXCompletionResult."""

    _fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]

    def kind(self):
        """The CursorKind of the entity this completion refers to."""
        return CursorKind.from_id(self.cursorKind)

    def string(self):
        """The CompletionString describing how to insert this result."""
        return CompletionString(self.completionString)

    def __repr__(self):
        return str(CompletionString(self.completionString))
def micro_anaylsis_options(output_dir):
    """tf.profiler options selecting ops by execution time (micros >= 1ms)."""
    opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
    opts.update({
        'select': ['micros', 'device'],
        'min_micros': 1000,
        'account_type_regexes': ['.*'],
        'order_by': 'micros',
    })
    if output_dir:
        # Persist the report alongside the run artifacts.
        opts['dump_to_file'] = os.path.join(output_dir, 'micro.txt')
    return ('graph', opts)
def _find_matching(idx, tlist, start_ttype, start_value, end_ttype, end_value): depth = 1 for tok in tlist.tokens[idx:]: if tok.match(start_ttype, start_value): depth += 1 elif tok.match(end_ttype, end_value): depth -= 1 if (depth == 1): return tok return None
def main(dataset_name, *args):
    """Seed the RNG and run the generator registered for `dataset_name`."""
    paths = default_paths.get_default_paths()
    random.seed(1234)
    handler = DATASET_MAPPING.get(dataset_name)
    if handler is None:
        raise ValueError(f'dataset {dataset_name} currently not handled')
    handler(paths, dataset_name, *args)
class Trainer(DefaultTrainer):
    """Detectron2 trainer specialized for COCO / YTVIS video-instance data."""
    # NOTE(review): these methods take `cls` but carry no @classmethod
    # decorator here — presumably lost in extraction; confirm upstream.

    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Return the evaluator(s) matching the dataset's evaluator_type."""
        if (output_folder is None):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
            os.makedirs(output_folder, exist_ok=True)
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if (evaluator_type == 'coco'):
            evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
        elif (evaluator_type == 'ytvis'):
            evaluator_list.append(YTVISEvaluator(dataset_name, cfg, True, output_folder))
        if (len(evaluator_list) == 0):
            raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
        elif (len(evaluator_list) == 1):
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    def build_train_loader(cls, cfg):
        """Build the training loader with a clip mapper chosen by dataset prefix."""
        dataset_name = cfg.DATASETS.TRAIN[0]
        # NOTE(review): `mapper` is unbound if the name matches neither prefix.
        if dataset_name.startswith('coco'):
            mapper = CocoClipDatasetMapper(cfg, is_train=True)
        elif dataset_name.startswith('ytvis'):
            mapper = YTVISDatasetMapper(cfg, is_train=True)
        dataset_dict = get_detection_dataset_dicts(dataset_name, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, proposal_files=(cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None))
        return build_detection_train_loader(cfg, mapper=mapper, dataset=dataset_dict)

    def build_test_loader(cls, cfg, dataset_name):
        """Build the test loader; the dataset_name argument is overridden
        by cfg.DATASETS.TEST[0] (visible in the first line below)."""
        dataset_name = cfg.DATASETS.TEST[0]
        if dataset_name.startswith('coco'):
            mapper = CocoClipDatasetMapper(cfg, is_train=False)
        elif dataset_name.startswith('ytvis'):
            mapper = YTVISDatasetMapper(cfg, is_train=False)
        return build_detection_test_loader(cfg, dataset_name, mapper=mapper)

    def build_optimizer(cls, cfg, model):
        """Build SGD/AdamW with per-parameter LR (backbone multiplier) and
        optional full-model gradient clipping."""
        params: List[Dict[(str, Any)]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for (key, value) in model.named_parameters(recurse=True):
            if (not value.requires_grad):
                continue
            if (value in memo):
                # Skip parameters shared between modules.
                continue
            memo.add(value)
            lr = cfg.SOLVER.BASE_LR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY
            if ('backbone' in key):
                lr = (lr * cfg.SOLVER.BACKBONE_MULTIPLIER)
            params += [{'params': [value], 'lr': lr, 'weight_decay': weight_decay}]

        def maybe_add_full_model_gradient_clipping(optim):
            # Wrap the optimizer class so step() first clips the global norm.
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (cfg.SOLVER.CLIP_GRADIENTS.ENABLED and (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model') and (clip_norm_val > 0.0))

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    all_params = itertools.chain(*[x['params'] for x in self.param_groups])
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)
            return (FullModelGradientClippingOptimizer if enable else optim)

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if (optimizer_type == 'SGD'):
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
        elif (optimizer_type == 'ADAMW'):
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(params, cfg.SOLVER.BASE_LR)
        else:
            raise NotImplementedError(f'no optimizer type {optimizer_type}')
        if (not (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model')):
            # Non-full-model clipping is handled by detectron2's helper.
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
def exodus_to_sinabs(model: torch.nn.Module):
    """Replace every EXODUS layer in `model` with its sinabs equivalent."""
    for sinabs_cls, exodus_cls in module_map.items():
        def build_replacement(module, replacement=sinabs_cls):
            # Re-instantiate the sinabs class from the module's stored args.
            return replacement(**module.arg_dict)
        model = sinabs.conversion.replace_module(model, exodus_cls, mapper_fn=build_replacement)
    return model
def register_functions(root_module):
    """Register free functions for each nested ns-3 C++ namespace."""
    namespace_registrars = (
        ('FatalImpl', register_functions_ns3_FatalImpl),
        ('Hash', register_functions_ns3_Hash),
        ('TracedValueCallback', register_functions_ns3_TracedValueCallback),
        ('aodv', register_functions_ns3_aodv),
    )
    for namespace, registrar in namespace_registrars:
        registrar(root_module.add_cpp_namespace(namespace), root_module)
    return
class FloatPrimitiveStatement(PrimitiveStatement[float]):
    """Primitive statement that holds a float value in a generated test case."""

    def __init__(self, test_case: tc.TestCase, value: (float | None)=None, constant_provider: (constants.ConstantProvider | None)=None) -> None:
        super().__init__(test_case, Instance(test_case.test_cluster.type_system.to_type_info(float)), value, constant_provider=constant_provider)

    def randomize_value(self) -> None:
        # Prefer a seeded constant with the configured reuse probability;
        # otherwise draw a Gaussian scaled by max_int and round to 0-7 places.
        if (self._constant_provider and (randomness.next_float() <= config.configuration.seeding.seeded_primitives_reuse_probability) and ((seeded_value := self._constant_provider.get_constant_for(float)) is not None)):
            self._value = seeded_value
        else:
            val = (randomness.next_gaussian() * config.configuration.test_creation.max_int)
            precision = randomness.next_int(0, 7)
            self._value = round(val, precision)

    def delta(self) -> None:
        # Mutate with equal probability: large jump, unit-scale jump, or
        # a precision (rounding) change.
        assert (self._value is not None)
        probability = randomness.next_float()
        if (probability < (1.0 / 3.0)):
            self._value += (randomness.next_gaussian() * config.configuration.test_creation.max_delta)
        elif (probability < (2.0 / 3.0)):
            self._value += randomness.next_gaussian()
        else:
            self._value = round(self._value, randomness.next_int(0, 7))

    def clone(self, test_case: tc.TestCase, memo: dict[(vr.VariableReference, vr.VariableReference)]) -> FloatPrimitiveStatement:
        # NOTE: `memo` is unused here — a primitive carries no variable
        # references that would need remapping.
        return FloatPrimitiveStatement(test_case, self._value, constant_provider=self._constant_provider)

    def __repr__(self) -> str:
        return f'FloatPrimitiveStatement({self._test_case}, {self._value})'

    def __str__(self) -> str:
        return f'{self._value}: float'

    def accept(self, visitor: StatementVisitor) -> None:
        visitor.visit_float_primitive_statement(self)
def get_pbt_agent_from_config(save_dir=None, sim_threads=0, seed=0, agent_idx=0, best=False, agent_to_load_path=None):
    """Load a PBT agent, either from an explicit path or from the standard
    save_dir/seed_<seed>/agent<idx> layout (latest iter, or '/best')."""
    if agent_to_load_path is None:
        agent_folder = save_dir + 'seed_{}/agent{}'.format(seed, agent_idx)
        if best:
            agent_to_load_path = agent_folder + '/best'
        else:
            # Fall back to the most recent PBT iteration checkpoint.
            agent_to_load_path = agent_folder + '/pbt_iter' + str(get_max_iter(agent_folder))
    return get_agent_from_saved_model(agent_to_load_path, sim_threads)
def get_args():
    """Parse command-line arguments for SCARF training."""
    parser = add_scarf_train_args(argparse.ArgumentParser())
    return parser.parse_args()
def add_tree_nodes(proto_tree, tree, score):
    """Serialize `tree` into proto_tree.nodes as open / label / children / close.

    Only the outermost call carries a score; recursive calls pass None.
    """
    open_node = proto_tree.nodes.add()
    open_node.openNode = True
    if score is not None:
        open_node.score = score
    label_node = proto_tree.nodes.add()
    label_node.value = tree.label
    for child in tree.children:
        if child.is_leaf():
            leaf_node = proto_tree.nodes.add()
            leaf_node.value = child.label
        else:
            add_tree_nodes(proto_tree, child, None)
    close_node = proto_tree.nodes.add()
    close_node.closeNode = True
def gaussian(x, sigma):
    """Normal pdf with mean 0 and standard deviation `sigma`, evaluated at x."""
    normalization = sigma * np.sqrt(2.0 * np.pi)
    return np.exp(-(x ** 2) / (2.0 * sigma ** 2)) / normalization
def GetKCoreEdges_PUndirNet(Graph, CoreIdSzV):
    # Thin SWIG wrapper: delegate directly to the native SNAP implementation.
    return _snap.GetKCoreEdges_PUndirNet(Graph, CoreIdSzV)
class SphereFace(ArcMargin):
    # SphereFace (A-Softmax) expressed as a special case of the combined-margin
    # ArcMargin head: scale s=1 and multiplicative angular margin m1=m.
    def __init__(self, in_feats, out_feats, m=4) -> None:
        super().__init__(in_feats, out_feats, s=1, m1=m)
def find_best_blas_type(arrays=(), dtype=None):
    """Pick the BLAS prefix and dtype best matching `arrays` (or `dtype`).

    Returns (prefix, dtype, prefer_fortran) where prefer_fortran reflects
    the memory layout of the highest-scoring array.
    """
    dtype = _np.dtype(dtype)
    max_score = _type_score.get(dtype.char, 5)
    prefer_fortran = False
    if arrays:
        if len(arrays) == 1:
            max_score = _type_score.get(arrays[0].dtype.char, 5)
            prefer_fortran = arrays[0].flags['FORTRAN']
        else:
            scores = [_type_score.get(arr.dtype.char, 5) for arr in arrays]
            max_score = max(scores)
            ind_max_score = scores.index(max_score)
            # Mixing single complex (3) with double (2) promotes to
            # double complex (4).
            if max_score == 3 and (2 in scores):
                max_score = 4
            if arrays[ind_max_score].flags['FORTRAN']:
                prefer_fortran = True
    prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64')))
    return prefix, dtype, prefer_fortran
def add_ResNet_roi_0fc_head(model, blob_in, dim_in, spatial_scale):
    """ResNet RoI head with no FC layers: RoI transform then global pooling.

    Returns the output blob and its channel dimension (2048).
    """
    model.RoIFeatureTransform(
        blob_in, 'pool5',
        blob_rois='rois',
        method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
        resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
        sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale,
    )
    pooled = model.AveragePool('pool5', 'res5_pool', kernel=7)
    return (pooled, 2048)
class SeekableFileObject():
    """Make a forward-only file object seekable by buffering what is read.

    All bytes pulled from the underlying file are kept in an in-memory
    buffer so earlier positions can be revisited.
    """

    def __init__(self, f):
        self.f = f
        self._i = 0              # current logical read position
        self._buffer = b''       # everything read from f so far
        self._have_all = False   # True once f is exhausted
        self.closed = False

    def read(self, n=None):
        """Read up to n bytes; all remaining if n is None or negative."""
        if n is not None:
            n = int(n)
            if n < 0:
                n = None
        if not self._have_all:
            more = b''
            if n is None:
                more = self.f.read()
                self._have_all = True
            else:
                want_i = self._i + n
                want_more = want_i - len(self._buffer)
                if want_more > 0:
                    more = self.f.read(want_more)
                    # A short read means the underlying file is exhausted.
                    if len(more) < want_more:
                        self._have_all = True
            self._buffer += more
        if n is None:
            res = self._buffer[self._i:]
        else:
            res = self._buffer[self._i:(self._i + n)]
        self._i += len(res)
        return res

    def tell(self):
        """Current logical position."""
        return self._i

    def seek(self, i, mode=0):
        """Seek like io.IOBase.seek: mode 0 absolute, 1 relative, 2 from end.

        Raises ValueError on a negative absolute offset or invalid mode.
        """
        i = int(i)
        if mode == 0:
            if i < 0:
                raise ValueError('negative seek value ' + str(i))
            real_i = i
        elif mode == 1:
            real_i = max(0, self._i + i)
        elif mode == 2:
            if not self._have_all:
                # End-relative seeks need the full length; drain the file.
                self.read()
            real_i = max(0, len(self._buffer) + i)
        else:
            # BUG FIX: the message previously interpolated the offset `i`
            # instead of the invalid whence value `mode`.
            raise ValueError('invalid whence (%s, should be 0, 1 or 2)' % mode)
        if real_i <= len(self._buffer):
            pass
        elif not self._have_all:
            # Seeking past the buffer: pull the missing bytes in.
            assert real_i > self._i
            self.read(real_i - self._i)
        self._i = real_i
        return self._i

    def close(self):
        self.closed = True
        self.f.close()

    def isatty(self):
        return False

    def seekable(self):
        return True
def get_prompt(click_state, click_input):
    """Append JSON-decoded clicks to `click_state` and build a SAM click prompt.

    `click_state` is mutated in place: [points, labels], where each click in
    `click_input` is a [x, y, label] triple.
    """
    clicks = json.loads(click_input)
    points, labels = click_state[0], click_state[1]
    for click in clicks:
        points.append(click[:2])
        labels.append(click[2])
    click_state[0] = points
    click_state[1] = labels
    return {
        'prompt_type': ['click'],
        'input_point': click_state[0],
        'input_label': click_state[1],
        'multimask_output': 'True',
    }
def simulate(n=1000, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, seed=0, B=5000):
    """One selective-inference simulation: draw a Gaussian instance, run the
    randomized selection meta-algorithm, then do full-model inference.

    NOTE(review): `alpha` is accepted but unused in this body — confirm intent.
    """
    np.random.seed(seed)
    (X, y, truth) = gaussian_instance(n=n, p=p, s=s, equicorrelated=False, rho=0.5, sigma=sigma, signal=signal, random_signs=True, scale=False, center=False)[:3]
    dispersion = (sigma ** 2)
    # Sufficient statistic and its covariance drive the smoothed sampler.
    S = X.T.dot(y)
    covS = (dispersion * X.T.dot(X))
    smooth_sampler = normal_sampler(S, covS)

    def meta_algorithm(X, XTXi, resid, sampler):
        # Selection event: a feature "wins" when its pinv coefficient beats
        # that of a noise-correlated copy of the same column.
        (n, p) = X.shape
        rho = 0.8
        S = sampler(scale=0.0)
        ynew = (X.dot(XTXi).dot(S) + resid)
        Xnew = ((rho * X) + (np.sqrt((1 - (rho ** 2))) * np.random.standard_normal(X.shape)))
        X_full = np.hstack([X, Xnew])
        beta_full = np.linalg.pinv(X_full).dot(ynew)
        winners = (np.fabs(beta_full)[:p] > np.fabs(beta_full)[p:])
        return set(np.nonzero(winners)[0])

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = (y - X.dot(XTXi.dot(X.T.dot(y))))
    # Re-estimate dispersion from OLS residuals (overwrites sigma**2 above).
    dispersion = ((np.linalg.norm(resid) ** 2) / (n - p))
    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
    return full_model_inference(X, y, truth, selection_algorithm, smooth_sampler, success_params=(8, 10), B=B, fit_probability=keras_fit, fit_args={'epochs': 20, 'sizes': ([100] * 5), 'dropout': 0.0, 'activation': 'relu'})
@pytest.mark.parametrize('ex_filename', examples_dg)
def test_examples_dg(ex_filename, output_dir):
    """Run each declarative-geometry example and verify its conditions hold.

    NOTE(review): the `@pytest.mark` prefix of the parametrize decorator was
    lost in extraction and has been restored here — confirm upstream.
    """
    conditions = run_declaratice_example(ex_filename=inedir(ex_filename),
                                         output_dir=output_dir, ext='.msh',
                                         remove_prefix=examples_dir)
    ok = check_conditions(conditions)
    assert ok
def ackley(ind, a=20.0, b=0.2, c=(2.0 * math.pi)):
    """Ackley benchmark function; returns a 1-tuple fitness (DEAP convention).

    Global minimum 0 at the origin.
    """
    n = len(ind)
    exp_term = -a * math.exp(-b * math.sqrt(sum(x ** 2.0 for x in ind) / n))
    cos_term = -math.exp(sum(math.cos(c * x) for x in ind) / n)
    return (exp_term + cos_term + a + math.exp(1.0),)
class LoT_Unet(nn.Module):
    """LoT filtering layer followed by a U-Net reconstruction stage."""

    def __init__(self, LoT_Layer, Unet_part):
        super(LoT_Unet, self).__init__()
        self.Unet = Unet_part
        self.LoT_Layer = LoT_Layer

    def forward(self, wphs, masks, TEs, B0, z_prjs):
        filtered, learnable = self.LoT_Layer(wphs, masks, TEs, B0)
        # The U-Net output is divided by 4 to rescale the reconstruction.
        return self.Unet(filtered, learnable, z_prjs) / 4
def features(sentence, index):
    """POS-tagging feature dict for the token at `index` in `sentence`."""
    word = sentence[index]
    is_first = index == 0
    is_last = index == len(sentence) - 1
    return {
        'word': word,
        'is_first': is_first,
        'is_last': is_last,
        'is_capitalized': word[0].upper() == word[0],
        'is_all_caps': word.upper() == word,
        'is_all_lower': word.lower() == word,
        'prefix-1': word[0],
        'prefix-2': word[:2],
        'prefix-3': word[:3],
        'suffix-1': word[-1],
        'suffix-2': word[-2:],
        'suffix-3': word[-3:],
        'prev_word': '' if is_first else sentence[index - 1],
        'next_word': '' if is_last else sentence[index + 1],
        'has_hyphen': '-' in word,
        'is_numeric': word.isdigit(),
        'capitals_inside': word[1:].lower() != word[1:],
    }
@_utils.test()
def test_zero_inner_loop():
    """A zero-trip inner loop must never execute its body.

    NOTE(review): `_utils.test()` appeared detached before this def in the
    extracted source; it has been restored as a decorator — confirm upstream.
    """
    x = ti.field(ti.i32, shape=())

    def test():
        for i in range(1):
            for j in range(0):
                x[None] = 1

    test()
    assert (x[None] == 0)
class ADE20KSegmentation(BaseMMSeg):
    """ADE20K dataset wrapper: 150 classes, 1-based labels at evaluation."""

    def __init__(self, image_size, crop_size, split, **kwargs):
        super().__init__(image_size, crop_size, split, ADE20K_CONFIG_PATH, **kwargs)
        self.names, self.colors = utils.dataset_cat_description(ADE20K_CATS_PATH)
        self.n_cls = 150
        self.ignore_label = 0
        self.reduce_zero_label = True

    def update_default_config(self, config):
        """Point the split's data_root at the local ADE20K directory."""
        path = Path(dataset_dir()) / 'ade20k'
        config.data_root = path
        if self.split == 'train':
            config.data.train.data_root = path / 'ADEChallengeData2016'
        elif self.split == 'trainval':
            config.data.trainval.data_root = path / 'ADEChallengeData2016'
        elif self.split == 'val':
            config.data.val.data_root = path / 'ADEChallengeData2016'
        elif self.split == 'test':
            # The held-out test release lives in a separate directory.
            config.data.test.data_root = path / 'release_test'
        return super().update_default_config(config)

    def test_post_process(self, labels):
        # Shift back to the 1-based label ids expected by the evaluator.
        return labels + 1
def all_estimators(type_filter=None):
    """Discover every public, concrete scikit-learn estimator class.

    Returns a sorted list of (name, class) tuples, optionally filtered to
    'classifier' / 'regressor' / 'transformer' / 'cluster'.
    """
    # Imported here to avoid circular imports at module load time.
    from ..base import BaseEstimator, ClassifierMixin, ClusterMixin, RegressorMixin, TransformerMixin
    from . import IS_PYPY
    from ._testing import ignore_warnings

    def is_abstract(c):
        # A class counts as abstract only if it declares abstract methods.
        if (not hasattr(c, '__abstractmethods__')):
            return False
        if (not len(c.__abstractmethods__)):
            return False
        return True

    all_classes = []
    root = str(Path(__file__).parent.parent)
    with ignore_warnings(category=FutureWarning):
        for (_, module_name, _) in pkgutil.walk_packages(path=[root], prefix='sklearn.'):
            module_parts = module_name.split('.')
            # Skip ignored modules and any private submodule path.
            if (any(((part in _MODULE_TO_IGNORE) for part in module_parts)) or ('._' in module_name)):
                continue
            module = import_module(module_name)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [(name, est_cls) for (name, est_cls) in classes if (not name.startswith('_'))]
            if (IS_PYPY and ('feature_extraction' in module_name)):
                # Only FeatureHasher is supported under PyPy here.
                classes = [(name, est_cls) for (name, est_cls) in classes if (name == 'FeatureHasher')]
            all_classes.extend(classes)
    all_classes = set(all_classes)
    estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and (c[0] != 'BaseEstimator'))]
    estimators = [c for c in estimators if (not is_abstract(c[1]))]
    if (type_filter is not None):
        if (not isinstance(type_filter, list)):
            type_filter = [type_filter]
        else:
            # Copy so the caller's list is not mutated by remove() below.
            type_filter = list(type_filter)
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin}
        for (name, mixin) in filters.items():
            if (name in type_filter):
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        # Anything left in type_filter was not a recognized category.
        if type_filter:
            raise ValueError(f"Parameter type_filter must be 'classifier', 'regressor', 'transformer', 'cluster' or None, got {repr(type_filter)}.")
    return sorted(set(estimators), key=itemgetter(0))
def generate_ccp_dataset(args):
    """Generate A/B train/test MHP datasets under args.save_root."""
    args.data_root = Path(args.data_root)
    args.img_root = args.data_root / 'images'
    args.ann_root = args.data_root / 'annotations'
    args.save_root = Path(args.save_root)
    # Fails if save_root already exists (no exist_ok), matching original intent.
    args.save_root.mkdir()
    for phase in ('train', 'test'):
        generate_mhp_dataset(args, phase, 'A', get_cat_id(args.cat1))
        generate_mhp_dataset(args, phase, 'B', get_cat_id(args.cat2))
def proc_cpu_task(task_list, task_q, fin_q, verbose=False):
    """Worker loop: pull task ids from task_q until a -1 sentinel arrives,
    apply the requested CPU-side array operation in place, and ack on fin_q.

    Task tuple layout (inferred from use — confirm against the producer):
    task[0] label, task[1] buffer handles (0: filenames, 1: images,
    3: gradient, 4/5: clip bounds), task[2] op code, task[3] op params.
    """
    for task_id in iter(task_q.get, (- 1)):
        task = task_list[task_id]
        if verbose:
            print(task[0])
        batch_size = len(task[1][0])
        imgmat_size = (batch_size, 299, 299, 3)
        lblmat_size = (batch_size, 1000)  # NOTE(review): unused in this function
        if (task[2] == 1):
            # Op 1: FGSM-style signed-gradient step of size task[3][0],
            # clipped into [task[1][4], task[1][5]] and written back in place;
            # then reset the gradient buffer to zero.
            _ = np.clip((get_array(task[1][1], imgmat_size)[...] - (np.sign(get_array(task[1][3], imgmat_size)[...]) * task[3][0])), get_array(task[1][4], imgmat_size)[...], get_array(task[1][5], imgmat_size)[...], out=get_array(task[1][1], imgmat_size)[...])
            get_array(task[1][3], imgmat_size)[...].fill(0.0)
        elif (task[2] == 2):
            # Op 2: dump the current images as uint8 files; task[3][0] is a
            # path-prefix length to strip, task[3][1] the output directory.
            adv_imgs = get_array(task[1][1], imgmat_size)[...].astype(np.uint8)
            image_generators.save_images(adv_imgs, [fname[(task[3][0] + 1):] for fname in task[1][0]], task[3][1])
        elif (task[2] == 4):
            # Op 4: add Gaussian noise (scale task[3][0]) with the same clipping.
            _ = np.clip((get_array(task[1][1], imgmat_size)[...] + np.random.normal(scale=task[3][0], size=imgmat_size)), get_array(task[1][4], imgmat_size)[...], get_array(task[1][5], imgmat_size)[...], out=get_array(task[1][1], imgmat_size)[...])
        if verbose:
            print(task[0], ': Finished at', (time.time() - start_time))
        fin_q.put(task_id)
class Classifier(nn.Module):
    """Two-layer MLP head: 64 -> 200 (dropout + BN + ReLU) -> 10 logits."""

    def __init__(self):
        super().__init__()
        self.layer0 = nn.Sequential(nn.Linear(64, 200), nn.Dropout(0.1), nn.BatchNorm1d(200), nn.ReLU())
        self.layer1 = nn.Sequential(nn.Linear(200, 10))
        self.layers = [self.layer0, self.layer1]

    def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False):
        def _bounded(tensor, lo, hi):
            # Clamp gradients flowing back through this activation to [lo, hi].
            view = tensor.expand_as(tensor)
            view.register_hook(lambda g: g.clamp(lo, hi))
            return view

        outputs = []
        for layer in self.layers:
            x = layer(x)
            if clip_grad:
                x = _bounded(x, -clip_grad, clip_grad)
            outputs.append(x)
        return outputs if return_full_list else outputs[-1]
def binary_tournament(pop, offspring_size):
    """Select parents via size-2 tournaments; the lower index (fitter) wins.

    Falls back to duplicating pop[0] when offspring_size is zero.
    """
    n = len(pop)
    rng = Genotype.global_rng
    parents = [pop[np.min(rng.randint(n, size=2))] for _ in range(offspring_size)]
    if not parents:
        parents = [pop[0], pop[0]]
    return parents
def build_pos_tag_vocab(data, vocab_size=1000, min_freq=1):
    """Build a POS-tag vocabulary from `data` (dicts carrying a 'tags' list).

    Tags are ordered by descending frequency with alphabetical tie-breaking;
    index 0 is reserved for '<pad>'.
    """
    counter = Counter()
    for record in data:
        counter.update(record['tags'])
    itos = ['<pad>']
    min_freq = max(min_freq, 1)
    # Alphabetical pre-sort makes the stable frequency sort alpha-tie-broken.
    ranked = sorted(counter.items(), key=lambda kv: kv[0])
    ranked.sort(key=lambda kv: kv[1], reverse=True)
    for tag, freq in ranked:
        if freq < min_freq or len(itos) == vocab_size:
            break
        itos.append(tag)
    stoi = defaultdict()
    stoi.update({tag: i for i, tag in enumerate(itos)})
    return {'itos': itos, 'stoi': stoi, 'len': len(itos)}
def type_from_comparison(a: Square) -> (int | None):
    """Classify `a` by its `a` attribute: 0 when below 1000, otherwise None."""
    return 0 if a.a < 1000 else None
def hessian(func, inputs, create_graph=False, strict=False):
    """Compute the Hessian of a scalar-valued `func` at `inputs` as the
    Jacobian of the Jacobian (double backward)."""
    (is_inputs_tuple, inputs) = _as_tuple(inputs, 'inputs', 'hessian')

    def ensure_single_output_function(*inp):
        # Validate that func returns exactly one single-element Tensor.
        out = func(*inp)
        (is_out_tuple, t_out) = _as_tuple(out, 'outputs of the user-provided function', 'hessian')
        _check_requires_grad(t_out, 'outputs', strict=strict)
        if (is_out_tuple or (not isinstance(out, torch.Tensor))):
            raise RuntimeError('The function given to hessian should return a single Tensor')
        if (out.nelement() != 1):
            raise RuntimeError('The Tensor returned by the function given to hessian should contain a single element')
        return out.squeeze()

    def jac_func(*inp):
        # The inner jacobian must keep its graph (create_graph=True) so the
        # outer jacobian can differentiate through it.
        jac = jacobian(ensure_single_output_function, inp, create_graph=True)
        _check_requires_grad(jac, 'jacobian', strict=strict)
        return jac

    res = jacobian(jac_func, inputs, create_graph=create_graph, strict=strict)
    # Hessian is 2-D in the input structure: post-process both axes alike.
    return _tuple_postprocess(res, (is_inputs_tuple, is_inputs_tuple))
def ground_truth_reconstruct(inp, sigma, step_size, num_points=2048, num_steps=100, decay=1, interval=10, weight=1):
    """Langevin-style sampling toward `inp` driven by the ground-truth field.

    Returns the final sample and the full trajectory of clones.
    """
    with torch.no_grad():
        x = get_prior(inp.size(0), inp.size(1), inp.size(-1)).cuda()
        trajectory = [x.clone()]
        for step in range(num_steps):
            # Diffusion term, then half-step drift along the field.
            x += np.sqrt(step_size) * (torch.randn_like(x) * weight)
            x += 0.5 * step_size * ground_truth_field(x, inp, sigma)
            if step % (num_steps // interval) == 0:
                step_size *= decay
            trajectory.append(x.clone())
        return (x, trajectory)
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d with statistics and affine parameters frozen as buffers.

    forward() applies out = (x - running_mean) / sqrt(running_var + eps)
    * weight + bias using fixed buffers; nothing updates during training.
    """

    _version = 3

    def __init__(self, num_features, eps=1e-05):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.register_buffer('weight', torch.ones(num_features))
        self.register_buffer('bias', torch.zeros(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        # Initialized to (1 - eps) so a fresh module is an exact identity
        # once eps is added back in forward().
        self.register_buffer('running_var', torch.ones(num_features) - eps)

    def forward(self, x):
        scale = self.weight * (self.running_var + self.eps).rsqrt()
        bias = self.bias - self.running_mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return x * scale + bias

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Upgrade old checkpoints: inject missing stats (< v2) and remove
        the eps that pre-v3 checkpoints folded into running_var."""
        version = local_metadata.get('version', None)
        if version is None or version < 2:
            # Very old checkpoints may lack running stats entirely.
            if (prefix + 'running_mean') not in state_dict:
                state_dict[prefix + 'running_mean'] = torch.zeros_like(self.running_mean)
            if (prefix + 'running_var') not in state_dict:
                state_dict[prefix + 'running_var'] = torch.ones_like(self.running_var)
        if version is not None and version < 3:
            logger = logging.getLogger(__name__)
            logger.info('FrozenBatchNorm {} is upgraded to version 3.'.format(prefix.rstrip('.')))
            # Version 3 adds eps inside forward(), so strip it from the
            # stored variance to keep outputs unchanged.
            state_dict[prefix + 'running_var'] -= self.eps
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                      missing_keys, unexpected_keys, error_msgs)

    def __repr__(self):
        return 'FrozenBatchNorm2d(num_features={}, eps={})'.format(self.num_features, self.eps)

    # BUG FIX: this method recursively calls cls.convert_frozen_batchnorm and
    # is invoked on the class, so it must be a classmethod; the decorator was
    # missing in the original.
    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """Recursively replace BatchNorm2d/SyncBatchNorm children of `module`
        with frozen copies carrying the same statistics; returns the result."""
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res
def main():
    """Preprocess the AV dataset: per-file preprocessing, 1-hour noise file,
    and pretrain/preval split files under args['DATA_DIRECTORY']."""
    np.random.seed(args['SEED'])
    torch.manual_seed(args['SEED'])
    gpuAvailable = torch.cuda.is_available()
    device = torch.device(('cuda' if gpuAvailable else 'cpu'))
    # Collect every .mp4 sample path (extension stripped) under the data dir.
    filesList = list()
    for (root, dirs, files) in os.walk(args['DATA_DIRECTORY']):
        for file in files:
            if file.endswith('.mp4'):
                filesList.append(os.path.join(root, file[:(- 4)]))
    print(('\nNumber of data samples to be processed = %d' % len(filesList)))
    print('\n\nStarting preprocessing ....\n')
    for file in tqdm(filesList, leave=True, desc='Preprocess', ncols=75):
        preprocess_sample(file)
    print('\nPreprocessing Done.')
    print('\n\nGenerating the noise file ....')
    # Build 1 hour of 16 kHz noise by summing 20 random normalized clips
    # per one-minute chunk.
    noise = np.empty(0)
    while (len(noise) < (16000 * 3600)):
        noisePart = np.zeros((16000 * 60))
        indices = np.random.randint(0, len(filesList), 20)
        for ix in indices:
            (sampFreq, audio) = wavfile.read((filesList[ix] + '.wav'))
            audio = (audio / np.max(np.abs(audio)))
            pos = np.random.randint(0, (abs((len(audio) - len(noisePart))) + 1))
            if (len(audio) > len(noisePart)):
                noisePart = (noisePart + audio[pos:(pos + len(noisePart))])
            else:
                noisePart = (noisePart[pos:(pos + len(audio))] + audio)
        noise = np.concatenate([noise, noisePart], axis=0)
    noise = noise[:(16000 * 3600)]
    # Scale down by 20 and convert to int16 PCM.
    noise = ((noise / 20) * 32767)
    noise = np.floor(noise).astype(np.int16)
    wavfile.write((args['DATA_DIRECTORY'] + '/noise.wav'), 16000, noise)
    print('\nNoise file generated.')
    print('\n\nGenerating the preval.txt file ....')
    with open((args['DATA_DIRECTORY'] + '/pretrain.txt'), 'r') as f:
        lines = f.readlines()
    if os.path.exists((args['DATA_DIRECTORY'] + '/preval.txt')):
        with open((args['DATA_DIRECTORY'] + '/preval.txt'), 'r') as f:
            lines.extend(f.readlines())
    indices = np.arange(len(lines))
    np.random.shuffle(indices)
    valIxs = np.sort(indices[:int(np.ceil((args['PRETRAIN_VAL_SPLIT'] * len(indices))))])
    trainIxs = np.sort(indices[int(np.ceil((args['PRETRAIN_VAL_SPLIT'] * len(indices)))):])
    # NOTE(review): `lines` is sorted AFTER the shuffled indices were drawn,
    # so the split indexes the sorted list — still a uniform random split,
    # but not a split of the original ordering; confirm this is intended.
    lines = np.sort(np.array(lines))
    with open((args['DATA_DIRECTORY'] + '/pretrain.txt'), 'w') as f:
        f.writelines(list(lines[trainIxs]))
    with open((args['DATA_DIRECTORY'] + '/preval.txt'), 'w') as f:
        f.writelines(list(lines[valIxs]))
    print('\npreval.txt file generated.\n')
    return
class ConcatTable(Container):
    """Legacy-nn container that applies every child module to the same input
    and returns the list of outputs; _backward merges the children's input
    gradients back together."""

    def __init__(self):
        super(ConcatTable, self).__init__()
        self.modules = []
        self.output = []

    def updateOutput(self, input):
        self.output = [module.updateOutput(input) for module in self.modules]
        return self.output

    def _map_list(self, l1, l2, f):
        # Recursively walk nested list l2, applying f(l1, i, v) at tensor
        # leaves and growing/trimming l1 to mirror l2's structure.
        for (i, v) in enumerate(l2):
            if isinstance(v, list):
                res = self._map_list((l1[i] if (i < len(l1)) else []), v, f)
                if (i >= len(l1)):
                    assert (i == len(l1))
                    l1.append(res)
                else:
                    l1[i] = res
            else:
                f(l1, i, v)
        # Trim any extra trailing entries so l1 has exactly len(l2) items.
        for i in range((len(l1) - 1), (len(l2) - 1), (- 1)):
            del l1[i]
        return l1

    def _backward(self, method, input, gradOutput, scale=1):
        """Shared implementation of updateGradInput/backward: accumulate each
        child's input gradient into self.gradInput (table or tensor case)."""
        isTable = isinstance(input, list)
        wasTable = isinstance(self.gradInput, list)
        if isTable:
            for (i, module) in enumerate(self.modules):
                if (method == 'updateGradInput'):
                    currentGradInput = module.updateGradInput(input, gradOutput[i])
                elif (method == 'backward'):
                    currentGradInput = module.backward(input, gradOutput[i], scale)
                if (not isinstance(currentGradInput, list)):
                    raise RuntimeError('currentGradInput is not a table!')
                if (len(input) != len(currentGradInput)):
                    raise RuntimeError('table size mismatch')
                if (i == 0):
                    self.gradInput = (self.gradInput if wasTable else [])

                    def fn(l, i, v):
                        # First child: copy its gradient into gradInput.
                        if (i >= len(l)):
                            assert (len(l) == i)
                            l.append(v.clone())
                        else:
                            l[i].resize_as_(v)
                            l[i].copy_(v)
                    self._map_list(self.gradInput, currentGradInput, fn)
                else:
                    def fn(l, i, v):
                        # Later children: accumulate into the running gradient.
                        if (i < len(l)):
                            l[i].add_(v)
                        else:
                            assert (len(l) == i)
                            l.append(v.clone())
                    self._map_list(self.gradInput, currentGradInput, fn)
        else:
            # Tensor input: gradInput is a tensor accumulated across children.
            self.gradInput = (self.gradInput if (not wasTable) else input.clone())
            for (i, module) in enumerate(self.modules):
                if (method == 'updateGradInput'):
                    currentGradInput = module.updateGradInput(input, gradOutput[i])
                elif (method == 'backward'):
                    currentGradInput = module.backward(input, gradOutput[i], scale)
                if (i == 0):
                    self.gradInput.resize_as_(currentGradInput).copy_(currentGradInput)
                else:
                    self.gradInput.add_(currentGradInput)
        return self.gradInput

    def updateGradInput(self, input, gradOutput):
        return self._backward('updateGradInput', input, gradOutput)

    def backward(self, input, gradOutput, scale=1):
        return self._backward('backward', input, gradOutput, scale)

    def accGradParameters(self, input, gradOutput, scale=1):
        for (i, module) in ipairs(self.modules):
            self.rethrowErrors(module, i, 'accGradParameters', input, gradOutput[i], scale)

    def accUpdateGradParameters(self, input, gradOutput, lr):
        for (i, module) in ipairs(self.modules):
            self.rethrowErrors(module, i, 'accUpdateGradParameters', input, gradOutput[i], lr)

    def __repr__(self):
        # ASCII-art tree rendering of the container, Lua-torch style.
        tab = ' '
        line = '\n'
        next = ' |`-> '
        ext = ' | '
        extlast = ' '
        last = ' +. -> '
        res = torch.typename(self)
        res = ((((res + ' {') + line) + tab) + 'input')
        for i in range(len(self.modules)):
            if (i == (len(self.modules) - 1)):
                res = (((((((res + line) + tab) + next) + '(') + str(i)) + '): ') + str(self.modules[i]).replace(line, ((line + tab) + extlast)))
            else:
                res = (((((((res + line) + tab) + next) + '(') + str(i)) + '): ') + str(self.modules[i]).replace(line, ((line + tab) + ext)))
        res = ((((res + line) + tab) + last) + 'output')
        res = ((res + line) + '}')
        return res
def test_identity_scaler():
    """IdentityScaler must return its input unchanged via both call paths."""
    import numpy as np
    from pysad.transform.preprocessing import IdentityScaler
    data = np.random.rand(100, 25)
    # Combined fit_transform path.
    assert np.all(np.isclose(IdentityScaler().fit_transform(data), data))
    # Separate fit then transform path.
    fitted = IdentityScaler().fit(data)
    assert np.all(np.isclose(fitted.transform(data), data))
def main():
    """Randomly split a list of names into train/valid/test files."""
    args = parse_args()
    if args.dataset is not None:
        # Derive default paths from the dataset name when not given.
        if args.names is None:
            args.names = pathlib.Path(f'data/{args.dataset}/processed/names.txt')
        if args.out_dir is None:
            args.out_dir = pathlib.Path(f'data/{args.dataset}/processed/')
    logging.basicConfig(stream=sys.stdout,
                        level=(logging.ERROR if args.quiet else logging.INFO),
                        format='%(levelname)-8s %(message)s')
    logging.info(f'''Using arguments: {pprint.pformat(vars(args))}''')
    random.seed(args.seed)
    logging.info('Loading names...')
    names = utils.load_txt(args.names)
    logging.info(f'Loaded {len(names)} names.')
    n_valid = int(len(names) * args.ratio_valid)
    n_test = int(len(names) * args.ratio_test)
    held_out = random.sample(names, n_valid + n_test)
    valid_names = held_out[:n_valid]
    test_names = held_out[n_valid:]
    train_names = [name for name in names if name not in held_out]
    utils.save_txt(args.out_dir / 'train-names.txt', train_names)
    logging.info(f'Collected {len(train_names)} files for training.')
    utils.save_txt(args.out_dir / 'valid-names.txt', valid_names)
    logging.info(f'Collected {len(valid_names)} files for validation.')
    utils.save_txt(args.out_dir / 'test-names.txt', test_names)
    logging.info(f'Collected {len(test_names)} files for test.')
def _softmax_cross_entropy_with_logits(logits, labels): param = list(range(len(logits.shape))) transpose_param = (([0] + [param[(- 1)]]) + param[1:(- 1)]) logits = logits.permute(*transpose_param) loss_ftor = nn.CrossEntropyLoss(reduction='none') loss = loss_ftor(logits, labels.max(dim=(- 1))[1]) return loss
def get_accuracy(params_repl):
    """Top-1 accuracy of the replicated ViT params over the test split.

    Iterates the test set once; steps = num_examples // batch_size, so any
    trailing partial batch is dropped.  Compares argmax predictions with
    argmax of one-hot labels and returns the fraction correct.
    """
    good = total = 0
    steps = (input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size)
    # trange provides a progress bar; zip stops after `steps` batches.
    for (_, batch) in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
        predicted = vit_apply_repl(params_repl, batch['image'])
        is_same = (predicted.argmax(axis=(- 1)) == batch['label'].argmax(axis=(- 1)))
        good += is_same.sum()
        total += len(is_same.flatten())
    return (good / total)
def contentvec_km100(refresh=False, **kwds):
    """Build the ContentVec km100 variant via hubert_custom.

    Sets the checkpoint path in kwds and delegates to hubert_custom.
    """
    # NOTE(review): the original checkpoint URL string literal was corrupted /
    # truncated in this file (unterminated quote).  Restore the real km100
    # checkpoint URL below before using this entry point.
    kwds['ckpt'] = ''  # TODO: fill in the contentvec km100 checkpoint URL
    return hubert_custom(refresh=refresh, **kwds)
def get_model_infos(model, shape):
    """Measure a model's FLOPs (in MFLOPs) and parameter count (in MB).

    Runs one dummy forward pass with flop-counting hooks attached, then
    removes the hooks.  If the model exposes auxiliary_param(), those
    parameters are subtracted from the total.

    Note: mutates `model` (switches to eval mode, adds then removes hooks).
    Returns (FLOPs, Param).
    """
    model = add_flops_counting_methods(model)
    model.eval()
    cache_inputs = torch.rand(*shape)
    # Put the dummy input on the same device as the model.
    if next(model.parameters()).is_cuda:
        cache_inputs = cache_inputs.cuda()
    with torch.no_grad():
        _____ = model(cache_inputs)  # output discarded; hooks record the FLOPs
    FLOPs = (compute_average_flops_cost(model) / 1000000.0)
    Param = count_parameters_in_MB(model)
    if hasattr(model, 'auxiliary_param'):
        aux_params = count_parameters_in_MB(model.auxiliary_param())
        print('The auxiliary params of this model is : {:}'.format(aux_params))
        print('We remove the auxiliary params from the total params ({:}) when counting'.format(Param))
        Param = (Param - aux_params)
    torch.cuda.empty_cache()
    model.apply(remove_hook_function)
    return (FLOPs, Param)
def get_dist_loader(features, shuffle, args):
    """Build a DistributedSampler-backed DataLoader over seq2seq features.

    Args:
        features: iterable of feature objects exposing list attributes
            input_ids, attention_mask, decoder_input_ids,
            decoder_attention_mask and lm_labels.
        shuffle: unused here — shuffling is delegated to DistributedSampler
            (DataLoader gets shuffle=None).  # NOTE(review): confirm intent.
        args: namespace providing batch_size and workers.

    Returns:
        (dataloader, sampler) — sampler is returned so callers can call
        set_epoch() each epoch.
    """
    input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    decoder_input_ids = torch.tensor([f.decoder_input_ids for f in features], dtype=torch.long)
    decoder_attention_mask = torch.tensor([f.decoder_attention_mask for f in features], dtype=torch.long)
    lm_labels = torch.tensor([f.lm_labels for f in features], dtype=torch.long)
    dataset = TensorDataset(input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, lm_labels)
    sampler = DistributedSampler(dataset)
    dataloader = DataLoader(dataset=dataset, sampler=sampler, pin_memory=True, batch_size=args.batch_size, shuffle=None, num_workers=args.workers)
    return (dataloader, sampler)
def get_subtrees(input_file, *args):
    """Run Stanford's OutputSubtrees tool on `input_file` and parse stdout.

    Extra positional args are appended to the java command line.  Each
    non-empty output line has the form '<label> <tokens...>' and becomes a
    SentimentDatum(label, token_list).
    """
    command = ['java', 'edu.stanford.nlp.trees.OutputSubtrees', '-input', input_file, *args]
    print(' '.join(command))
    proc = subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
    phrases = []
    for raw_line in proc.stdout.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        parts = stripped.split(maxsplit=1)
        phrases.append(SentimentDatum(parts[0], parts[1].split()))
    return phrases
class DictOffsetSlot(SlotDescriptor):
    """Type-slot descriptor emitting the C expression for ``tp_dictoffset``.

    Yields ``offsetof(<objstruct>, __dict__)`` when the extension type
    declares a usable ``__dict__`` variable of type dict, and ``'0'``
    otherwise.
    """
    def slot_code(self, scope):
        # Closure class scopes never expose an instance __dict__ slot.
        dict_entry = (scope.lookup_here('__dict__') if (not scope.is_closure_class_scope) else None)
        if (dict_entry and dict_entry.is_variable):
            if (getattr(dict_entry.type, 'cname', None) != 'PyDict_Type'):
                # A user-declared __dict__ of the wrong type is an error.
                error(dict_entry.pos, "__dict__ slot must be of type 'dict'")
                return '0'
            type = scope.parent_type
            # typedef'd structs are referenced bare; otherwise spell 'struct X'.
            if type.typedef_flag:
                objstruct = type.objstruct_cname
            else:
                objstruct = ('struct %s' % type.objstruct_cname)
            return ('offsetof(%s, %s)' % (objstruct, dict_entry.cname))
        else:
            return '0'
def test_gl_dilation_neck():
    """Smoke-test GLDilationNeck: output spatial shape must equal input shape.

    Covers the default conv type on CPU, then (when CUDA is available) the
    default and 'gated_conv' variants on GPU.
    """
    neck = GLDilationNeck(in_channels=8)
    x = torch.rand((2, 8, 64, 64))
    res = neck(x)
    assert (res.shape == (2, 8, 64, 64))
    if torch.cuda.is_available():
        neck = GLDilationNeck(in_channels=8).cuda()
        x = torch.rand((2, 8, 64, 64)).cuda()
        res = neck(x)
        assert (res.shape == (2, 8, 64, 64))
        # Gated-conv variant should swap in SimpleGatedConvModule blocks.
        neck = GLDilationNeck(in_channels=8, conv_type='gated_conv').cuda()
        res = neck(x)
        assert isinstance(neck.dilation_convs[0], SimpleGatedConvModule)
        assert (res.shape == (2, 8, 64, 64))
def register_all_coco(root):
    """Register every predefined COCO split with the dataset catalog.

    Relative annotation paths are anchored at `root`; absolute URIs
    (containing '://') are passed through unchanged.
    """
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            if '://' in json_file:
                json_path = json_file
            else:
                json_path = os.path.join(root, json_file)
            register_coco_instances(key, _get_builtin_metadata(dataset_name), json_path, os.path.join(root, image_root))
class NormalizeVector(object):
    """Elementwise tensor normalization: (x - mean) / std.

    Zero entries in `std` are replaced by 1.0 to avoid division by zero.

    Fix: the original used ``torch.Tensor(std)``, which raises a TypeError
    for the scalar defaults (mean=0.0, std=1.0); ``torch.as_tensor`` plus
    ``torch.atleast_1d`` accepts scalars, sequences and tensors alike.
    """

    def __init__(self, mean=0.0, std=1.0):
        # as_tensor may alias an input tensor, so clone before mutating —
        # the original constructor always copied.
        std = torch.atleast_1d(torch.as_tensor(std, dtype=torch.float32)).clone()
        std[std == 0.0] = 1.0  # guard against division by zero
        self.std = std
        self.mean = torch.atleast_1d(torch.as_tensor(mean, dtype=torch.float32))

    def __call__(self, tensor: torch.Tensor) -> torch.Tensor:
        """Return the normalized tensor; input must be a torch.Tensor."""
        assert isinstance(tensor, torch.Tensor)
        return ((tensor - self.mean) / self.std)

    def __repr__(self):
        return (self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std))
def reconstruct_interp_matrix(idx, proj):
    """Reconstruct the interpolation matrix of an interpolative decomposition.

    Dispatches to the real or complex backend routine based on the dtype of
    `proj`.  `idx` is shifted by one because the Fortran backends expect
    1-based column indices.
    """
    if _is_real(proj):
        return backend.idd_reconint((idx + 1), proj)
    else:
        return backend.idz_reconint((idx + 1), proj)
def test_result():
    """The crosstab() result's attribute view must agree with its tuple view."""
    res = crosstab([0, 1], [1, 2])
    # res behaves like a named tuple: (elements, count).
    assert_equal((res.elements, res.count), res)
class Trainer(Trainer):
    """Trainer for the neural tokenizer (note: intentionally shadows the
    imported base Trainer class).

    Either restores a full state (args, vocab, model) from `model_file` or
    builds a fresh Tokenizer model from `args` and `vocab`.
    """
    def __init__(self, args=None, vocab=None, model_file=None, use_cuda=False):
        self.use_cuda = use_cuda
        if (model_file is not None):
            # load() populates self.args, self.vocab and self.model.
            self.load(model_file)
        else:
            self.args = args
            self.vocab = vocab
            self.model = Tokenizer(self.args, self.args['vocab_size'], self.args['emb_dim'], self.args['hidden_dim'], dropout=self.args['dropout'])
        # ignore_index=-1 skips padding positions in the loss.
        self.criterion = nn.CrossEntropyLoss(ignore_index=(- 1))
        if use_cuda:
            self.model.cuda()
            self.criterion.cuda()
        else:
            self.model.cpu()
            self.criterion.cpu()
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = optim.Adam(self.parameters, lr=self.args['lr0'], betas=(0.9, 0.9), weight_decay=self.args['weight_decay'])
        self.feat_funcs = self.args.get('feat_funcs', None)
        self.lang = self.args['lang']

    def update(self, inputs):
        """One optimization step on a batch; returns the scalar loss value."""
        self.model.train()
        (units, labels, features, _) = inputs
        if self.use_cuda:
            units = units.cuda()
            labels = labels.cuda()
            features = features.cuda()
        pred = self.model(units, features)
        self.optimizer.zero_grad()
        classes = pred.size(2)
        # Flatten (batch, seq, classes) -> (batch*seq, classes) for CE loss.
        loss = self.criterion(pred.view((- 1), classes), labels.view((- 1)))
        loss.backward()
        nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
        self.optimizer.step()
        return loss.item()

    def predict(self, inputs):
        """Forward pass in eval mode; returns predictions as a numpy array."""
        self.model.eval()
        (units, labels, features, _) = inputs
        if self.use_cuda:
            units = units.cuda()
            labels = labels.cuda()
            features = features.cuda()
        pred = self.model(units, features)
        return pred.data.cpu().numpy()

    def save(self, filename):
        """Serialize model weights, vocab state and config to `filename`.

        Failures are reported but deliberately non-fatal (best effort).
        """
        params = {'model': (self.model.state_dict() if (self.model is not None) else None), 'vocab': self.vocab.state_dict(), 'config': self.args}
        try:
            torch.save(params, filename)
            print('model saved to {}'.format(filename))
        except BaseException:
            print('[Warning: Saving failed... continuing anyway.]')

    def load(self, filename):
        """Restore config, model weights and vocab from a checkpoint.

        Exits the process if the checkpoint cannot be read.
        """
        try:
            # map to CPU storage; caller moves to GPU afterwards if needed
            checkpoint = torch.load(filename, (lambda storage, loc: storage))
        except BaseException:
            print('Cannot load model from {}'.format(filename))
            sys.exit(1)
        self.args = checkpoint['config']
        self.model = Tokenizer(self.args, self.args['vocab_size'], self.args['emb_dim'], self.args['hidden_dim'], dropout=self.args['dropout'])
        self.model.load_state_dict(checkpoint['model'])
        self.vocab = Vocab.load_state_dict(checkpoint['vocab'])
def test_initial_state(model=None):
    """Check the parser start state built for the default test sentence.

    Expects one state for a 3-word sentence: no open constituents yet,
    word queue of length 5 (words plus boundary markers — confirm against
    build_initial_state), one sentinel constituent and one transition,
    cursor at position 0.
    """
    if (model is None):
        model = SimpleModel()
    states = build_initial_state(model)
    assert (len(states) == 1)
    state = states[0]
    assert (state.sentence_length == 3)
    assert (state.num_opens == 0)
    assert (len(state.word_queue) == 5)
    assert (len(state.constituents) == 1)
    assert (len(state.transitions) == 1)
    assert (state.word_position == 0)
class HMACAlgorithm(SigningAlgorithm):
    """Signature algorithm that signs with HMAC.

    NOTE(review): the default digest is SHA-1, presumably kept for backward
    compatibility with previously issued signatures; prefer passing
    hashlib.sha256 (or stronger) for new deployments.
    """
    # Default digest; overridable per-instance via the constructor.
    default_digest_method = staticmethod(hashlib.sha1)

    def __init__(self, digest_method=None):
        if (digest_method is None):
            digest_method = self.default_digest_method
        self.digest_method = digest_method

    def get_signature(self, key, value):
        """Return the raw HMAC digest of `value` keyed with `key` (both bytes)."""
        mac = hmac.new(key, msg=value, digestmod=self.digest_method)
        return mac.digest()
# NOTE(review): the decorator line was corrupted to a bare `_model` token in
# this file; restored as timm's standard `@register_model` — confirm against
# the upstream source before merging.
@register_model
def seresnext101_32x4d(pretrained=False, **kwargs):
    """SE-ResNeXt101 (32x4d): ResNeXt-101 with squeeze-and-excitation blocks.

    Args:
        pretrained: load pretrained weights when True.
        **kwargs: forwarded to the ResNet constructor.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext101_32x4d', pretrained, **model_args)
def chunked(iterable, batch_size):
    """Lazily split `iterable` into lists of at most `batch_size` items.

    Returns an iterator of lists; the final chunk may be shorter.  The
    underlying iterable is consumed only as chunks are pulled.
    """
    source = iter(iterable)

    def take_batch():
        return list(itertools.islice(source, batch_size))

    # iter(callable, sentinel) stops at the first empty batch.
    return iter(take_batch, [])
class Rotate90Augmenter(dptspatialaugmenterbase.SpatialAugmenterBase):
    """Spatial augmenter rotating patches by random multiples of 90 degrees."""

    def __init__(self, k_list):
        """k_list: allowed rotation counts; each is reduced modulo 4."""
        super().__init__(keyword='rotate_90')
        self.__k_list = []
        self.__k = None
        self.__setklist(k_list=k_list)

    def __setklist(self, k_list):
        """Validate the rotation-count list and select the first entry."""
        non_integer_float = any((isinstance(item, float) and (not float.is_integer(item))) for item in k_list)
        if (len(k_list) < 1) or non_integer_float:
            raise Exception('InvalidRotationRepetitionListError(k_list)')
        self.__k_list = [int(item) % 4 for item in k_list]
        self.__k = self.__k_list[0]

    def transform(self, patch):
        """Rotate `patch` by the currently selected multiple of 90 degrees
        around axes (1, 2)."""
        return np.rot90(m=patch, k=self.__k, axes=(1, 2))

    def randomize(self):
        """Draw a fresh rotation count from the configured list."""
        self.__k = np.random.choice(a=self.__k_list, size=None)
def recursiveSelection(cont, it, curr_list):
    """Enumerate every index combination for positions it..len(cont)-1.

    `cont[i]` gives the number of choices at position i.  Returns a list of
    lists, each `curr_list` extended by one choice per remaining position,
    ordered with earlier positions varying slowest.
    """
    if it == len(cont):
        return [curr_list]
    combos = []
    for choice in range(cont[it]):
        combos.extend(recursiveSelection(cont, it + 1, curr_list + [choice]))
    return combos
class HighestWeightTensorKRT(UniqueRepresentation):
    """Lazily computed set of highest weight elements of a tensor product of
    Kirillov-Reshetikhin tableaux.

    The elements are derived once from the rigged configurations' module
    generators and cached in a tuple.
    """
    def __init__(self, tp_krt):
        self.tp_krt = tp_krt
        # Cache of the converted module generators; built on first access.
        self._cache = None

    def __getitem__(self, i):
        if (self._cache is None):
            self._cache = tuple([x.to_tensor_product_of_kirillov_reshetikhin_tableaux() for x in self.tp_krt.rigged_configurations().module_generators])
        return self._cache[i]

    def __iter__(self):
        if (self._cache is None):
            self._cache = tuple([x.to_tensor_product_of_kirillov_reshetikhin_tableaux() for x in self.tp_krt.rigged_configurations().module_generators])
        (yield from self._cache)

    def __repr__(self):
        return 'Highest weight elements of {}'.format(self.tp_krt)
    # NOTE(review): the bare `_method` below looks like the residue of a
    # mangled decorator (probably Sage's `@cached_method`); restore it before
    # use — as written it raises NameError at class creation.
    _method
    def cardinality(self):
        """Count the highest weight elements (forces the full cache)."""
        count = 0
        for x in self:
            count += 1
        return Integer(count)
    __len__ = cardinality
class LRTensorBoard(TensorBoard):
    """TensorBoard callback that also logs the optimizer's learning rate
    under the 'lr' key at the end of every epoch.
    """

    def __init__(self, log_dir: str, **kwargs):
        super(LRTensorBoard, self).__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        # Keras may invoke callbacks with logs=None; the original called
        # logs.update unconditionally and crashed in that case.
        logs = {} if logs is None else logs
        logs.update({'lr': tf.keras.backend.eval(self.model.optimizer.lr)})
        super(LRTensorBoard, self).on_epoch_end(epoch, logs)
class SampleCounter(object):
    """Counts observed samples.

    `mean` is initialised but never updated by add_sample; it stays 0 and
    is kept only for interface compatibility.
    """

    def __init__(self):
        self.num_samples = 0  # number of add_sample calls so far
        self.mean = 0  # placeholder; not maintained

    def add_sample(self, _sample):
        """Record one observation; the sample value itself is ignored."""
        self.num_samples = self.num_samples + 1
class FourTi2Executable(Executable):
    """Feature test for one 4ti2 command-line executable.

    The binary is resolved from the FOURTITWO_<NAME> entry of SAGE_ENV when
    set, otherwise the bare command name is used.  The feature is published
    as '4ti2-<name>' and provided by the '4ti2' spkg.
    """
    def __init__(self, name):
        # Imported lazily to keep sage.env out of module import time.
        from sage.env import SAGE_ENV
        Executable.__init__(self, name=('4ti2-' + name), executable=(SAGE_ENV.get(('FOURTITWO_' + name.upper()), None) or name), spkg='4ti2')
class DenseAspp(nn.Module):
    """DenseASPP head: five atrous blocks (rates 3/6/12/18/24) densely
    connected by channel concatenation, followed by a 1x1 projection back to
    `num_features` channels.

    Args:
        num_features: channels of the input feature map (and the output).
        d_feature0: bottleneck width inside each _DenseAsppBlock.
        d_feature1: channels each block contributes to the concatenation.
        dropout0: dropout rate used in the blocks and before classification.
    """
    def __init__(self, num_features=256, d_feature0=512, d_feature1=128, dropout0=0.1):
        super(DenseAspp, self).__init__()
        # Each block consumes all previously produced features, so its input
        # width grows by d_feature1 per preceding block.
        self.ASPP_3 = _DenseAsppBlock(input_num=num_features, num1=d_feature0, num2=d_feature1, dilation_rate=3, drop_out=dropout0, bn_start=False)
        self.ASPP_6 = _DenseAsppBlock(input_num=(num_features + (d_feature1 * 1)), num1=d_feature0, num2=d_feature1, dilation_rate=6, drop_out=dropout0, bn_start=True)
        self.ASPP_12 = _DenseAsppBlock(input_num=(num_features + (d_feature1 * 2)), num1=d_feature0, num2=d_feature1, dilation_rate=12, drop_out=dropout0, bn_start=True)
        self.ASPP_18 = _DenseAsppBlock(input_num=(num_features + (d_feature1 * 3)), num1=d_feature0, num2=d_feature1, dilation_rate=18, drop_out=dropout0, bn_start=True)
        self.ASPP_24 = _DenseAsppBlock(input_num=(num_features + (d_feature1 * 4)), num1=d_feature0, num2=d_feature1, dilation_rate=24, drop_out=dropout0, bn_start=True)
        self.classification = nn.Sequential(nn.Dropout2d(p=dropout0), nn.Conv2d(in_channels=(num_features + (d_feature1 * 5)), out_channels=num_features, kernel_size=1, padding=0))

    def forward(self, _input):
        """Return a feature map with the same channel count as `_input`."""
        feature = _input
        aspp3 = self.ASPP_3(feature)
        feature = torch.cat((aspp3, feature), dim=1)
        aspp6 = self.ASPP_6(feature)
        feature = torch.cat((aspp6, feature), dim=1)
        aspp12 = self.ASPP_12(feature)
        feature = torch.cat((aspp12, feature), dim=1)
        aspp18 = self.ASPP_18(feature)
        feature = torch.cat((aspp18, feature), dim=1)
        aspp24 = self.ASPP_24(feature)
        feature = torch.cat((aspp24, feature), dim=1)
        feature = self.classification(feature)
        return feature
def train_act(fold=0):
    """Train/evaluate a bag-of-words logistic-regression act classifier on
    one of ten cross-validation folds of the JDDC dataset.

    Prints accuracy plus macro precision/recall/F1 and writes
    'prediction, label' pairs to outputs/JDDC_act/lr_<fold>_0.txt.
    """
    print('fold', fold)
    from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.svm import SVC
    from xgboost import XGBClassifier
    from sklearn.metrics import cohen_kappa_score
    from .spearman import spearman
    dataset = 'JDDC'
    (x, emo, act, action_num) = load_jddc(f'dataset/{dataset}.txt')
    # 10-fold split: fold `fold` is held out for testing.
    ll = int((len(x) / 10))
    train_x = (x[:(ll * fold)] + x[(ll * (fold + 1)):])
    train_act = (act[:(ll * fold)] + act[(ll * (fold + 1)):])
    test_x = x[(ll * fold):(ll * (fold + 1))]
    test_act = act[(ll * fold):(ll * (fold + 1))]
    print('build tf-idf')
    # NOTE(review): despite the message, this uses raw counts, not tf-idf.
    vectorizer = CountVectorizer()
    train_feature = vectorizer.fit_transform(train_x)
    test_feature = vectorizer.transform(test_x)
    lr = LogisticRegression()
    lr.fit(train_feature, train_act)
    prediction = lr.predict(test_feature)
    label = test_act
    acc = (sum([int((p == l)) for (p, l) in zip(prediction, label)]) / len(label))
    precision = precision_score(label, prediction, average='macro', zero_division=0)
    recall = recall_score(label, prediction, average='macro', zero_division=0)
    f1 = f1_score(label, prediction, average='macro', zero_division=0)
    print(acc, precision, recall, f1)
    with open(f'outputs/{dataset}_act/lr_{fold}_0.txt', 'w', encoding='utf-8') as f:
        for (p, l) in zip(prediction, label):
            f.write(f'''{p}, {l}
''')
def initialize_weights(model):
    """Adjust default module settings throughout `model`.

    Conv2d layers keep their default initialisation; BatchNorm2d layers get
    (eps=0.001, momentum=0.03); supported activations are switched to
    in-place mode.
    """
    activation_types = (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6)
    for module in model.modules():
        kind = type(module)
        if kind is nn.Conv2d:
            continue  # keep default convolution initialisation
        if kind is nn.BatchNorm2d:
            module.eps = 0.001
            module.momentum = 0.03
        elif kind in activation_types:
            module.inplace = True
def load_json(filename: str) -> dict:
    """Parse the JSON file at `filename` and return the decoded object."""
    with open(filename, 'r') as handle:
        data = json.load(handle)
    return data
def reduce_dict(data, to_item=False):
    """Average every value in `data` across processes, in place.

    Each entry is replaced by reduce_value(..., average=True); when
    `to_item` is True, each reduced tensor is further converted to a Python
    scalar via .item().  Returns the mutated dict.
    """
    for key in data:
        data[key] = reduce_value(data[key], average=True, name=key)
        if to_item:
            data[key] = data[key].item()
    return data
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Render a Swiss UID value in the requested output format.

    Returns a single-element list: [np.nan] for null input; on validation
    failure, [val] when errors='ignore', [np.nan] otherwise (or raises
    ValueError when errors='raise'); otherwise the UID in 'compact' or
    'standard' form.  An unknown output_format yields an empty list.
    """
    text = str(val)
    if text in NULL_VALUES:
        return [np.nan]
    if not validate_ch_uid(text):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {text}')
        return [text if errors == 'ignore' else np.nan]
    if output_format == 'compact':
        return [uid.compact(text)]
    if output_format == 'standard':
        return [uid.format(text)]
    return []
def find_ascending(sequence: list, value, start: 'int | None' = None, end: 'int | None' = None):
    """Binary-search `value` in the ascending-sorted `sequence`.

    Args:
        sequence: list sorted in non-decreasing order.
        start, end: half-open range [start, end) to search; defaults to the
            whole list.  (Fix: the annotations previously claimed plain
            ``int`` while defaulting to None.)

    Returns:
        (True, i)  when sequence[i] == value (a matching index),
        (False, i) when absent, with i the order-preserving insertion point,
        (None, None) when the searched range is empty.
    """
    start = 0 if start is None else start
    end = len(sequence) if end is None else end
    if start >= end:
        return (None, None)
    if (start + 1) == end:
        # Single-element range: classify against that element.
        if sequence[start] == value:
            return (True, start)
        if sequence[start] < value:
            return (False, start + 1)
        return (False, start)
    mid = (start + end) // 2
    # sequence[mid] <= value keeps mid in the right half so equal values are found.
    if sequence[mid] <= value:
        return find_ascending(sequence, value, start=mid, end=end)
    return find_ascending(sequence, value, start=start, end=mid)
# NOTE(review): the decorator was corrupted to a leading '.skipif(...)' in this
# file; restored as the standard pytest skipif marker — confirm upstream.
@pytest.mark.skipif((parse_version(pyarrow.__version__) < parse_version('12.0.0')), reason='pyarrow >= 12.0.0 required for casting test')
def test_type_cast():
    """A masked uint8 awkward array casts to float64 on request and keeps
    uint8 when no target type is given."""
    array = ak.mask(np.array([1, 2, 3], dtype=np.uint8), [True, False, False])
    # Explicit target type: cast to float64.
    arrow_array = pyarrow.array(array, type=pyarrow.float64())
    assert pyarrow_types.is_float64(arrow_array.type)
    # No target type: the original uint8 dtype is preserved.
    arrow_array = pyarrow.array(array)
    assert pyarrow_types.is_uint8(arrow_array.type)
# NOTE(review): the decorator was corrupted to a bare '(parallel=True)' — a
# syntax error — in this file; restored as numba's parallel JIT, which the
# numba.prange loop below requires.  Confirm the exact njit options upstream.
@numba.njit(parallel=True)
def calc_ppr_topk_parallel(indptr, indices, deg, alpha, epsilon, nodes, topk):
    """Compute top-k personalized-PageRank neighbors for each seed node.

    Args:
        indptr, indices: CSR adjacency structure of the graph.
        deg: per-node degrees.
        alpha, epsilon: PPR teleport probability and approximation tolerance.
        nodes: seed node ids to process (parallel over this axis).
        topk: number of highest-scoring entries kept per seed.

    Returns:
        (js, vals): per-seed arrays of neighbor ids and their PPR scores.
    """
    js = [np.zeros(0, dtype=np.int64)] * len(nodes)
    vals = [np.zeros(0, dtype=np.float32)] * len(nodes)
    for i in numba.prange(len(nodes)):
        (j, val) = _calc_ppr_node(nodes[i], indptr, indices, deg, alpha, epsilon)
        (j_np, val_np) = (np.array(j), np.array(val))
        # argsort ascending; the last `topk` entries are the largest scores.
        idx_topk = np.argsort(val_np)[(- topk):]
        js[i] = j_np[idx_topk]
        vals[i] = val_np[idx_topk]
    return (js, vals)
class PixelizedModel():
    """Empirical PSF model stored as a normalised pixel grid.

    The image is normalised to unit sum; evaluation shifts the grid so the
    model centroid lands at (self.x, self.y) and samples it with
    spline interpolation.
    """
    def __init__(self, image):
        # Work on a normalised copy so the stored model integrates to 1.
        self.image = image.copy()
        self.image /= image.sum()
        self.setCentroid()
        self.createModel()
        # Current model position defaults to the image centroid.
        self.x = self.x0
        self.y = self.y0
        self.amp = 1.0
        self.convolve = None

    def setCentroid(self):
        """Compute the intensity-weighted centroid (x0, y0) of the image."""
        (y, x) = numpy.indices(self.image.shape).astype(numpy.float32)
        self.x0 = (x * self.image).sum()
        self.y0 = (y * self.image).sum()

    def createModel(self, order=1):
        """Prepare the interpolation model at the given spline order.

        order=1 (linear) needs no prefiltering; higher orders store the
        spline coefficients via ndimage.spline_filter.
        """
        if (order == 1):
            self.model = self.image.copy()
        else:
            self.model = ndimage.spline_filter(self.image, output=numpy.float64, order=order)
        self.order = order

    def pixeval(self, x, y):
        """Evaluate the PSF at coordinates (x, y).

        The grid is shifted so the centroid sits at (self.x, self.y); the
        sampled patch is renormalised to unit sum then scaled by self.amp.
        """
        X = ((x - self.x) + self.x0)
        Y = ((y - self.y) + self.y0)
        # prefilter=False because createModel already stored coefficients.
        psf = ndimage.map_coordinates(self.model, [Y, X], prefilter=False)
        psf /= psf.sum()
        return (self.amp * psf)
class TFMobileBertForQuestionAnswering():
    """Placeholder ("dummy") class used when TensorFlow is not installed.

    Every entry point calls requires_tf, which raises an informative error
    about the missing TensorFlow dependency.
    NOTE(review): upstream transformers declares from_pretrained as a
    classmethod on these dummies — confirm whether the decorator was lost
    during extraction.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def CheckPosixThreading(filename, clean_lines, linenum, error):
    """cpplint check: warn about thread-unsafe POSIX functions.

    Scans the comment/string-stripped line for each function in
    threading_list and reports the thread-safe alternative.  The character
    before a match must not be alphanumeric or one of '_', '.', '>' so that
    members and qualified names don't trigger false positives.
    """
    line = clean_lines.elided[linenum]  # line with comments and strings removed
    for (single_thread_function, multithread_safe_function) in threading_list:
        ix = line.find(single_thread_function)
        if ((ix >= 0) and ((ix == 0) or ((not line[(ix - 1)].isalnum()) and (line[(ix - 1)] not in ('_', '.', '>'))))):
            error(filename, linenum, 'runtime/threadsafe_fn', 2, (((('Consider using ' + multithread_safe_function) + '...) instead of ') + single_thread_function) + '...) for improved thread safety.'))
class VariationOfInformation(ConfusionMatrixMetric):
    """Variation of information (VI) computed from a binary confusion matrix.

    VI = H(truth) + H(prediction) - 2 * I(truth; prediction), treating the
    ground-truth and predicted segmentations as two binary partitions.
    """
    def __init__(self, metric: str='VARINFO'):
        super().__init__(metric)

    def calculate(self):
        """Return the variation of information, or -inf when a marginal is
        degenerate (all-positive or all-negative) and entropies are undefined."""
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        fn_tp = (fn + tp)  # positives in the ground truth
        fp_tp = (fp + tp)  # positives in the prediction
        # Degenerate marginals would require log2(0); bail out explicitly.
        if ((fn_tp == 0) or ((fn_tp / n) == 1) or (fp_tp == 0) or ((fp_tp / n) == 1)):
            warnings.warn('Unable to compute variation of information due to log2 of 0, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        # Marginal entropies of truth (h1) and prediction (h2).
        h1 = (- (((fn_tp / n) * math.log2((fn_tp / n))) + ((1 - (fn_tp / n)) * math.log2((1 - (fn_tp / n))))))
        h2 = (- (((fp_tp / n) * math.log2((fp_tp / n))) + ((1 - (fp_tp / n)) * math.log2((1 - (fp_tp / n))))))
        # Joint-cell probabilities; zero cells use log2(1)=0 so they drop out.
        p00 = (1 if (tn == 0) else (tn / n))
        p01 = (1 if (fn == 0) else (fn / n))
        p10 = (1 if (fp == 0) else (fp / n))
        p11 = (1 if (tp == 0) else (tp / n))
        # Joint entropy of the two partitions.
        h12 = (- (((((tn / n) * math.log2(p00)) + ((fn / n) * math.log2(p01))) + ((fp / n) * math.log2(p10))) + ((tp / n) * math.log2(p11))))
        mi = ((h1 + h2) - h12)  # mutual information
        vi = ((h1 + h2) - (2 * mi))
        return vi
# NOTE(review): the three decorators were corrupted to leading '.parametrize'
# fragments in this file; restored as pytest parametrize markers.
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('action_size', [2])
@pytest.mark.parametrize('discrete_action', [False, True])
def test_vector_encoder_factory(observation_shape: Sequence[int], action_size: int, discrete_action: bool) -> None:
    """VectorEncoderFactory creates the right encoder types and round-trips
    through serialize/deserialize."""
    factory = VectorEncoderFactory()
    encoder = factory.create(observation_shape)
    assert isinstance(encoder, VectorEncoder)
    encoder = factory.create_with_action(observation_shape, action_size, discrete_action)
    assert isinstance(encoder, VectorEncoderWithAction)
    assert (encoder._discrete_action == discrete_action)
    assert (factory.get_type() == 'vector')
    # Serialization round-trip preserves the configuration.
    new_factory = VectorEncoderFactory.deserialize(factory.serialize())
    assert (new_factory.hidden_units == factory.hidden_units)
    assert (new_factory.use_batch_norm == factory.use_batch_norm)
def inspect_index(features_hdfpath):
    """Exploratory helper: print the dataset/attribute names of an HDF5
    feature file and demonstrate fancy indexing into 'ques_embeddings'.

    Fix: the file handle is now closed via a context manager; the original
    leaked it.
    """
    with h5py.File(features_hdfpath, 'r') as features_hdf:
        keys = list(features_hdf.keys())
        print(keys)
        attrs = list(features_hdf.attrs.keys())
        print(attrs)
        # Demo of np.random.randint; the second call deliberately overwrites
        # the first, as in the original exploration script.
        index = np.random.randint(16, size=10)
        index = np.random.randint(low=0, high=3, size=10)
        print(index)
        index_list = [1, 2, 4]
        index_array = np.array(index_list)
        print(index_list)
        print(index_array)
        # h5py accepts both a Python list and a numpy array as fancy index.
        indexed_q_embeddings = features_hdf['ques_embeddings'][index_list]
        indexed_q_embeddings = features_hdf['ques_embeddings'][index_array]
        print(indexed_q_embeddings.shape)
class VQ(Benchmark):
    """Benchmark scipy.cluster.vq.vq across codebook sizes and dtypes."""
    params = [[2, 10, 50], ['float32', 'float64']]
    param_names = ['k', 'dtype']

    def __init__(self):
        # Fixed seed so every run benchmarks identical data.
        rng = np.random.RandomState(0)
        self.data = rng.rand(5000, 5)
        self.cbook_source = rng.rand(50, 5)

    def setup(self, k, dtype):
        # Cast here, outside the timed region, so time_vq measures vq alone.
        self.obs = self.data.astype(dtype)
        self.cbook = self.cbook_source[:k].astype(dtype)

    def time_vq(self, k, dtype):
        vq(self.obs, self.cbook)
def main():
    """Run top-k image classification inference and write results to CSV.

    Builds the requested timm model (optionally multi-GPU), streams the
    dataset through it, collects per-image top-k class ids and writes them
    to <output_dir>/topk_ids.csv as 'filename,id1..id5'.
    """
    setup_default_logging()
    args = parser.parse_args()
    # Fall back to pretrained weights when no checkpoint is given.
    args.pretrained = (args.pretrained or (not args.checkpoint))
    model = create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint)
    logging.info(('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))))
    config = resolve_data_config(vars(args), model=model)
    (model, test_time_pool) = apply_test_time_pool(model, config, args)
    if (args.num_gpu > 1):
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
    else:
        model = model.cuda()
    # Test-time pooling works on the full image, so disable center cropping.
    loader = create_loader(Dataset(args.data), input_size=config['input_size'], batch_size=args.batch_size, use_prefetcher=True, interpolation=config['interpolation'], mean=config['mean'], std=config['std'], num_workers=args.workers, crop_pct=(1.0 if test_time_pool else config['crop_pct']))
    model.eval()
    k = min(args.topk, args.num_classes)
    batch_time = AverageMeter()
    end = time.time()
    topk_ids = []
    with torch.no_grad():
        for (batch_idx, (input, _)) in enumerate(loader):
            input = input.cuda()
            labels = model(input)
            topk = labels.topk(k)[1]  # class indices of the k largest logits
            topk_ids.append(topk.cpu().numpy())
            batch_time.update((time.time() - end))
            end = time.time()
            if ((batch_idx % args.log_freq) == 0):
                logging.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(batch_idx, len(loader), batch_time=batch_time))
    topk_ids = np.concatenate(topk_ids, axis=0).squeeze()
    # NOTE(review): the CSV row format hard-codes five label columns, so this
    # assumes k == 5 — confirm args.topk.
    with open(os.path.join(args.output_dir, './topk_ids.csv'), 'w') as out_file:
        filenames = loader.dataset.filenames()
        for (filename, label) in zip(filenames, topk_ids):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2},{3},{4},{5}\n'.format(filename, label[0], label[1], label[2], label[3], label[4]))
class MultiKerv2d(nn.Module):
    """Parallel bank of Kerv2d kernel-convolution branches whose outputs are
    concatenated along the channel axis.

    Each branch i produces kernel_out_channels[i] channels with its own
    mapping/kernel configuration; the totals must sum to out_channels.
    NOTE(review): branches are stored in a plain Python list (not
    nn.ModuleList), so they are invisible to .parameters()/.state_dict();
    cuda()/cpu() are overridden to compensate — confirm this is intended.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, mapping=['translation'], kernel_type=['linear'], learnable_kernel=[False], kernel_regularizer=[False], alpha=0.03, balance=2, power=3, sigma=2, gamma=1, kernel_out_channels=[(- 1)]):
        super(MultiKerv2d, self).__init__()
        # Per-branch configuration lists must line up.
        assert (out_channels == sum(kernel_out_channels))
        assert (len(mapping) == len(kernel_type) == len(kernel_out_channels))
        assert (len(mapping) == len(learnable_kernel) == len(kernel_regularizer))
        self.kerv2d = []
        self.output = ([None] * len(mapping))
        for (i, channels) in enumerate(kernel_out_channels):
            assert (channels > 0)
            self.kerv2d.append(nn.Sequential(nn.Kerv2d(in_channels, channels, kernel_size, stride, padding, dilation, groups, bias, mapping=mapping[i], kernel_type=kernel_type[i], learnable_kernel=learnable_kernel[i], kernel_regularizer=kernel_regularizer[i], alpha=alpha, balance=balance, power=power, sigma=sigma, gamma=gamma), nn.BatchNorm2d(channels)))
            self.kerv2d[i].cuda()  # branches are moved to GPU at build time

    def forward(self, input):
        """Run every branch on `input` and concatenate along channels."""
        for (i, kerv) in enumerate(self.kerv2d):
            self.output[i] = kerv(input)
        return torch.cat(self.output, 1)

    def cuda(self, device_id=None):
        # Move the hidden list-held branches too, then the registered params.
        for kerv in self.kerv2d:
            kerv.cuda()
        return self._apply((lambda t: t.cuda(device_id)))

    def cpu(self):
        for kerv in self.kerv2d:
            kerv.cpu()
        return self._apply((lambda t: t.cpu()))
def cal_scores(pred_label, true_label, type_='all'):
    """Accuracy plus macro precision/recall/F1, optionally for one class.

    With type_='all' the metrics cover every label; with type_ in {0,1,2,3}
    accuracy is computed over the samples whose TRUE label equals type_,
    and precision/recall/F1 are restricted to that label.

    Returns (acc, precision, recall, f1, counter_true, counter_pred).
    """
    true_label_class = true_label
    pred_label_class = pred_label
    if (type_ != 'all'):
        assert (type_ in [0, 1, 2, 3])
        # Restrict to samples whose ground-truth label matches type_.
        idx = [i for i in range(len(true_label)) if (true_label[i] == type_)]
        true_label_class = np.array([true_label[i] for i in idx])
        pred_label_class = np.array([pred_label[i] for i in idx])
    assert (len(pred_label) == len(true_label))
    assert (len(pred_label_class) == len(true_label_class))
    check = (pred_label_class == true_label_class)
    check = check.astype(int)
    counter_pred = get_labels_counter(pred_label_class)
    counter_true = get_labels_counter(true_label_class)
    acc = (np.sum(check) / float(len(pred_label_class)))
    # P/R/F1 always use the full label arrays; `labels` narrows the average.
    if (type_ == 'all'):
        (pre, recall, f_score, _) = precision_recall_fscore_support(true_label, pred_label, average='macro')
    else:
        (pre, recall, f_score, _) = precision_recall_fscore_support(true_label, pred_label, average='macro', labels=[type_])
    return (acc, pre, recall, f_score, counter_true, counter_pred)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker performing an iterative (non-recursive) depth-first walk.

    Subclasses implement the four node accessors; __iter__ emits serializer
    tokens, descending into children first and closing ELEMENT nodes
    (endTag) on the way back up.
    """
    def getNodeDetails(self, node):
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        currentNode = self.tree
        while (currentNode is not None):
            details = self.getNodeDetails(currentNode)
            (type, details) = (details[0], details[1:])
            hasChildren = False
            if (type == DOCTYPE):
                (yield self.doctype(*details))
            elif (type == TEXT):
                for token in self.text(*details):
                    (yield token)
            elif (type == ELEMENT):
                (namespace, name, attributes, hasChildren) = details
                # HTML void elements are emitted as empty tags; any children
                # are ignored.
                if (((not namespace) or (namespace == namespaces['html'])) and (name in voidElements)):
                    for token in self.emptyTag(namespace, name, attributes, hasChildren):
                        (yield token)
                    hasChildren = False
                else:
                    (yield self.startTag(namespace, name, attributes))
            elif (type == COMMENT):
                (yield self.comment(details[0]))
            elif (type == ENTITY):
                (yield self.entity(details[0]))
            elif (type == DOCUMENT):
                hasChildren = True
            else:
                (yield self.unknown(details[0]))
            # Descend into the first child when there is one...
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if (firstChild is not None):
                currentNode = firstChild
            else:
                # ...otherwise walk back up, closing non-void elements, until
                # a next sibling exists or the root is reached.
                while (currentNode is not None):
                    details = self.getNodeDetails(currentNode)
                    (type, details) = (details[0], details[1:])
                    if (type == ELEMENT):
                        (namespace, name, attributes, hasChildren) = details
                        if ((namespace and (namespace != namespaces['html'])) or (name not in voidElements)):
                            (yield self.endTag(namespace, name))
                    if (self.tree is currentNode):
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if (nextSibling is not None):
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
class ModelInfo():
    """Lightweight record describing a model hosted on the Hub.

    Known fields become attributes; any extra keyword arguments are stored
    as plain attributes as well.  `siblings` dicts are wrapped in
    ModelSibling objects when provided.

    Fix: `tags` previously used a mutable default argument ([]), shared
    across every instance constructed without tags; it now defaults to a
    fresh list per instance (None sentinel, backward-compatible).
    """
    def __init__(self, modelId: Optional[str]=None, tags: Optional[List[str]]=None, pipeline_tag: Optional[str]=None, siblings: Optional[List[Dict]]=None, **kwargs):
        self.modelId = modelId
        self.tags = [] if tags is None else tags
        self.pipeline_tag = pipeline_tag
        self.siblings = ([ModelSibling(**x) for x in siblings] if (siblings is not None) else None)
        # Any extra hub fields become plain attributes.
        for (k, v) in kwargs.items():
            setattr(self, k, v)
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block using LeakyReLU.

    Two BN->LeakyReLU->3x3-conv stages with optional dropout in between;
    when channel counts differ, a 1x1 convolution projects the shortcut.
    With actBeforeRes=True and mismatched channels, the pre-activation is
    applied to the input itself so the shortcut also sees it.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, leakyRate=0.01, actBeforeRes=True):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.LeakyReLU(leakyRate, inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.LeakyReLU(leakyRate, inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        # 1x1 projection only when the channel count changes.
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.activateBeforeResidual = actBeforeRes

    def forward(self, x):
        preactivate_input = (not self.equalInOut) and self.activateBeforeResidual
        if preactivate_input:
            # Activate the input itself so the projected shortcut sees it too.
            x = self.relu1(self.bn1(x))
            out = self.conv1(x)
        else:
            out = self.conv1(self.relu1(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(self.relu2(self.bn2(out)))
        shortcut = x if self.equalInOut else self.convShortcut(x)
        return torch.add(shortcut, out)
class MptTransformer(StateDictSerializationMixin, eqx.Module):
    """Stack of MPT decoder blocks followed by a final LayerNorm
    (haliax/levanter named-axis style)."""
    config: MptConfig = eqx.static_field()
    blocks: Stacked[MptBlock]
    norm_f: hnn.LayerNorm

    # NOTE(review): likely missing a @property decorator in the original
    # source (it is used as `self.Layers.size` below) — confirm.
    def Layers(self) -> Axis:
        return self.config.Layers

    # NOTE(review): takes no self/cls, so a @staticmethod decorator was
    # probably lost during extraction — confirm.
    def init(config: MptConfig, *, key):
        """Build a transformer with freshly initialised blocks and norm."""
        blocks = Stacked.init(config.Layers, MptBlock, gradient_checkpointing=True)(config, key=shaped_rng_split(key, config.n_layers))
        norm_f = hnn.LayerNorm.init(config.Embed, use_bias=config.use_bias)
        return MptTransformer(config, blocks, norm_f)
    # NOTE(review): the bare `_call` below appears to be the residue of a
    # mangled decorator (e.g. `@named_call`); restore it before use — as
    # written it raises NameError at class creation.
    _call
    def __call__(self, hidden_states: NamedArray, attention_mask: Optional[(AttentionMask | NamedArray)], *, key) -> NamedArray:
        # ALiBi positional bias is rebuilt from the config on every call.
        if self.config.attn_config.alibi:
            bias = _mpt_build_alibi_bias(self.config.Head, self.config.KeyPos, self.config.attn_config.alibi_bias_max)
        else:
            bias = None
        # One RNG key per layer when a key is supplied (e.g. for dropout).
        key = (maybe_rng_split(key, self.Layers.size) if (key is not None) else None)
        hidden_states = self.blocks.fold(hidden_states, attn_bias=bias, attention_mask=attention_mask, key=key)
        hidden_states = self.norm_f(hidden_states)
        return hidden_states

    def from_state_dict(self, state_dict: StateDict, prefix: Optional[str]=None):
        """Load from a torch-style per-layer state dict by stacking the
        per-layer block weights into the Stacked representation."""
        stacked = stack_state_dict(state_dict, prefix=apply_prefix(prefix, 'blocks'))
        out = super().from_state_dict(stacked, prefix=prefix)
        return out

    def update_state_dict(self, state_dict: StateDict, prefix: Optional[str]=None) -> StateDict:
        """Write into `state_dict`, unstacking blocks back into torch-style
        per-layer entries."""
        my_state_dict: StateDict = {}
        super().update_state_dict(my_state_dict, prefix)
        stacked_dict = unstack_state_dict(my_state_dict, apply_prefix(prefix, 'blocks'))
        state_dict.update(stacked_dict)
        return state_dict
class RGBArrayAsObservationWrapper(dm_env.Environment):
    """Wrap a gym-style env so rendered RGB pixels become the observation.

    Observations are dicts with 'pixels' (uint8 HxWx3) and, from step(),
    'goal_achieved' taken from info['is_success'].  Action specs are
    normalised to a BoundedArray in [-1, 1] when the wrapped spec lacks
    explicit bounds.  (The attribution URL in the original docstring was
    lost during extraction.)
    """
    def __init__(self, env, width=84, height=84):
        self._env = env
        self._width = width
        self._height = height
        # A reset + render is needed up front to learn the pixel dtype.
        self._env.reset()
        dummy_obs = self._env.render(mode='rgb_array', width=self._width, height=self._height)
        self.observation_space = spaces.Box(low=0, high=255, shape=(height, width, 3), dtype=dummy_obs.dtype)
        self.action_space = self._env.action_space
        wrapped_action_spec = self.action_space
        # Synthesise [-1, 1] bounds when the wrapped spec has none.
        if (not hasattr(wrapped_action_spec, 'minimum')):
            wrapped_action_spec.minimum = (- np.ones(wrapped_action_spec.shape))
        if (not hasattr(wrapped_action_spec, 'maximum')):
            wrapped_action_spec.maximum = np.ones(wrapped_action_spec.shape)
        self._action_spec = specs.BoundedArray(wrapped_action_spec.shape, np.float32, wrapped_action_spec.minimum, wrapped_action_spec.maximum, 'action')
        self._obs_spec = {}
        self._obs_spec['pixels'] = specs.BoundedArray(shape=self.observation_space.shape, dtype=np.uint8, minimum=0, maximum=255, name='observation')

    def reset(self, **kwargs):
        """Reset the wrapped env; returns the pixel observation dict."""
        obs = {}
        obs = self._env.reset(**kwargs)
        obs['pixels'] = obs['pixels'].astype(np.uint8)
        obs['goal_achieved'] = False
        return obs

    def step(self, action):
        """Step the wrapped env; maps info['is_success'] to 'goal_achieved'."""
        (observation, reward, done, info) = self._env.step(action)
        obs = {}
        obs['pixels'] = observation['pixels'].astype(np.uint8)
        obs['goal_achieved'] = info['is_success']
        return (obs, reward, done, info)

    def observation_spec(self):
        return self._obs_spec

    def action_spec(self):
        return self._action_spec

    def render(self, mode='rgb_array', width=256, height=256):
        # mode is accepted for API compatibility but rgb_array is forced.
        return self._env.render(mode='rgb_array', width=width, height=height)

    def __getattr__(self, name):
        # Delegate any unknown attribute to the wrapped environment.
        return getattr(self._env, name)
def plot_synthetic():
    """Load the synthetic SBM temporal edgelist and produce plots.

    Plots per-timestep edge counts and undirected component counts under
    the 'synthetic' graph name, then prints the outlier timestamps detected
    by plot_edges.
    """
    fname = 'datasets/SBM_processed/config_edgelist.txt'
    max_nodes = 100
    max_time = 150
    # (sic: 'temporarl' is the loader's actual function name upstream)
    G_times = SBM_loader.load_temporarl_edgelist(fname, max_nodes=max_nodes, max_time=max_time)
    graph_name = 'synthetic'
    outliers = normal_util.plot_edges(G_times, graph_name)
    normal_util.plot_num_components_undirected(G_times, graph_name)
    print(outliers)
def softmax_loss(model, images, labels, backwards=False):
    """Mean cross-entropy of model(images) against integer class labels.

    Labels are flattened to 1-D.  When `backwards` is True and the loss
    carries gradients, backward() is invoked before returning the loss.
    """
    predictions = model(images)
    criterion = torch.nn.CrossEntropyLoss(reduction='mean')
    loss = criterion(predictions, labels.view(-1))
    if backwards and loss.requires_grad:
        loss.backward()
    return loss
def create_validation_iterator(train: LAMLDataset, valid: Optional[LAMLDataset]=None, n_folds: Optional[int]=None, cv_iter: Optional[Callable]=None) -> TrainValidIterator:
    """Pick the appropriate train/validation iterator for the dataset type.

    Numpy-compatible datasets (pandas/numpy/CSR sparse) use the numpy
    iterator (holdout or CV depending on valid/n_folds/cv_iter); other
    dataset types get a HoldoutIterator when `valid` is given, else a
    DummyIterator (train used as its own validation).
    """
    if (type(train) in [PandasDataset, NumpyDataset, CSRSparseDataset]):
        # Casts are for typing only; no runtime conversion happens here.
        train = cast(NpDataset, train)
        valid = cast(NpDataset, valid)
        iterator = get_numpy_iterator(train, valid, n_folds, cv_iter)
    elif (valid is not None):
        iterator = HoldoutIterator(train, valid)
    else:
        iterator = DummyIterator(train)
    return iterator
def sample_generator(dataset, model, tokenizer, shuffle, pad_to_multiple_of=None):
    """Yield (features, labels) pairs from `dataset`, one example at a time.

    Each example is padded via the tokenizer, converted to int32-hinted TF
    tensors, and — for encoder-decoder models that support it — augmented
    with `decoder_input_ids` derived from the labels.
    """
    order = np.random.permutation(len(dataset)) if shuffle else np.arange(len(dataset))
    for idx in order:
        features = tokenizer.pad(dataset[int(idx)], return_tensors='np', pad_to_multiple_of=pad_to_multiple_of)
        features = {name: tf.convert_to_tensor(values, dtype_hint=tf.int32) for name, values in features.items()}
        if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'):
            decoder_ids = model.prepare_decoder_input_ids_from_labels(labels=tf.expand_dims(features['labels'], 0))
            features['decoder_input_ids'] = tf.squeeze(decoder_ids, 0)
        yield features, features['labels']
class MultiParameterSetCBreakoutWorld(ActionStrengthSetABreakoutWorld, OffsetBricksSetABreakoutWorld, SideObstacleSetABreakoutWorld):
    """Deprecated Breakout world combining action-strength, brick-offset and
    side-obstacle parameter sets.

    This environment parameter set was dropped and should no longer be used.
    """

    def __init__(self, *args, **kwargs):
        # Fix: the original issued the warning as a class-body statement, so it
        # fired once at import time rather than when the env is actually used.
        # Warn at instantiation with stacklevel=2 so it points at the caller.
        warnings.warn('This env. parameter was dropped and should no longer be used.', DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
def get_vocab(file):
    """Build a word-frequency vocabulary from a whitespace-tokenized text file.

    Words that are members of `punctuation` are skipped.  Prints the vocabulary
    size as a side effect (kept from the original behavior).

    Args:
        file: path to the text file to read.

    Returns:
        dict mapping word -> count, ordered by descending count.
    """
    word_dict = defaultdict(int)
    with open(file, 'r') as f:
        # Fix: iterate the file lazily instead of readlines() (no need to hold
        # the whole file in memory), and drop the pointless inner list build.
        for line in f:
            for w in line.strip().split():
                if w not in punctuation:
                    word_dict[w] += 1
    print(len(word_dict))
    return dict(sorted(word_dict.items(), key=lambda item: item[1], reverse=True))
class FpxImageFile(ImageFile.ImageFile):
    """Image plugin for the FlashPix (FPX) format, stored in an OLE container.

    Fix: the extraction had stripped the FlashPix property-set constants and
    the root-CLSID prefix; restored from the reference Pillow FpxImagePlugin:
    0x1000002/0x1000003 = width/height, 0x2000002 = colour-space description,
    0x3000001 = JPEG tables, CLSID 56616700-C154-11CE-8553-00AA00A1F95B.
    """

    format = 'FPX'
    format_description = 'FlashPix'

    def _open(self):
        # An FPX file is an OLE compound document with a FlashPix root CLSID.
        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError:
            raise SyntaxError('not an FPX file; invalid OLE file')
        if (self.ole.root.clsid != '56616700-C154-11CE-8553-00AA00A1F95B'):
            raise SyntaxError('not an FPX file; bad root CLSID')
        self._open_index(1)

    def _open_index(self, index=1):
        prop = self.ole.getproperties([('Data Object Store %06d' % index), '\x05Image Contents'])
        # Size of the highest-resolution image.
        self._size = (prop[0x1000002], prop[0x1000003])
        # Count resolution levels: each level halves the size, down to <= 64.
        size = max(self.size)
        i = 1
        while (size > 64):
            size = (size / 2)
            i += 1
        self.maxid = (i - 1)
        # Mode: FlashPix specifies the mode per channel per subimage; read it
        # once from the highest-resolution subimage and assume consistency.
        id = (self.maxid << 16)
        s = prop[(0x2000002 | id)]
        colors = []
        bands = i32(s, 4)
        if (bands > 4):
            raise IOError('Invalid number of bands')
        for i in range(bands):
            # Mask off the "uncalibrated" flag bit.
            colors.append((i32(s, (8 + (i * 4))) & 0x7FFFFFFF))
        (self.mode, self.rawmode) = MODES[tuple(colors)]
        # Load JPEG tables, if any.
        self.jpeg = {}
        for i in range(256):
            id = (0x3000001 | (i << 16))
            if (id in prop):
                self.jpeg[i] = prop[id]
        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index=1, subimage=0):
        stream = [('Data Object Store %06d' % index), ('Resolution %04d' % subimage), 'Subimage 0000 Header']
        fp = self.ole.openstream(stream)
        fp.read(28)  # skip fixed header
        s = fp.read(36)
        size = (i32(s, 4), i32(s, 8))
        tilesize = (i32(s, 16), i32(s, 20))
        offset = i32(s, 28)
        length = i32(s, 32)
        if (size != self.size):
            raise OSError('subimage mismatch')
        # Read the tile descriptors.
        fp.seek((28 + offset))
        s = fp.read((i32(s, 12) * length))
        x = y = 0
        (xsize, ysize) = size
        (xtile, ytile) = tilesize
        self.tile = []
        for i in range(0, len(s), length):
            compression = i32(s, (i + 8))
            if (compression == 0):
                self.tile.append(('raw', (x, y, (x + xtile), (y + ytile)), (i32(s, i) + 28), self.rawmode))
            elif (compression == 1):
                # Single-colour fill tile.
                self.tile.append(('fill', (x, y, (x + xtile), (y + ytile)), (i32(s, i) + 28), (self.rawmode, s[12:16])))
            elif (compression == 2):
                # NOTE(review): these indices into `s` ignore the per-tile
                # offset `i` (this matches upstream Pillow) — verify intent.
                internal_color_conversion = i8(s[14])
                jpeg_tables = i8(s[15])
                rawmode = self.rawmode
                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if (rawmode == 'RGBA'):
                        # JPEG with alpha: stored as YCbCrK, loaded as CMYK.
                        (jpegmode, rawmode) = ('YCbCrK', 'CMYK')
                    else:
                        jpegmode = None  # let the decoder decide
                else:
                    # The image is stored as defined by rawmode.
                    jpegmode = rawmode
                self.tile.append(('jpeg', (x, y, (x + xtile), (y + ytile)), (i32(s, i) + 28), (rawmode, jpegmode)))
                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]
            else:
                raise OSError('unknown/invalid compression')
            x = (x + xtile)
            if (x >= xsize):
                (x, y) = (0, (y + ytile))
                if (y >= ysize):
                    break
        self.stream = stream
        self.fp = None

    def load(self):
        # Open the subimage data stream lazily on first load.
        if (not self.fp):
            self.fp = self.ole.openstream((self.stream[:2] + ['Subimage 0000 Data']))
        return ImageFile.ImageFile.load(self)
def computeSequenceClassificationRecall(outputs, targets, tasks):
    """Compute recall (as a percentage) for sequence classification.

    `targets` items are singleton containers; only the first element is used.
    Both predictions and references are mapped to ids via the first task's
    `label2id` before scoring with the `recall` metric.
    """
    label2id = tasks[0].label2id
    pred_ids = [label2id[output] for output in outputs]
    gold_ids = [label2id[target[0]] for target in targets]
    metric = load_metric('recall')
    return metric.compute(references=gold_ids, predictions=pred_ids)['recall'] * 100
class CompareContinuousActionDiffEvaluator(EvaluatorProtocol):
    """Evaluates mean squared difference between an algorithm's continuous
    actions and those of a fixed base algorithm over the same observations.
    """

    _base_algo: QLearningAlgoProtocol
    _episodes: Optional[Sequence[EpisodeBase]]

    def __init__(self, base_algo: QLearningAlgoProtocol, episodes: Optional[Sequence[EpisodeBase]]=None):
        self._base_algo = base_algo
        self._episodes = episodes

    def __call__(self, algo: QLearningAlgoProtocol, dataset: ReplayBuffer) -> float:
        # Fall back to the dataset's episodes when none were given explicitly.
        episodes = self._episodes if self._episodes else dataset.episodes
        diffs = []
        for episode in episodes:
            for batch in make_batches(episode, WINDOW_SIZE, dataset.transition_picker):
                reference = self._base_algo.predict(batch.observations)
                candidate = algo.predict(batch.observations)
                # Per-sample squared L2 distance between the two action sets.
                diffs.extend(((candidate - reference) ** 2).sum(axis=1).tolist())
        return float(np.mean(diffs))
class EvalProcess(multiprocessing.Process):
    """Worker process that decodes and evaluates a shard of dialogue files.

    Each process builds its own TF graph/session on its assigned GPU, decodes
    every file in `path_list` turn by turn, caches per-file results as pickles
    in `dump_dir`, and (optionally) logs accumulated metrics.
    """

    def __init__(self, idx, gpu_index, path_list, kb_mode, dataset_obj, model_cfg, infer_cfg, dump_dir, max_sequence_len, verbose=False, **kwargs):
        """Args:
            idx: worker index (used only for logging).
            gpu_index: GPU device index for graph placement.
            path_list: shard of dialogue-file paths for this worker.
            kb_mode: knowledge-base mode passed to the LF executor.
            dataset_obj: dataset/feature processor (tokenizer, labels, features).
            model_cfg: model configuration dict (incl. 'model_class').
            infer_cfg: inference configuration dict (incl. 'gpu').
            dump_dir: existing directory for per-file result pickles.
            max_sequence_len: decoding sequence-length cap.
            verbose: when True, accumulate and periodically log metrics.
            **kwargs: optional 'use_filtered_ent', 'alter_ner_dir',
                'use_op_type_constraint', 'timeout'.
        """
        super(EvalProcess, self).__init__()
        self.idx = idx
        self.gpu_index = gpu_index
        self.path_list = path_list
        self.kb_mode = kb_mode
        self.dataset_obj = dataset_obj
        self.model_cfg = model_cfg
        self.infer_cfg = infer_cfg
        self.dump_dir = dump_dir
        self.max_sequence_len = max_sequence_len
        self.verbose = verbose
        assert (os.path.exists(self.dump_dir) and os.path.isdir(self.dump_dir))
        # Optional flags are validated defensively: anything malformed falls
        # back to the conservative default rather than raising.
        use_filtered_ent = kwargs.get('use_filtered_ent')
        if (isinstance(use_filtered_ent, bool) and use_filtered_ent):
            self.use_filtered_ent = True
        else:
            self.use_filtered_ent = False
        alter_ner_dir = kwargs.get('alter_ner_dir')
        if (isinstance(alter_ner_dir, str) and os.path.exists(alter_ner_dir) and os.path.isdir(alter_ner_dir)):
            self.alter_ner_dir = alter_ner_dir
        else:
            self.alter_ner_dir = None
        use_op_type_constraint = kwargs.get('use_op_type_constraint')
        if (isinstance(use_op_type_constraint, bool) and use_op_type_constraint):
            self.use_op_type_constraint = True
        else:
            self.use_op_type_constraint = False
        timeout = kwargs.get('timeout')
        if (timeout is not None):
            self.timeout = timeout
        else:
            self.timeout = 5.0
        logging.info('In process {}, use_filtered_ent is {}'.format(self.idx, self.use_filtered_ent))
        logging.info('In process {}, alter_ner_dir is {}'.format(self.idx, self.alter_ner_dir))
        logging.info('In process {}, use_op_type_constraint is {}'.format(self.idx, self.use_op_type_constraint))
        self.daemon = True

    def run(self):
        # NOTE(review): CUDA_VISIBLE_DEVICES comes from infer_cfg['gpu'] while
        # tf.device uses self.gpu_index — confirm these are meant to differ.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.infer_cfg['gpu'])
        feature_list = self.dataset_obj.process_test_data(self.path_list)
        g = tf.Graph()
        with g.as_default():
            with tf.device('/device:GPU:{}'.format(self.gpu_index)):
                with tf.variable_scope('model') as scope:
                    model_obj = self.model_cfg['model_class'](self.model_cfg, self.dataset_obj.tokenizer, self.model_cfg['dataset'], self.dataset_obj.get_labels_dict(), self.model_cfg['max_sequence_len'], 1000, scope.name)
                graph_handler = GraphHandler(model_obj, self.infer_cfg)
                evaluator = E2eEvaluator(model_obj, self.infer_cfg)
                sess = graph_handler.initialize()
            logging.info('loading inverse_index...')
            # The inverse index is only needed when no alternate NER dir is set.
            inverse_index = (load_json('data/EDL/inverse_index_spacy_token.json') if (self.alter_ner_dir is None) else None)
            logging.info('building lf executor')
            lf_executor = LfExecutor(kb_mode=self.kb_mode, use_op_type_constraint=self.use_op_type_constraint)
            logging.info('Done')
            # Metric accumulators (only filled when verbose).
            top1_pred = []
            dev_dict = {}
            recall = {}
            precision = {}
            _feature_ptr = 0
            for (_idx_file, _file_path) in tqdm(enumerate(self.path_list), total=len(self.path_list)):
                _dump_path = os.path.join(self.dump_dir, os.path.basename(_file_path))
                _raw_data = load_json(_file_path)
                # Raw data alternates question/answer utterances.
                assert ((len(_raw_data) % 2) == 0)
                _num_turns = (len(_raw_data) // 2)
                _proc_features = feature_list[_feature_ptr:(_feature_ptr + _num_turns)]
                _feature_ptr += _num_turns
                # Sanity-check feature/raw alignment for every turn.
                for _idx_t in range(_num_turns):
                    assert (_raw_data[(_idx_t * 2)]['utterance'] == _proc_features[_idx_t]['utterances']['cur_q'])
                    assert (_raw_data[((_idx_t * 2) + 1)]['utterance'] == _proc_features[_idx_t]['utterances']['cur_a'])
                _out_list = None
                # Reuse cached decode results when present and consistent.
                if (os.path.exists(_dump_path) and os.path.isfile(_dump_path)):
                    try:
                        _out_list = load_pickle(_dump_path)
                        assert (len(_out_list) == _num_turns)
                        for _idx_t in range(_num_turns):
                            assert (_out_list[_idx_t]['cur_question_type'] == _raw_data[(_idx_t * 2)]['question-type'])
                    except Exception:
                        # Fix: narrowed from a bare `except:` so that
                        # KeyboardInterrupt/SystemExit still propagate; any
                        # corrupt/stale cache simply triggers a re-decode.
                        _out_list = None
                if (_out_list is None):
                    _out_list = evaluator.decoding(sess, _proc_features, lf_executor, inverse_index, BaseProcessor.dict_e2t, self.dataset_obj.get_labels_dict()['EOs']['labels'], self.dataset_obj.get_labels_dict()['sketch']['labels'], self.dataset_obj.get_labels_dict()['predicates']['labels'], self.dataset_obj.get_labels_dict()['types']['labels'], batch_size=20, max_seq_len=self.max_sequence_len, timeout=self.timeout, use_filtered_ent=self.use_filtered_ent, alter_ner_dir=self.alter_ner_dir)
                    assert (len(_out_list) == _num_turns)
                    save_pickle(_out_list, _dump_path)
                if self.verbose:
                    for _out in _out_list:
                        accumulative_eval(_out['gold_answer'], _out['cur_question_type'], _out['prev_question_type'], _out['top1'], _out['predicted_answer'], top1_pred, dev_dict, recall, precision)
                # Periodic progress report every 40 files.
                if (self.verbose and (((_idx_file + 1) % 40) == 0)):
                    logging.info('')
                    logging.info(('=' * 30))
                    logging.info('From process {}'.format(self.idx))
                    smp_result_print_wrt_qt(top1_pred, dev_dict, recall, precision)
                    logging.info(('=' * 30))
@register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron')
def transformer_lm_megatron(args):
    """Megatron transformer LM architecture defaults (3072 dim, 72 layers,
    32 heads), falling back to `base_lm_architecture` for everything else.

    Fix: restored the `@register_model_architecture` decorator, which had been
    corrupted to a bare `_model_architecture(...)` expression.
    """
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', (3072 * 4))
    args.decoder_layers = getattr(args, 'decoder_layers', 72)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    base_lm_architecture(args)
class Timings():
    """Streaming per-name timing statistics.

    Each `time(name)` call records the wall-clock interval since the previous
    mark under `name`, updating running mean and variance incrementally.
    """

    def __init__(self):
        self._means = collections.defaultdict(int)
        self._vars = collections.defaultdict(int)
        self._counts = collections.defaultdict(int)
        self.reset()

    def reset(self):
        """Restart the interval clock without clearing accumulated stats."""
        self.last_time = timeit.default_timer()

    def time(self, name):
        """Record the elapsed interval since the last mark under `name`."""
        now = timeit.default_timer()
        elapsed = now - self.last_time
        self.last_time = now
        count = self._counts[name]
        old_mean = self._means[name]
        # Incremental mean/variance update over `count + 1` samples.
        new_mean = old_mean + (elapsed - old_mean) / (count + 1)
        new_var = (count * self._vars[name] + count * (old_mean - new_mean) ** 2 + (elapsed - new_mean) ** 2) / (count + 1)
        self._means[name] = new_mean
        self._vars[name] = new_var
        self._counts[name] += 1

    def means(self):
        return self._means

    def vars(self):
        return self._vars

    def stds(self):
        return {name: var ** 0.5 for name, var in self._vars.items()}

    def summary(self, prefix=''):
        """Render a report sorted by mean time, slowest first."""
        means = self.means()
        stds = self.stds()
        total = sum(means.values())
        result = prefix
        for k in sorted(means, key=means.get, reverse=True):
            result += (f''' %s: %.6fms +- %.6fms (%.2f%%) ''' % (k, (1000 * means[k]), (1000 * stds[k]), ((100 * means[k]) / total)))
        result += ('\nTotal: %.6fms' % (1000 * total))
        return result
@register_model
def gluon_resnet18_v1b(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a Gluon ResNet-18 V1b model.

    Fix: restored the `@register_model` decorator, which had been corrupted to
    a bare `_model` token.

    Args:
        pretrained: load pretrained weights for the default config.
        num_classes: number of classifier outputs.
        in_chans: number of input image channels.
        **kwargs: forwarded to the ResNet constructor.
    """
    default_cfg = default_cfgs['gluon_resnet18_v1b']
    model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model