code
stringlengths
101
5.91M
class MobileNetV1Config(PretrainedConfig):
    """Configuration holder for a MobileNetV1 model.

    Simply validates and records the architecture hyper-parameters; all
    remaining keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = 'mobilenet_v1'

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0,
                 min_depth=8, hidden_act='relu6', tf_padding=True,
                 classifier_dropout_prob=0.999, initializer_range=0.02,
                 layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)
        # A non-positive multiplier would collapse every layer to zero width.
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
def init_seed(seed):
    """Seed the torch RNGs (CPU and all CUDA devices) and make cuDNN
    deterministic (disables the autotuner benchmark mode)."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def calculate_uplift_at_top(y_true: np.ndarray, uplift_pred: np.ndarray, treatment: np.ndarray, top: float=30):
    """Uplift@top: mean outcome difference (treated minus control) among the
    ``top`` percent of samples with the highest predicted uplift.

    Samples must be strictly above the (100 - top)-th percentile of
    ``uplift_pred`` to count. Empty groups contribute a mean of 0.0.
    """
    threshold = np.percentile(uplift_pred, 100 - top)
    in_top = uplift_pred > threshold
    treated_top = (treatment == 1) & in_top
    control_top = (treatment == 0) & in_top
    n_treated = treated_top.sum()
    n_control = control_top.sum()
    mean_treated = y_true[treated_top].sum() / n_treated if n_treated > 0 else 0.0
    mean_control = y_true[control_top].sum() / n_control if n_control > 0 else 0.0
    return mean_treated - mean_control
class LPPool2d(_LPPoolNd):
    """Complex-capable 2D power-average (Lp) pooling.

    Delegates to ``F.lp_pool2d`` through the complex dispatch helper so the
    same layer can process complex inputs component-wise.
    """

    kernel_size: _size_2_t
    stride: _size_2_t

    def forward(self, input: Tensor) -> Tensor:
        # norm_type may be stored as an int/str; the functional API wants float.
        norm = float(self.norm_type)
        return cF.complex_fcaller(F.lp_pool2d, input, norm,
                                  self.kernel_size, self.stride, self.ceil_mode)
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout):
    """Apply ``func`` over ``tasks`` with a process pool while drawing a progress bar.

    ``tasks`` is either an iterable with a ``len``, or a ``(iterator, int)``
    tuple where the int gives the task count. With ``skip_first`` the first
    ``nproc * chunksize`` results are excluded from the bar (warm-up); the
    bar is only started once they have all arrived.
    Returns the list of results (input order iff ``keep_order``).
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    pool = init_pool(nproc, initializer, initargs)
    # When the warm-up batch is skipped, create the bar "not started" and
    # shrink its total accordingly.
    start = (not skip_first)
    task_num -= ((nproc * chunksize) * int(skip_first))
    prog_bar = ProgressBar(task_num, bar_width, start, file=file)
    results = []
    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)
    for result in gen:
        results.append(result)
        if skip_first:
            # Ignore the first nproc*chunksize results; start the bar exactly
            # when the warm-up batch completes.
            if (len(results) < (nproc * chunksize)):
                continue
            elif (len(results) == (nproc * chunksize)):
                prog_bar.start()
                continue
        prog_bar.update()
    prog_bar.file.write('\n')
    pool.close()
    pool.join()
    return results
class DelegatorData():
    """Bundle describing one estimator-delegation test case.

    NOTE: the default ``fit_args`` is evaluated once at import time, so every
    instance relying on the default shares the same dataset object.
    """

    def __init__(self, name, construct, skip_methods=(), fit_args=make_classification(random_state=0)):
        self.name = name
        self.skip_methods = skip_methods
        self.construct = construct
        self.fit_args = fit_args
def register_types_ns3_Config(module):
    """Register the ns3::Config namespace types with the PyBindGen module.

    Adds the MatchContainer class, its iterator type aliases, and the STL
    container instantiations the bindings expose.
    """
    root_module = module.get_root()
    module.add_class('MatchContainer', import_from_module='ns.core')
    # Alias the underlying vector const_iterator to MatchContainer::Iterator
    # in value, pointer and reference forms.
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator', u'ns3::Config::MatchContainer::Iterator')
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator*', u'ns3::Config::MatchContainer::Iterator*')
    typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator&', u'ns3::Config::MatchContainer::Iterator&')
    module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type=u'vector')
    module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
def get_sub_token_ids(question_tokens, span_ids, tu):
    """Map a token-level half-open span onto sub-token indices.

    Detokenizes the prefix and the span with ``tu.tokenizer`` and
    re-tokenizes each, returning the (start, end) offsets of the span in
    sub-token space.
    """
    start, end = span_ids
    prefix_text = tu.tokenizer.convert_tokens_to_string(question_tokens[:start])
    n_prefix = len(tu.tokenizer.tokenize(prefix_text))
    span_text = tu.tokenizer.convert_tokens_to_string(question_tokens[start:end])
    n_span = len(tu.tokenizer.tokenize(span_text))
    return (n_prefix, n_prefix + n_span)
def cubic_param_shape(initializer: Callable, extents: np.ndarray, pixel_spacing: float, control_point_spacing: float, pos: Union[(np.ndarray, goos.Function)], var_name: Optional[str]=None, reflection_symmetry: List[int]=None, periods: List[int]=None, **kwargs) -> Tuple[(goos.Variable, Shape)]:
    """Create a shape parametrized by bicubic interpolation of control points.

    Wraps the legacy ``optplan.CubicParametrization`` through the compat
    layer; ``control_point_spacing / pixel_spacing`` becomes its
    undersampling factor and the z pixel size spans the whole extent
    (a 2D parametrization extruded in z).
    Returns the ``(variable, shape)`` pair produced by ``compat_param``.
    """
    from spins.goos import compat
    from spins.invdes.problem_graph import optplan
    # Raw coordinates are promoted to a constant graph node.
    if (not isinstance(pos, goos.Function)):
        pos = goos.Constant(pos)
    return compat.compat_param(param=optplan.CubicParametrization(undersample=(control_point_spacing / pixel_spacing), reflection_symmetry=reflection_symmetry, periods=periods), initializer=initializer, extents=extents, pixel_size=[pixel_spacing, pixel_spacing, extents[2]], pos=pos, var_name=var_name, **kwargs)
class RelativeRamifiedExtensionRingCappedRelative(EisensteinExtensionGeneric, pAdicCappedRelativeRingGeneric):
    """Capped-relative Eisenstein extension ring over a relative unramified base."""

    def __init__(self, exact_modulus, approx_modulus, prec, print_mode, shift_seed, names, implementation):
        # exact_modulus: the Eisenstein polynomial over the exact base;
        # approx_modulus: its approximation over the p-adic base ring.
        self._exact_modulus = exact_modulus
        # Precision needed on the unramified base: ceil(prec / e) where
        # e = approx_modulus.degree().
        unram_prec = (((prec + approx_modulus.degree()) - 1) // approx_modulus.degree())
        # Floating-point copy of the base ring, consumed by the PowComputer.
        KFP = approx_modulus.base_ring().change(show_prec=False, type='floating-point')
        self.prime_pow = PowComputer_relative_maker(approx_modulus.base_ring().prime(), max(min((unram_prec - 1), 30), 1), unram_prec, prec, False, exact_modulus.change_ring(KFP), shift_seed.change_ring(KFP), 'capped-rel')
        self._implementation = 'Polynomial'
        EisensteinExtensionGeneric.__init__(self, approx_modulus, prec, print_mode, names, RelativeRamifiedCappedRelativeElement)
        # Coercions from ZZ and the base ring; conversion (not coercion) from QQ.
        from .relative_ramified_CR import pAdicCoercion_ZZ_CR, pAdicConvert_QQ_CR
        self.register_coercion(pAdicCoercion_ZZ_CR(self))
        self.register_coercion(pAdicRelativeBaseringInjection(approx_modulus.base_ring(), self))
        self.register_conversion(pAdicConvert_QQ_CR(self))
def runtime_fn(logfile_path):
    """Extract the mean allocation time (seconds, rounded to 3 d.p.) from a log.

    Only the final ten lines are scanned and the last matching line wins;
    returns None when no line matches.
    """
    runtime = None
    pattern = 'Mean allocation computation time: (\\d+\\.\\d+) seconds'
    with open(logfile_path, 'r') as handle:
        tail = handle.readlines()[-10:]
    for entry in tail:
        match = re.match(pattern, entry)
        if match:
            runtime = round(float(match.group(1)), 3)
    return runtime
def validate_bg_pnf(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Bulgarian PNF numbers.

    Accepts a scalar, a (pandas or dask) Series, or a DataFrame. For a
    DataFrame, only ``column`` is checked when given, otherwise every cell.
    Each value is tested with ``pnf.is_valid``.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(pnf.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column == '':
            return df.applymap(pnf.is_valid)
        return df[column].apply(pnf.is_valid)
    # Scalar input: validate directly.
    return pnf.is_valid(df)
def contained_in(filename, directory):
    """Return True when ``filename`` lies inside ``directory`` (or equals it).

    Both paths are absolutized and case-normalized first.
    FIX: the original used ``os.path.commonprefix``, which compares raw
    characters, so '/a/bc.txt' was wrongly reported as inside '/a/b'.
    ``os.path.commonpath`` respects path-component boundaries.
    """
    filename = os.path.normcase(os.path.abspath(filename))
    directory = os.path.normcase(os.path.abspath(directory))
    try:
        return os.path.commonpath([filename, directory]) == directory
    except ValueError:
        # Paths on different drives (Windows) share no common path at all.
        return False
def trainLRModel(train_all, train_label, window_size_list, ngram_extract_mode, flag, save_model=False):
    """Train a logistic-regression classifier on n-gram presence features.

    Extracts n-grams from ``train_all`` with the given window sizes, builds a
    thresholded feature dictionary, converts each sample to a deduplicated
    sparse index matrix and fits sklearn's LogisticRegression. When
    ``save_model`` is set, the model and dictionaries are written under
    ./trained_model/ prefixed by ``flag``.
    Returns ``(fitted model, n-gram dictionary)``.
    """
    train_ngram_all = tokenExtraction(window_size_list, train_all, mode=ngram_extract_mode)
    (train_ngram_counter, train_ngram_dict) = buildTrainDict(train_ngram_all, verbose=False, set_threshold=True, threshold=1)
    train_features_idx = convertFeature2Idx(train_ngram_all, train_ngram_dict)
    # Deduplicate indices per sample: features are presence flags, not counts.
    train_features_no_dup = []
    for line in train_features_idx:
        train_features_no_dup.append(list(set(line)))
    train_idx_sparse = convertToSparseMatrix(train_features_no_dup, train_ngram_dict)
    lr = LogisticRegression(solver='lbfgs')
    lr.fit(train_idx_sparse['sparse_matrix'], train_label)
    print('[I] logistic regression training completed.')
    print(('[I] training set dimension: ' + str(np.shape(train_idx_sparse['sparse_matrix']))))
    if save_model:
        with open((('./trained_model/' + flag) + '_lr_model.pkl'), 'wb') as f:
            pickle.dump(lr, f)
        with open((('./trained_model/' + flag) + '_train_ngram_counter.json'), 'w') as f:
            json.dump(train_ngram_counter, f)
        with open((('./trained_model/' + flag) + '_train_ngram_dict.json'), 'w') as f:
            json.dump(train_ngram_dict, f)
        print('[I] all model files have been saved.')
    return (lr, train_ngram_dict)
class RandomTransforms(object):
    """Base class for transforms applied with some randomness.

    Only validates and stores the transform list; subclasses decide how
    (and which of) the transforms are applied by overriding ``__call__``.
    """

    def __init__(self, transforms):
        # transforms: list or tuple of callables used by subclasses.
        assert isinstance(transforms, (list, tuple))
        self.transforms = transforms

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()
def findCosineDistance(source_representation: Union[(np.ndarray, list)], test_representation: Union[(np.ndarray, list)]) -> np.float64:
    """Return the cosine distance (1 - cosine similarity) of two embeddings.

    List inputs are promoted to numpy arrays first.
    """
    if isinstance(source_representation, list):
        source_representation = np.array(source_representation)
    if isinstance(test_representation, list):
        test_representation = np.array(test_representation)
    dot = np.matmul(np.transpose(source_representation), test_representation)
    source_sq = np.sum(np.multiply(source_representation, source_representation))
    test_sq = np.sum(np.multiply(test_representation, test_representation))
    return 1 - dot / (np.sqrt(source_sq) * np.sqrt(test_sq))
class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction (PVT-style).

    When ``sr_ratio > 1`` keys/values are computed from a spatially
    down-sampled copy of the input, shrinking the attention matrix.
    ``forward`` returns the output tokens and a (pooled) copy of the raw
    pre-softmax attention map.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f'dim {dim} should be divided by num_heads {num_heads}.'
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** (-0.5)
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # Strided conv reducing the K/V token grid by sr_ratio per side.
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # He-style init with fan_out computed from the kernel.
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        (B, N, C) = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        if self.sr_ratio > 1:
            # Down-sample tokens spatially before computing keys/values.
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        (k, v) = (kv[0], kv[1])
        # FIX: both matrix products lost their '@' operators in transit
        # ("q k.transpose(...)" / "(attn v)"); restored standard scaled
        # dot-product attention.
        attn_ = q @ k.transpose(-2, -1)
        attn = (attn_ * self.scale).softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        # Expose the pre-softmax attention laid out on the (H, W) query grid;
        # pool it down when K/V were spatially reduced so shapes match.
        attn_copy = attn_.clone().reshape(B, self.num_heads, H, W, attn.shape[-1])
        if self.sr_ratio > 1:
            attn_copy = F.avg_pool3d(attn_copy, kernel_size=(self.sr_ratio, self.sr_ratio, 1), stride=(self.sr_ratio, self.sr_ratio, 1))
        attn_copy = attn_copy.reshape(-1, self.num_heads, attn.shape[-1], attn.shape[-1])
        return (x, attn_copy)
@random_testing
def test_random_chain_complex(level=1, trials=1, verbose=False):
    """Check CHomP and Sage agree on homology of random chain complexes.

    FIX(review): the decorator's '@' (and part of its name) was garbled in
    transit -- only '_testing' survived. Restored as ``@random_testing``
    (Sage's random-testing harness); confirm against the upstream source.

    Raises ValueError on the first disagreement, after printing both
    answers and the offending complex.
    """
    deprecation(33777, 'the CHomP interface is deprecated; hence so is this function')
    for i in range(trials):
        C = random_chain_complex(level=level)
        for d in C.differential():
            chomp = C.homology(d, verbose=verbose)
            no_chomp = C.homology(d, algorithm='no_chomp', verbose=verbose)
            if (chomp != no_chomp):
                print(('Homology in dimension %s according to CHomP: %s' % (d, chomp)))
                print(('Homology in dimension %s according to Sage: %s' % (d, no_chomp)))
                print(('Chain complex: %s' % C.differential()))
                raise ValueError
def process_single_table(table, all_entity_set, min_num=3):
    """Extract (subject-entity, attribute-entity) pairs from one wiki table.

    Rows whose subject-column cell links to a known entity are paired with
    the known entities found in the row's other columns. Columns with more
    than ``min_num`` pairs are emitted with the table metadata:
    ``[table_id, pgEnt, pgTitle, secTitle, caption, [headers[0], headers[j]], pairs]``.
    """
    table_id = table.get('_id', '')
    pgTitle = table.get('pgTitle', '').lower()
    pgEnt = table.get('pgId', -1)
    # Page entity only counts when it is a known entity.
    if pgEnt not in all_entity_set:
        pgEnt = -1
    secTitle = table.get('sectionTitle', '').lower()
    caption = table.get('tableCaption', '').lower()
    headers = table.get('processed_tableHeaders', [])
    rows = table.get('tableData', {})
    entity_cells = np.array(table.get('entityCell', [[]]))
    subject = table['subject_column']
    linked_cells = list(zip(*entity_cells.nonzero()))
    # Pass 1: the linked subject entity (and its surface text) of each row.
    core_entities = {}
    for row_idx, col_idx in linked_cells:
        if col_idx != subject:
            continue
        entity = rows[row_idx][col_idx]['surfaceLinks'][0]['target']['id']
        if entity in all_entity_set:
            core_entities[row_idx] = (entity, rows[row_idx][col_idx]['text'])
    # Pass 2: pair every other linked cell with its row's subject entity.
    processed_data = {}
    for row_idx, col_idx in linked_cells:
        if col_idx == subject or row_idx not in core_entities:
            continue
        entity = rows[row_idx][col_idx]['surfaceLinks'][0]['target']['id']
        if entity in all_entity_set:
            processed_data.setdefault(col_idx, []).append([core_entities[row_idx], entity])
    final_data = []
    for col_idx in processed_data:
        if len(processed_data[col_idx]) > min_num:
            final_data.append([table_id, pgEnt, pgTitle, secTitle, caption, [headers[0], headers[col_idx]], processed_data[col_idx]])
    return final_data
def _random_dismantlable_lattice(n):
    """Return the digraph of a random dismantlable lattice on ``n`` elements.

    Starts from the two-element chain 0 -> n-1 and, for every further
    element, splits a randomly chosen relation a < b by inserting the new
    element between them (removing the direct a -> b edge).
    """
    from sage.misc.prandom import randint
    D = DiGraph({0: [n - 1]})
    for new_vertex in range(1, n - 1):
        lower = randint(0, new_vertex // 2)
        reachable = list(D.depth_first_search(lower))
        upper = reachable[randint(1, len(reachable) - 1)]
        D.add_vertex(new_vertex)
        D.add_edge(lower, new_vertex)
        D.add_edge(new_vertex, upper)
        # No-op when (lower, upper) was not a direct edge, same as before.
        D.delete_edge(lower, upper)
    return D
def re_match(utter, value):
    """Return True when ``value`` occurs in ``utter`` as a whole token.

    The value must be delimited on both sides by one of '?', ',', '.', '!'
    or a space; the utterance is padded with spaces so matches at either
    end count.
    FIX: ``value`` is now escaped with ``re.escape`` so regex
    metacharacters in it (e.g. '(', '+', '.') are matched literally instead
    of corrupting -- or crashing -- the pattern.
    """
    padded = ' ' + utter + ' '
    pattern = '[?,.! ]' + re.escape(value) + '[?,.! ]'
    return re.search(pattern, padded) is not None
@_cache
def get_request_signature() -> inspect.Signature:
    """Return (and memoize) the signature of ``requests.Request``.

    FIX: the ``@_cache`` decorator lost its '@' in transit; restored so
    repeated calls reuse the computed signature. The ``requests`` import is
    deferred so the dependency is only loaded on first use.
    """
    import requests
    return inspect.signature(requests.Request)
def distributed_init(config):
    """Initialize torch.distributed (or XLA ordinals) from ``config``.

    Raises ValueError for ``world_size == 1``. On the cold path it applies
    optional NCCL environment tuning, calls ``init_process_group``, fills
    in MASTER_ADDR/PORT from the init_method URL when absent, and performs
    a warm-up all_reduce. Returns the (re-read) distributed rank.
    NOTE(review): indentation was lost in transit; the trailing
    suppress_output/rank/return statements are reconstructed at function
    level -- confirm against upstream.
    """
    if (config.distributed.world_size == 1):
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')
    logger.info(f'XLA Mode:{is_xla()}')
    if is_xla():
        # TPU path: ranks come from torch_xla ordinals.
        config.device_id = xm.get_local_ordinal()
        config.distributed.rank = xm.get_ordinal()
    elif dist.is_initialized():
        warnings.warn('Distributed is already initialized, cannot initialize twice!')
        config.distributed.rank = dist.get_rank()
    else:
        logger.info(f'Distributed Init (Rank {config.distributed.rank}): {config.distributed.init_method}')
        # Optional NCCL tuning knobs, propagated through the environment.
        nccl_config = config.distributed.get('nccl', {})
        if nccl_config.get('nsocks_perthread', None):
            os.environ['NCCL_NSOCKS_PERTHREAD'] = str(nccl_config['nsocks_perthread'])
            logger.info(f"NCCL_NSOCKS_PERTHREAD: {os.environ['NCCL_NSOCKS_PERTHREAD']}")
        if nccl_config.get('socket_nthreads', None):
            os.environ['NCCL_SOCKET_NTHREADS'] = str(nccl_config['socket_nthreads'])
            logger.info(f"NCCL_SOCKET_NTHREADS: {os.environ['NCCL_SOCKET_NTHREADS']}")
        dist.init_process_group(backend=config.distributed.backend, init_method=config.distributed.init_method, world_size=config.distributed.world_size, rank=config.distributed.rank)
        logger.info(f'Initialized Host {socket.gethostname()} as Rank {config.distributed.rank}')
        if (('MASTER_ADDR' not in os.environ) or ('MASTER_PORT' not in os.environ)):
            # Derive master address/port from the init_method URL.
            split = config.distributed.init_method.split('//')
            assert (len(split) == 2), ("host url for distributed should be split by '//' " + 'into exactly two elements')
            split = split[1].split(':')
            assert (len(split) == 2), 'host url should be of the form <host_url>:<host_port>'
            os.environ['MASTER_ADDR'] = split[0]
            os.environ['MASTER_PORT'] = split[1]
        # Warm-up all_reduce to establish the communicator.
        dist.all_reduce(torch.zeros(1).cuda())
    suppress_output(is_main())
    config.distributed.rank = dist.get_rank()
    return config.distributed.rank
def islong_doublefunction(rout):
    """Return 1 when ``rout`` is a function whose result is a long double.

    The result variable is named by the 'result' key when present,
    otherwise by the routine's own name; 0 when the routine is not a
    function or the result variable is unknown.
    """
    if not isfunction(rout):
        return 0
    result_name = rout['result'] if 'result' in rout else rout['name']
    if result_name in rout['vars']:
        return islong_double(rout['vars'][result_name])
    return 0
class Afformer(nn.Module):
    """Affordance model: encoder -> decoder -> predictor.

    In training mode returns a loss dict (KL-divergence heatmap loss plus,
    when ground-truth actions are present, a BCE multi-label action loss);
    in eval mode returns softmax heatmaps and sigmoided action logits.
    """

    def __init__(self, encoder: nn.Module, decoder: nn.Module, predictor: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.predictor = predictor

    def forward(self, batch):
        # batch layout: (images, videos, num_frames_list, ..., gt_heatmaps, gt_actions)
        (images, videos, num_frames_list) = batch[:3]
        (images, videos) = self.encoder(images, videos)
        (heatmaps, action_logits) = self.predictor(self.decoder(images, videos, num_frames_list))
        if self.training:
            (gt_heatmaps, gt_actions) = batch[(- 2):]
            if (gt_actions is not None):
                # Keep only the heatmap channels of the annotated actions.
                heatmaps = torch.cat([heatmap[gt_action] for (heatmap, gt_action) in zip(heatmaps, gt_actions)])
                gt_heatmaps = torch.cat(gt_heatmaps).view_as(heatmaps)
                # Multi-hot action target built by scattering ones.
                gt_action_logits = torch.zeros_like(action_logits, device=action_logits.device, dtype=action_logits.dtype)
                for (i, gt_action) in enumerate(gt_actions):
                    gt_action_logits[i].scatter_(0, gt_action, 1)
                loss_action = F.binary_cross_entropy_with_logits(action_logits, gt_action_logits, reduction='mean')
            else:
                gt_heatmaps = gt_heatmaps.view_as(heatmaps)
                loss_action = None
            # Normalize targets to per-sample distributions, compare with KL.
            gt_heatmaps = gt_heatmaps.flatten(1)
            gt_heatmaps = (gt_heatmaps / gt_heatmaps.sum(dim=1, keepdim=True))
            heatmaps = heatmaps.flatten(1).log_softmax(dim=1)
            loss_heatmap = F.kl_div(heatmaps, gt_heatmaps, reduction='batchmean')
            loss = (loss_heatmap if (loss_action is None) else (loss_heatmap + loss_action))
            return dict(loss=loss, loss_heatmap=loss_heatmap, loss_action=loss_action)
        else:
            heatmaps = heatmaps.flatten(2).softmax(dim=(- 1))
            return (heatmaps, action_logits.sigmoid_())
# FIX(review): the registration decorator was garbled in transit, leaving
# only the bare tuple ('data.dtd', 'class'); restored the VTAB-style
# registry call -- confirm the registry symbol name against upstream.
@Registry.register('data.dtd', 'class')
class DTDData(base.ImageTfdsData):
    """DTD (Describable Textures Dataset) wrapped as a TFDS data source.

    Downloads/prepares ``dtd:3.*.*`` and exposes the standard VTAB splits,
    including the 800/200 small-sample splits.
    """

    def __init__(self, data_dir=None):
        dataset_builder = tfds.builder('dtd:3.*.*', data_dir=data_dir)
        dataset_builder.download_and_prepare()
        tfds_splits = {'train': 'train', 'val': 'validation', 'trainval': 'train+validation', 'test': 'test', 'train800': 'train[:800]', 'val200': 'validation[:200]', 'train800val200': 'train[:800]+validation[:200]'}
        # Example counts drive the epoch bookkeeping for each split.
        train_count = dataset_builder.info.splits['train'].num_examples
        val_count = dataset_builder.info.splits['validation'].num_examples
        test_count = dataset_builder.info.splits['test'].num_examples
        num_samples_splits = {'train': train_count, 'val': val_count, 'trainval': (train_count + val_count), 'test': test_count, 'train800': 800, 'val200': 200, 'train800val200': 1000}
        super(DTDData, self).__init__(dataset_builder=dataset_builder, tfds_splits=tfds_splits, num_samples_splits=num_samples_splits, num_preprocessing_threads=400, shuffle_buffer_size=10000, base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']), num_classes=dataset_builder.info.features['label'].num_classes)
class SuperbKS(SuperbProblem):
    """SUPERB Keyword Spotting: utterance classification on Speech Commands.

    NOTE(review): each configuration decorator lost its '@default' prefix
    in transit (only '_cfg(' survived); reconstructed as
    ``@default_cfg(...)`` with ``@classmethod`` -- confirm against the
    upstream s3prl definition.
    """

    @default_cfg(**SuperbProblem.setup.default_except(
        corpus=dict(CLS=gsc_v1_for_superb, dataset_root='???'),
        train_datapipe=dict(CLS=UtteranceClassificationPipe, train_category_encoder=True, sox_effects=EFFECTS),
        train_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32),
        valid_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS),
        valid_sampler=dict(CLS=BalancedWeightedSampler, batch_size=32),
        test_datapipe=dict(CLS=UtteranceClassificationPipe, sox_effects=EFFECTS),
        test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=32),
        downstream=dict(CLS=MeanPoolingLinear, hidden_size=256),
        task=dict(CLS=UtteranceClassificationTask),
    ))
    @classmethod
    def setup(cls, **cfg):
        super().setup(**cfg)

    @default_cfg(**SuperbProblem.train.default_except(
        optimizer=dict(CLS='torch.optim.Adam', lr=0.0001),
        trainer=dict(total_steps=200000, log_step=100, eval_step=5000, save_step=1000,
                     gradient_clipping=1.0, gradient_accumulate_steps=1,
                     valid_metric='accuracy', valid_higher_better=True),
    ))
    @classmethod
    def train(cls, **cfg):
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        super().inference(**cfg)

    @default_cfg(
        stages=['setup', 'train', 'inference'],
        start_stage='setup',
        final_stage='inference',
        setup=setup.default_cfg.deselect('workspace', 'resume'),
        train=train.default_cfg.deselect('workspace', 'resume'),
        inference=inference.default_cfg.deselect('workspace', 'resume'),
    )
    @classmethod
    def run(cls, **cfg):
        super().run(**cfg)
class Partition2(nn.Module):
    """Auto-generated pipeline-parallel partition of a T5 encoder slice.

    Holds encoder blocks 6-8 plus block 9's first layer norm. The forward
    replays the traced computation (self-attention then feed-forward per
    block) on flattened tensor arguments; ``lookup`` maps the generated
    ``l_*`` names back to the original module paths.
    """

    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        # Attach the partition's submodules under generated names l_0..l_33.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors as parameters or buffers.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Shape of the flattened forward arguments: two tensors.
        self.input_structure = [1, 1]
        # Generated name -> original module path (used by state_dict helpers).
        self.lookup = {'l_0': 'encoder.block.6.layer.0.layer_norm', 'l_1': 'encoder.block.6.layer.0.SelfAttention.q', 'l_2': 'encoder.block.6.layer.0.SelfAttention.k', 'l_3': 'encoder.block.6.layer.0.SelfAttention.v', 'l_4': 'encoder.block.6.layer.0.SelfAttention.o', 'l_5': 'encoder.block.6.layer.0.dropout', 'l_6': 'encoder.block.6.layer.1.layer_norm', 'l_7': 'encoder.block.6.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.6.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.6.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.6.layer.1.dropout', 'l_11': 'encoder.block.7.layer.0.layer_norm', 'l_12': 'encoder.block.7.layer.0.SelfAttention.q', 'l_13': 'encoder.block.7.layer.0.SelfAttention.k', 'l_14': 'encoder.block.7.layer.0.SelfAttention.v', 'l_15': 'encoder.block.7.layer.0.SelfAttention.o', 'l_16': 'encoder.block.7.layer.0.dropout', 'l_17': 'encoder.block.7.layer.1.layer_norm', 'l_18': 'encoder.block.7.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.7.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.7.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.7.layer.1.dropout', 'l_22': 'encoder.block.8.layer.0.layer_norm', 'l_23': 'encoder.block.8.layer.0.SelfAttention.q', 'l_24': 'encoder.block.8.layer.0.SelfAttention.k', 'l_25': 'encoder.block.8.layer.0.SelfAttention.v', 'l_26': 'encoder.block.8.layer.0.SelfAttention.o', 'l_27': 'encoder.block.8.layer.0.dropout', 'l_28': 'encoder.block.8.layer.1.layer_norm', 'l_29': 'encoder.block.8.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.8.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.8.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.8.layer.1.dropout', 'l_33': 'encoder.block.9.layer.0.layer_norm'}
        self.to(self.device)

    def forward(self, *args):
        # x0: hidden states; x1: additive attention bias (position + mask).
        (x0, x1) = unflatten(args, self.input_structure)
        # --- Block 6 self-attention ---
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, (- 1), 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = (x0 + t_3)
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_3 + t_0)
        t_3 = t_0[slice(None, 2, None)]
        t_1 = t_3[0]
        # --- Block 6 feed-forward ---
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[slice(2, None, None)]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4, inplace=False)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = (t_1 + t_4)
        t_3 = (t_4, t_3)
        t_0 = (t_3 + t_0)
        t_3 = t_0[slice(None, 2, None)]
        t_3 = t_3[0]
        # --- Block 7 self-attention ---
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[slice(None, 2, None)]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, (- 1), 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = (t_3 + t_6)
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_6 + t_0)
        t_6 = t_0[slice(None, 2, None)]
        t_3 = t_6[0]
        # --- Block 7 feed-forward ---
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[slice(2, None, None)]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5, inplace=False)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = (t_3 + t_5)
        t_6 = (t_5, t_6)
        t_0 = (t_6 + t_0)
        t_6 = t_0[slice(None, 2, None)]
        t_6 = t_6[0]
        # --- Block 8 self-attention ---
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[slice(None, 2, None)]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, (- 1), 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = (t_6 + t_7)
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_6 = t_7[0]
        # --- Block 8 feed-forward ---
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        # --- Block 9: first layer norm only, rest lives in the next partition ---
        t_1 = self.l_33(t_7)
        t_0 = t_0[2]
        return list(flatten((t_7, t_1, t_0)))

    # The following delegate to partition-aware module helpers generated
    # alongside this class (they translate via self.lookup).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def clean_ad_nrt(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', split: bool=False, inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean Andorran NRT numbers found in ``df[column]``.

    Adds a ``<column>_clean`` column (or replaces ``column`` while keeping
    the ``*_clean`` name when ``inplace``); the work runs per dask
    partition and the result is materialized at the end. ``errors``
    controls how ``_format`` treats invalid values.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    df = to_dask(df)
    # _format yields a tuple per value; stash tuples in a temp column and
    # extract element 0 as the cleaned value.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, split, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column, then rename it to the *_clean name.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
def compute_mmd(samples1, samples2, kernel, is_hist=True, *args, **kwargs):
    """Compute the (squared) Maximum Mean Discrepancy between two sample sets.

    MMD^2 = E[k(x, x')] + E[k(y, y')] - 2 E[k(x, y)], estimated via `disc`.

    Parameters:
        samples1, samples2: collections of samples (histograms when is_hist=True).
        kernel: kernel function forwarded to `disc`.
        is_hist: if True, each sample is first normalized to sum to 1.
        *args, **kwargs: forwarded to `disc`.
    """
    if is_hist:
        # Normalize each histogram into a probability distribution.
        samples1 = [s / np.sum(s) for s in samples1]
        samples2 = [s / np.sum(s) for s in samples2]
    within_first = disc(samples1, samples1, kernel, *args, **kwargs)
    within_second = disc(samples2, samples2, kernel, *args, **kwargs)
    between = disc(samples1, samples2, kernel, *args, **kwargs)
    return (within_first + within_second) - (2 * between)
def add_checkpoint_args(parser):
    """Register checkpointing-related command-line arguments.

    Parameters:
        parser: an argparse.ArgumentParser to extend.

    Returns:
        The argparse argument group containing the checkpoint options.
    """
    group = parser.add_argument_group('Checkpointing')
    group.add_argument('--save-dir', metavar='DIR', default='checkpoints', help='path to save checkpoints')
    # Fixed: the help string previously had an unclosed parenthesis.
    group.add_argument('--restore-file', default='checkpoint_last.pt', help='filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt)')
    group.add_argument('--reset-dataloader', action='store_true', help='if set, does not reload dataloader state from the checkpoint')
    group.add_argument('--reset-lr-scheduler', action='store_true', help='if set, does not load lr scheduler state from the checkpoint')
    group.add_argument('--reset-meters', action='store_true', help='if set, does not load meters from the checkpoint')
    group.add_argument('--reset-optimizer', action='store_true', help='if set, does not load optimizer state from the checkpoint')
    group.add_argument('--optimizer-overrides', default='{}', type=str, metavar='DICT', help='a dictionary used to override optimizer args when loading a checkpoint')
    group.add_argument('--save-interval', type=int, default=1, metavar='N', help='save a checkpoint every N epochs')
    group.add_argument('--save-interval-updates', type=int, default=0, metavar='N', help='save a checkpoint (and validate) every N updates')
    # -1 means "keep all" for the two retention options below.
    group.add_argument('--keep-interval-updates', type=int, default=(- 1), metavar='N', help='keep the last N checkpoints saved with --save-interval-updates')
    group.add_argument('--keep-last-epochs', type=int, default=(- 1), metavar='N', help='keep last N epoch checkpoints')
    group.add_argument('--no-save', action='store_true', help="don't save models or checkpoints")
    group.add_argument('--no-epoch-checkpoints', action='store_true', help='only store last and best checkpoints')
    group.add_argument('--no-last-checkpoints', action='store_true', help="don't store last checkpoints")
    group.add_argument('--no-save-optimizer-state', action='store_true', help="don't save optimizer-state as part of checkpoint")
    group.add_argument('--best-checkpoint-metric', type=str, default='loss', help='metric to use for saving "best" checkpoints')
    group.add_argument('--maximize-best-checkpoint-metric', action='store_true', help='select the largest metric value for saving "best" checkpoints')
    group.add_argument('--early-stop', default=10000, type=int)
    return group
def unit_to_english(u: str) -> str:
    """Translate a time-unit abbreviation to its English name.

    Raises:
        KeyError: if `u` is not one of 'ns', 'us', 'ms', 's'.
    """
    names = {
        'ns': 'nanosecond',
        'us': 'microsecond',
        'ms': 'millisecond',
        's': 'second',
    }
    return names[u]
class ProbabilisticDistance(NumpyArrayMetric):
    """Probabilistic distance between a reference and a predicted segmentation.

    Computes sum|gt - seg| / (2 * sum(gt * seg)) over the flattened arrays,
    returning -1 when the joint probability is zero.
    """

    def __init__(self, metric: str='PROBDST'):
        super().__init__(metric)

    def calculate(self):
        """Return the probabilistic distance, or -1 when reference and prediction never overlap."""
        reference = self.reference.flatten().astype(np.int8)
        prediction = self.prediction.flatten().astype(np.int8)
        total_difference = np.absolute(reference - prediction).sum()
        joint = (reference * prediction).sum()
        # Guard against division by zero: disjoint masks yield the sentinel -1.
        if joint == 0:
            return -1
        return total_difference / (2.0 * joint)
class TestBartlett():
    """Checks for the Bartlett (triangular) window generator."""

    def test_basic(self):
        # Symmetric windows of even and odd length, plus a periodic (sym=False) one.
        expected_even = [0, 0.4, 0.8, 0.8, 0.4, 0]
        expected_odd = [0, 1 / 3, 2 / 3, 1.0, 2 / 3, 1 / 3, 0]
        expected_periodic = [0, 1 / 3, 2 / 3, 1.0, 2 / 3, 1 / 3]
        assert_allclose(windows.bartlett(6), expected_even)
        assert_allclose(windows.bartlett(7), expected_odd)
        assert_allclose(windows.bartlett(6, False), expected_periodic)
class MetaNeXtBlock(nn.Module):
    """MetaNeXt residual block: token mixer -> norm -> MLP, with optional
    layer-scale (gamma) and stochastic depth on the residual branch."""

    def __init__(self, dim, token_mixer=nn.Identity, norm_layer=nn.BatchNorm2d, mlp_layer=ConvMlp, mlp_ratio=4, act_layer=nn.GELU, ls_init_value=1e-06, drop_path=0.0):
        super().__init__()
        self.token_mixer = token_mixer(dim)
        self.norm = norm_layer(dim)
        self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
        # Layer-scale parameter; disabled entirely when ls_init_value is falsy.
        if ls_init_value:
            self.gamma = nn.Parameter(ls_init_value * torch.ones(dim))
        else:
            self.gamma = None
        # Identity when drop_path is 0 so the residual add is a no-op wrapper.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        residual = x
        out = self.token_mixer(x)
        out = self.norm(out)
        out = self.mlp(out)
        if self.gamma is not None:
            # Broadcast gamma over the channel dimension (NCHW layout).
            out = out.mul(self.gamma.reshape(1, -1, 1, 1))
        return self.drop_path(out) + residual
def sample_gaussian(mu, logvar):
    """Draw z ~ N(mu, exp(logvar)) via the reparameterization trick.

    Parameters:
        mu: mean tensor of the Gaussian.
        logvar: log-variance tensor (same shape as mu after broadcasting).

    Returns:
        A sample z = mu + std * epsilon with epsilon ~ N(0, I).
    """
    std = tf.exp(0.5 * logvar)
    epsilon = tf.random_normal(tf.shape(logvar), name='epsilon')
    return mu + tf.multiply(std, epsilon)
class SurfaceClassifier_multiLoss(nn.Module):
    # Two-branch MLP classifier built from 1x1 Conv1d layers operating on
    # point-wise features: one branch for 2D image features, one for 3D
    # features, fused into a joint head. Every layer (except the first)
    # re-concatenates the branch's raw input features (a dense skip
    # connection). During training each branch's last layer emits an
    # auxiliary sigmoid prediction, so forward() returns a list of
    # predictions for a multi-loss objective.
    def __init__(self, opt, filter_channels_2d, filter_channels_3d, filter_channels_joint):
        super(SurfaceClassifier_multiLoss, self).__init__()
        # 2D branch: layer i maps (in0 + in_i) -> in_{i+1} channels (skip concat).
        self.filters_2d = []
        for idx in range(0, (len(filter_channels_2d) - 1)):
            if (idx == 0):
                self.filters_2d.append(nn.Conv1d(filter_channels_2d[idx], filter_channels_2d[(idx + 1)], 1))
            else:
                self.filters_2d.append(nn.Conv1d((filter_channels_2d[0] + filter_channels_2d[idx]), filter_channels_2d[(idx + 1)], 1))
            # Register explicitly since self.filters_2d is a plain list, not a ModuleList.
            self.add_module(('features_2d_conv%d' % idx), self.filters_2d[idx])
        # 3D branch: same construction pattern as the 2D branch.
        self.filters_3d = []
        for idx in range(0, (len(filter_channels_3d) - 1)):
            if (idx == 0):
                self.filters_3d.append(nn.Conv1d(filter_channels_3d[idx], filter_channels_3d[(idx + 1)], 1))
            else:
                self.filters_3d.append(nn.Conv1d((filter_channels_3d[0] + filter_channels_3d[idx]), filter_channels_3d[(idx + 1)], 1))
            self.add_module(('features_3d_conv%d' % idx), self.filters_3d[idx])
        # The joint branch's skip input is the concatenation of both raw inputs;
        # its first layer also receives the fused penultimate activations.
        filter_channels_joint[0] = (filter_channels_2d[0] + filter_channels_3d[0])
        filter_channels_fused = (filter_channels_2d[(- 2)] + filter_channels_3d[(- 2)])
        self.filters_joint = []
        for idx in range(0, (len(filter_channels_joint) - 1)):
            if (idx == 0):
                self.filters_joint.append(nn.Conv1d((filter_channels_joint[0] + filter_channels_fused), filter_channels_joint[(idx + 1)], 1))
            else:
                self.filters_joint.append(nn.Conv1d((filter_channels_joint[0] + filter_channels_joint[idx]), filter_channels_joint[(idx + 1)], 1))
            self.add_module(('features_joint_conv%d' % idx), self.filters_joint[idx])
        self.sigmoid_layer = nn.Sigmoid()

    def forward(self, feature_2d, feature_3d):
        # Returns a list of sigmoid predictions: [2d_aux, 3d_aux, joint] while
        # training, [joint] only at eval time (the aux heads are skipped).
        pred_sdf = []
        feature_2d_skip = feature_2d
        feature_2d_pass = feature_2d
        for idx in range(len(self.filters_2d)):
            # At eval time skip the branch's auxiliary output layer entirely.
            if ((idx == (len(self.filters_2d) - 1)) and (not self.training)):
                continue
            feature_2d_pass = self._modules[('features_2d_conv%d' % idx)]((feature_2d_pass if (idx == 0) else torch.cat([feature_2d_pass, feature_2d_skip], 1)))
            if (idx != (len(self.filters_2d) - 1)):
                feature_2d_pass = F.leaky_relu(feature_2d_pass)
                if (idx == (len(self.filters_2d) - 2)):
                    # Penultimate activation is forwarded to the joint head.
                    feature_2d_fuse = feature_2d_pass
            else:
                pred_sdf_2d = self.sigmoid_layer(feature_2d_pass)
                pred_sdf.append(pred_sdf_2d)
        feature_3d_skip = feature_3d
        feature_3d_pass = feature_3d
        for idx in range(len(self.filters_3d)):
            if ((idx == (len(self.filters_3d) - 1)) and (not self.training)):
                continue
            feature_3d_pass = self._modules[('features_3d_conv%d' % idx)]((feature_3d_pass if (idx == 0) else torch.cat([feature_3d_pass, feature_3d_skip], 1)))
            if (idx != (len(self.filters_3d) - 1)):
                feature_3d_pass = F.leaky_relu(feature_3d_pass)
                if (idx == (len(self.filters_3d) - 2)):
                    feature_3d_fuse = feature_3d_pass
            else:
                pred_sdf_3d = self.sigmoid_layer(feature_3d_pass)
                pred_sdf.append(pred_sdf_3d)
        # Joint head: consumes the fused penultimate features, with both raw
        # inputs concatenated as the recurring skip connection.
        feature_joint_skip = torch.cat([feature_2d_skip, feature_3d_skip], 1)
        feature_joint_pass = torch.cat([feature_2d_fuse, feature_3d_fuse], 1)
        for idx in range(len(self.filters_joint)):
            feature_joint_pass = self._modules[('features_joint_conv%d' % idx)](torch.cat([feature_joint_pass, feature_joint_skip], 1))
            if (idx != (len(self.filters_joint) - 1)):
                feature_joint_pass = F.leaky_relu(feature_joint_pass)
            else:
                pred_sdf_joint = self.sigmoid_layer(feature_joint_pass)
                pred_sdf.append(pred_sdf_joint)
        return pred_sdf
class RandomVariable_generic(Parent):
    """A random variable defined on a probability space X, taking values in RR."""

    def __init__(self, X, RR):
        # Validate the probability space before initializing the parent structure.
        if is_ProbabilitySpace(X):
            Parent.__init__(self, X)
            self._codomain = RR
        else:
            raise TypeError(('Argument X (= %s) must be a probability space' % X))

    def probability_space(self):
        """Return the probability space on which this random variable is defined."""
        return self.base()

    def domain(self):
        """Return the domain (the underlying probability space)."""
        return self.base()

    def codomain(self):
        """Return the codomain of the random variable."""
        return self._codomain

    def field(self):
        """Return the field of values (same object as the codomain)."""
        return self._codomain
# NOTE(review): this bare call looks like a truncated decorator line
# (presumably `@test_utils.test()` or similar) that lost its `@` and prefix
# during extraction -- confirm against the original file.
_utils.test()

def test_nested():
    # Two taichi fields over the same index range: x placed via a nested
    # dense 32x4 layout, y via a flat dense layout of size n.
    x = ti.field(ti.i32)
    y = ti.field(ti.i32)
    n = 128
    ti.root.dense(ti.i, (n // 4)).dense(ti.i, 4).place(x)
    ti.root.dense(ti.i, n).place(y)

    # NOTE(review): this looks like it should be a `@ti.kernel`; the decorator
    # appears to have been lost in extraction -- confirm against the original.
    def fill():
        for i in x:
            x[i] = i
            y[i] = (i * 2)
    fill()
    # Both layouts must read back the same values regardless of placement.
    for i in range(n):
        assert (x[i] == i)
        assert (y[i] == (i * 2))
class BucketizedColumnTransformer(CategoricalColumnTransformer):
    # Discretizes a numeric source column into integer bucket ids using a
    # sorted list of boundaries; n boundaries define n+1 buckets.
    def __init__(self, source_column, boundaries):
        # Boundaries must be strictly increasing for searchsorted to be valid.
        for i in six.moves.range((len(boundaries) - 1)):
            assert (boundaries[i] < boundaries[(i + 1)]), 'Boundaries must be sorted in ascending order'
        self.source_column = source_column
        self.boundaries = boundaries

    def _set_feature_column_names(self, names):
        # Propagate the names to both this transformer and the wrapped column.
        CategoricalColumnTransformer._set_feature_column_names(self, names)
        self.source_column._set_feature_column_names(names)

    def get_feature_column_names(self):
        return self.source_column.get_feature_column_names()

    def num_classes(self):
        # n boundaries partition the number line into n+1 buckets.
        return (len(self.boundaries) + 1)

    def __call__(self, inputs):
        # side='right' means a value equal to a boundary falls into the upper bucket.
        return apply_transform_on_value(self.source_column(inputs), (lambda x: np.searchsorted(self.boundaries, x, side='right')))
class LabelSanitizer(BaseEstimator, TransformerMixin):
    """Applies known label corrections to GAP-style coreference data and
    derives one-hot targets (A / B / NEITHER) from the coref columns.

    Parameters:
        sanitize_labels: when False, corrections are skipped entirely.
    """

    # Maps a regex (matched case-insensitively against the correction label)
    # to the corrected (a_coref, b_coref) pair.
    _CORRECTION_PATTERNS = [
        ('\\(a\\)', [True, False]),
        ('\\(b\\)', [False, True]),
        ('neither', [False, False]),
    ]

    def __init__(self, sanitize_labels):
        self.sanitize_labels = sanitize_labels

    def _apply_corrections(self, X, corrections):
        """Overwrite a_coref/b_coref in X (in place) for every corrected id present in X.

        Refactored: the three copy-pasted mask/ids/assign blocks collapsed into
        one loop over _CORRECTION_PATTERNS.
        """
        for pattern, coref_values in self._CORRECTION_PATTERNS:
            mask = corrections['label'].str.lower().str.contains(pattern)
            # Only correct rows whose id actually exists in this split of X.
            ids = [row_id for row_id in corrections[mask]['id'].values if row_id in X.index]
            X.loc[(ids, ['a_coref', 'b_coref'])] = coref_values

    def transform(self, X, corrections):
        """Return {'X': sanitized copy of X, 'y': one-hot A/B/NEITHER frame}."""
        X = X.copy(deep=True)
        if not self.sanitize_labels:
            # Fixed typo: previously printed 'sanization'.
            print('Label sanitization will be skipped.')
        else:
            print(corrections)
        if len(corrections) and self.sanitize_labels:
            self._apply_corrections(X, corrections)
        if ('a_coref' in X.columns) and ('b_coref' in X.columns):
            y = pd.DataFrame(X[['a_coref', 'b_coref']].values, columns=['A', 'B'])
        else:
            # No gold labels available (e.g. unlabeled test data): default to all-False.
            y = pd.DataFrame([[False, False]] * len(X), columns=['A', 'B'])
        y['NEITHER'] = (~y['A']) & (~y['B'])
        return {'X': X, 'y': y}
def retrieval_yr(var_cf_code, time, months, days, grid, area, lvllist, levtype, year, target):
    """Download one year of ERA5 data from the Copernicus CDS into `target`.

    Parameters:
        var_cf_code: CDS variable name(s) to retrieve.
        time, months, days: temporal selection passed through to the CDS request.
        grid: output grid resolution.
        area: geographic bounding box.
        lvllist: pressure levels (used only when levtype == 'pl').
        levtype: 'sfc' for single-level data, 'pl' for pressure-level data.
        year: year to download (also returned, e.g. for parallel-map bookkeeping).
        target: output file path for the downloaded NetCDF.
    """
    # Local import: cdsapi is only needed when actually downloading.
    import cdsapi
    server = cdsapi.Client()
    print('variable: {}'.format(var_cf_code))
    print(year)
    print('months: {}'.format(months))
    print('days {}'.format(days))
    if (levtype == 'sfc'):
        # Single-level (surface) dataset.
        server.retrieve('reanalysis-era5-single-levels', {'product_type': 'reanalysis', 'class': 'ei', 'expver': '1', 'grid': grid, 'year': year, 'month': months, 'day': days, 'area': area, 'variable': var_cf_code, 'time': time, 'format': 'netcdf'}, target)
    elif (levtype == 'pl'):
        # Pressure-level dataset: identical request plus the level list.
        server.retrieve('reanalysis-era5-pressure-levels', {'product_type': 'reanalysis', 'class': 'ei', 'expver': '1', 'grid': grid, 'year': year, 'month': months, 'day': days, 'area': area, 'levelist': lvllist, 'variable': var_cf_code, 'time': time, 'format': 'netcdf'}, target)
    return year
class AutoEncoder(object):
    """Symmetric fully-connected autoencoder built with the Keras functional API.

    Keyword parameters (all optional, see defaults below):
        nI: input dimensionality (required before create_model()).
        nH: number of hidden layers per side (encoder and decoder).
        cf: compression factor; the bottleneck has nI/cf units.
        activation: activation for all Dense layers.
        optimizer: Keras optimizer (defaults to Adam(lr=0.001)).
        verbose: print model structure when > 0.
    """

    def __init__(self, **kwargs):
        params = {'nI': None, 'nH': 3, 'cf': 1, 'activation': 'tanh', 'optimizer': None, 'verbose': 0}
        params.update(kwargs)
        self.params = params
        # Built lazily by create_model() (also triggered by train()).
        self.ann = None

    def create_model(self):
        """Build, compile, and store the autoencoder; returns the Keras model.

        Bug fix: the model is now assigned to self.ann (previously it was
        built but never stored, so train()/predict() raised AttributeError).
        """
        nI = self.params['nI']
        nH = self.params['nH']
        cf = self.params['cf']
        activation = self.params['activation']
        optimizer = self.params['optimizer']
        verbose = self.params['verbose']
        # Layer widths shrink linearly from nI to nI/cf over nH steps, then
        # mirror back up for the decoder.
        temp = np.linspace(nI, nI / cf, nH + 1).astype(int)
        nH_enc = temp[1:]
        nH_dec = temp[:-1][::-1]
        input_layer = Input(shape=(nI,))
        # Chain encoder then decoder layers off a single running tensor.
        layer = input_layer
        for layer_size in nH_enc:
            layer = Dense(layer_size, activation=activation)(layer)
        for layer_size in nH_dec:
            layer = Dense(layer_size, activation=activation)(layer)
        autoencoder = Model(input_layer, layer)
        if optimizer is None:
            optimizer = optimizers.Adam(lr=0.001)
        if verbose > 0:
            print('Created autoencoder with structure:')
            print(', '.join('layer_{}: {}'.format(v, i) for (v, i) in enumerate(np.hstack([nI, nH_enc, nH_dec]))))
        autoencoder.compile(optimizer=optimizer, loss='mean_squared_error')
        self.ann = autoencoder
        return autoencoder

    def train(self, x, **train_params):
        """Fit the autoencoder on x (input == target).

        Bug fix: the model was previously only created when verbose was
        truthy; it is now created whenever it does not exist yet.
        """
        if self.ann is None:
            if self.params['verbose']:
                print('Creating model.')
            self.create_model()
        self.ann.fit(x, x, **train_params)

    def predict(self, x, test_params=None):
        """Return the reconstruction of x. (Mutable default dict replaced with None.)"""
        test_params = {} if test_params is None else test_params
        return self.ann.predict(x, **test_params)
class JointProbabilityDistribution(DiscreteFactor):
    # A joint probability distribution over discrete variables, represented as
    # a DiscreteFactor whose values are constrained to sum to 1.
    def __init__(self, variables, cardinality, values):
        # A valid joint distribution must sum to 1 (up to float tolerance).
        if np.isclose(np.sum(values), 1):
            super(JointProbabilityDistribution, self).__init__(variables, cardinality, values)
        else:
            raise ValueError("The probability values doesn't sum to 1.")

    def __repr__(self):
        var_card = ', '.join([f'{var}:{card}' for (var, card) in zip(self.variables, self.cardinality)])
        return f'<Joint Distribution representing P({var_card}) at {hex(id(self))}>'

    def __str__(self):
        return self._str(phi_or_p='P')

    def marginal_distribution(self, variables, inplace=True):
        # Marginalize out everything EXCEPT `variables`; `variables` may be a
        # single name or a collection of names.
        return self.marginalize(list((set(list(self.variables)) - set((variables if isinstance(variables, (list, set, dict, tuple)) else [variables])))), inplace=inplace)

    def check_independence(self, event1, event2, event3=None, condition_random_variable=False):
        # Test whether event1 is independent of event2 (optionally given
        # event3). When condition_random_variable is True, event3 is treated
        # as conditioning random variables (checks phi(x,y,z)*phi(z) ==
        # phi(x,z)*phi(y,z) for each variable pair); otherwise event3 is a
        # list of (variable, state) assignments to condition on first.
        JPD = self.copy()
        if isinstance(event1, str):
            raise TypeError('Event 1 should be a list or array-like structure')
        if isinstance(event2, str):
            raise TypeError('Event 2 should be a list or array-like structure')
        if event3:
            if isinstance(event3, str):
                raise TypeError('Event 3 cannot of type string')
            elif condition_random_variable:
                if (not all((isinstance(var, str) for var in event3))):
                    raise TypeError('Event3 should be a 1d list of strings')
                event3 = list(event3)
                phi_z = JPD.marginal_distribution(event3, inplace=False).to_factor()
                for variable_pair in itertools.product(event1, event2):
                    phi_xyz = JPD.marginal_distribution((event3 + list(variable_pair)), inplace=False).to_factor()
                    phi_xz = JPD.marginal_distribution((event3 + [variable_pair[0]]), inplace=False).to_factor()
                    phi_yz = JPD.marginal_distribution((event3 + [variable_pair[1]]), inplace=False).to_factor()
                    if ((phi_xyz * phi_z) != (phi_xz * phi_yz)):
                        return False
                return True
            else:
                # Condition the copied distribution on the given assignments,
                # then fall through to the pairwise marginal check below.
                JPD.conditional_distribution(event3)
        # Marginal independence check: P(x, y) == P(x) * P(y) for every pair.
        for variable_pair in itertools.product(event1, event2):
            if (JPD.marginal_distribution(variable_pair, inplace=False) != (JPD.marginal_distribution(variable_pair[0], inplace=False) * JPD.marginal_distribution(variable_pair[1], inplace=False))):
                return False
        return True

    def get_independencies(self, condition=None):
        # Collect all pairwise marginal independencies (optionally after
        # conditioning on `condition`).
        JPD = self.copy()
        if condition:
            JPD.conditional_distribution(condition)
        independencies = Independencies()
        for variable_pair in itertools.combinations(list(JPD.variables), 2):
            if (JPD.marginal_distribution(variable_pair, inplace=False) == (JPD.marginal_distribution(variable_pair[0], inplace=False) * JPD.marginal_distribution(variable_pair[1], inplace=False))):
                independencies.add_assertions(variable_pair)
        return independencies

    def conditional_distribution(self, values, inplace=True):
        # Reduce on the given (variable, state) assignments and renormalize.
        JPD = (self if inplace else self.copy())
        JPD.reduce(values)
        JPD.normalize()
        if (not inplace):
            return JPD

    def copy(self):
        return JointProbabilityDistribution(self.scope(), self.cardinality, self.values)

    def minimal_imap(self, order):
        # Construct a minimal I-map Bayesian network following the given
        # variable ordering (standard algorithm: for each variable, find a
        # minimal parent subset of its predecessors rendering it independent
        # of the rest).
        from pgmpy.models import BayesianModel

        def get_subsets(u):
            for r in range((len(u) + 1)):
                for i in itertools.combinations(u, r):
                    (yield i)
        G = BayesianModel()
        for variable_index in range(len(order)):
            u = order[:variable_index]
            for subset in get_subsets(u):
                if ((len(subset) < len(u)) and self.check_independence([order[variable_index]], (set(u) - set(subset)), subset, True)):
                    G.add_edges_from([(variable, order[variable_index]) for variable in subset])
        return G

    def is_imap(self, model):
        # True iff the model's CPD product equals this joint distribution.
        from pgmpy.models import BayesianModel
        if (not isinstance(model, BayesianModel)):
            raise TypeError('model must be an instance of BayesianModel')
        factors = [cpd.to_factor() for cpd in model.get_cpds()]
        factor_prod = reduce(mul, factors)
        JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)
        if (JPD_fact == factor_prod):
            return True
        else:
            return False

    def to_factor(self):
        return DiscreteFactor(self.variables, self.cardinality, self.values)

    def pmap(self):
        # Not implemented.
        pass
def analyze_sdfg(sdfg: SDFG, w_d_map: Dict[(str, sp.Expr)], analyze_tasklet, assumptions: [str], detailed_analysis: bool=False) -> None:
    """Analyze work/depth of an SDFG, filling `w_d_map` with per-element
    (work, depth) sympy expressions.

    Parameters:
        sdfg: the SDFG to analyze (analyzed on a deep copy; input untouched).
        w_d_map: output mapping from element id to a (work, depth) tuple.
        analyze_tasklet: callback computing work/depth for a single tasklet.
        assumptions: symbol assumptions (e.g. 'N>0'), parsed into substitutions.
        detailed_analysis: forwarded to the traversal for finer-grained results.
    """
    # Operate on a copy so SSA transformation / propagation don't mutate the caller's SDFG.
    sdfg = deepcopy(sdfg)
    pipeline = FixedPointPipeline([StrictSymbolSSA()])
    pipeline.apply_pass(sdfg, {})
    array_symbols = get_array_size_symbols(sdfg)
    # equality_subs pin symbols to values; all_subs collects the remaining substitutions.
    (equality_subs, all_subs) = parse_assumptions((assumptions if (assumptions is not None) else []), array_symbols)
    for sd in sdfg.all_sdfgs_recursive():
        propagation.propagate_states(sd, concretize_dynamic_unbounded=True)
    symbols = {}
    sdfg_work_depth(sdfg, w_d_map, analyze_tasklet, symbols, equality_subs, (all_subs[0][0] if (len(all_subs) > 0) else {}), detailed_analysis)
    # Apply remaining substitutions and evaluate symbols into the final expressions.
    for (k, (v_w, v_d)) in w_d_map.items():
        (v_w, v_d) = do_subs(v_w, v_d, all_subs)
        v_w = symeval(v_w, symbols)
        v_d = symeval(v_d, symbols)
        w_d_map[k] = (v_w, v_d)
def _print_and_log(message):
    """Echo a message both to stdout and to the module logger."""
    print(message)
    logger.info(message)


def _folds_are_valid(fold_nums, split_name):
    """Return True if every fold number is acceptable; report the offending list otherwise.

    Refactored: replaces three copy-pasted validation loops in
    prepare_urban_sound_8k (one per split).
    """
    for fold_num in fold_nums:
        if fold_num not in ACCEPTABLE_FOLD_NUMS:
            _print_and_log(f'{split_name} fold numbers {fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}')
            return False
    return True


def prepare_urban_sound_8k(data_folder, audio_data_folder, save_json_train, save_json_valid, save_json_test, train_fold_nums=[1, 2, 3, 4, 5, 6, 7, 8], valid_fold_nums=[9], test_fold_nums=[10], skip_manifest_creation=False):
    """Create train/valid/test JSON manifests for the UrbanSound8K dataset.

    Parameters:
        data_folder: root folder containing the dataset metadata.
        audio_data_folder: folder with the per-fold audio files.
        save_json_train/valid/test: output manifest paths.
        train/valid/test_fold_nums: fold numbers (int or list) per split.
        skip_manifest_creation: validate inputs only, do not write manifests.

    On invalid input the function reports the problem and returns without raising.
    """
    # Accept a bare int as shorthand for a single-fold list.
    if isinstance(train_fold_nums, int):
        train_fold_nums = [train_fold_nums]
    if isinstance(valid_fold_nums, int):
        valid_fold_nums = [valid_fold_nums]
    if isinstance(test_fold_nums, int):
        test_fold_nums = [test_fold_nums]
    # Validate each split's fold numbers; abort on the first invalid list.
    if not _folds_are_valid(train_fold_nums, 'Train'):
        return
    if not _folds_are_valid(valid_fold_nums, 'Validation'):
        return
    if not _folds_are_valid(test_fold_nums, 'Test'):
        return
    # The splits must not share folds.
    if folds_overlap(train_fold_nums, valid_fold_nums):
        _print_and_log(f'Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!')
        return
    if folds_overlap(train_fold_nums, test_fold_nums):
        _print_and_log(f'Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!')
        return
    if not check_folders(audio_data_folder):
        # Audio not found locally: tell the user how to obtain it.
        prompt_download_urban_sound_8k(audio_data_folder)
        return
    if skip_manifest_creation is True:
        return
    # Build (or reuse) the SpeechBrain-format metadata CSV, then emit manifests.
    urban_sound_8k_speechbrain_metadata_csv_path = os.path.join(os.path.abspath(data_folder), 'metadata/', MODIFIED_METADATA_FILE_NAME)
    if not os.path.exists(urban_sound_8k_speechbrain_metadata_csv_path):
        urban_sound_8k_speechbrain_metadata_csv_path = create_metadata_speechbrain_file(data_folder)
    metadata = load_data_csv(urban_sound_8k_speechbrain_metadata_csv_path)
    logger.info(f'Creating {save_json_train}, {save_json_valid}, and {save_json_test}')
    create_json(metadata, audio_data_folder, train_fold_nums, save_json_train)
    create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid)
    create_json(metadata, audio_data_folder, test_fold_nums, save_json_test)
def get_step_index(cfg, cur_epoch):
    """Return the index of the LR-decay interval containing `cur_epoch`.

    Intervals are delimited by cfg.SOLVER.STEPS and closed off by
    cfg.SOLVER.MAX_EPOCH. An epoch before the first step maps to -1; an epoch
    at or past MAX_EPOCH maps to the last interval.
    """
    boundaries = list(cfg.SOLVER.STEPS) + [cfg.SOLVER.MAX_EPOCH]
    # First boundary strictly greater than cur_epoch; default to the final
    # index when no boundary exceeds it (matches loop-without-break behavior).
    crossed = next((i for i, boundary in enumerate(boundaries) if cur_epoch < boundary), len(boundaries) - 1)
    return crossed - 1
def apply_hooks(operation: APIOperation, context: HookContext, hooks: (HookDispatcher | None), strategy: st.SearchStrategy, location: str) -> st.SearchStrategy:
    """Apply user-defined hooks for the given parameter location to a Hypothesis strategy.

    `location` (e.g. a parameter's "in" value) is mapped to its container name
    and forwarded, together with all hook dispatchers, to
    apply_to_all_dispatchers, which returns the (possibly modified) strategy.
    """
    container = LOCATION_TO_CONTAINER[location]
    return apply_to_all_dispatchers(operation, context, hooks, strategy, container)
def index_num_in_tokenized_utterance(tokenized_utterance, ent_mask=None):
    """Map each integer literal in a whitespace-tokenized utterance to the
    token positions where it occurs.

    Parameters:
        tokenized_utterance: space-separated token string.
        ent_mask: optional per-token booleans; masked (True) tokens are skipped.
            Must match the token count when provided.

    Returns:
        dict mapping int value -> list of token indices (in order of appearance).
    """
    tokens = tokenized_utterance.split()
    if ent_mask is None:
        ent_mask = [False] * len(tokens)
    assert len(tokens) == len(ent_mask)
    num2idxs = {}
    for position, (token, masked) in enumerate(zip(tokens, ent_mask)):
        if masked:
            continue
        try:
            value = int(token)
        except ValueError:
            # Not an integer literal; ignore.
            continue
        num2idxs.setdefault(value, []).append(position)
    return num2idxs
def get_config_single(config_path: str, overwrites: str=None) -> Dict[(str, any)]:
    """Load a single YAML config, optionally applying comma-separated overwrites.

    Parameters:
        config_path: path to a config.yaml (or to a directory containing one);
            relative names may also refer to a bundled huggingface-modelhub config.
        overwrites: comma-separated YAML snippets ('key: value'); the escaped
            sequence '\\n' is expanded to a real newline before parsing.

    Returns:
        The parsed config dict with overwrites applied last.
    """
    config_path_yaml = config_path
    if (not config_path.endswith('config.yaml')):
        config_path_yaml = os.path.join(config_path, 'config.yaml')
    # Fall back to a local huggingface-modelhub config when the path doesn't
    # resolve and isn't absolute.
    if ((not os.path.exists(config_path_yaml)) and (not os.path.isabs(config_path))):
        local_hf_config = os.path.join(os.getcwd(), 'config', 'huggingface_modelhub', (config_path + '.yaml'))
        if os.path.exists(local_hf_config):
            config_path_yaml = local_hf_config
        else:
            raise Exception((config_path + ' does not exist locally & is not a known huggingface config (if using hf you need to create a local config file in config/huggingface_modelhub)'))
    with open(config_path_yaml, 'r') as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
    if ((overwrites is not None) and (overwrites != '')):
        # Each comma-separated part is parsed as its own YAML mapping and
        # merged over the base config (later parts win).
        over_parts = [yaml.load(x.replace('\\n', '\n'), Loader=yaml.FullLoader) for x in overwrites.split(',')]
        for d in over_parts:
            for (key, value) in d.items():
                cfg[key] = value
    return cfg
# NOTE(review): this bare call looks like a truncated decorator line
# (presumably `@test_utils.test()` or similar) that lost its `@` and prefix
# during extraction -- confirm against the original file.
_utils.test()

def test_3d():
    # A 3D field written only over the sub-range defined by ti.ndrange;
    # everything outside that range must stay zero.
    x = ti.field(ti.f32, shape=(16, 32, 64))

    # NOTE(review): this looks like it should be a `@ti.kernel`; the decorator
    # appears to have been lost in extraction -- confirm against the original.
    def func():
        for (i, j, k) in ti.ndrange((4, 10), (3, 8), 17):
            x[(i, j, k)] = ((i + (j * 10)) + (k * 100))
    func()
    for i in range(16):
        for j in range(32):
            for k in range(64):
                if ((4 <= i < 10) and (3 <= j < 8) and (k < 17)):
                    assert (x[(i, j, k)] == ((i + (j * 10)) + (k * 100)))
                else:
                    assert (x[(i, j, k)] == 0)
def make_batch_bert(sessions):
    """Collate dialogue sessions into BERT input tokens and label tensors.

    Each session is (data, label_list) where data is
    (context_speaker, context, emotion, sentiment). The dialogue context is
    flattened into one string with '<sN>' speaker markers, tokenized with
    right-truncation, and padded into a batch.

    Returns:
        (batch_input_tokens, batch_labels): padded token tensor and a LongTensor
        of label indices.
    """
    (batch_input, batch_labels) = ([], [])
    for session in sessions:
        data = session[0]
        label_list = session[1]
        (context_speaker, context, emotion, sentiment) = data
        now_speaker = context_speaker[(- 1)]
        speaker_utt_list = []
        inputString = ''
        for (turn, (speaker, utt)) in enumerate(zip(context_speaker, context)):
            # Prefix every utterance with its speaker marker, e.g. '<s1> '.
            inputString += (('<s' + str((speaker + 1))) + '> ')
            inputString += (utt + ' ')
            # NOTE(review): speaker_utt_list is built but never used below --
            # looks like dead code (or a leftover from a removed feature);
            # confirm whether encode_right_truncated here has needed side effects
            # before removing.
            if ((turn < (len(context_speaker) - 1)) and (speaker == now_speaker)):
                speaker_utt_list.append(encode_right_truncated(utt, bert_tokenizer))
        concat_string = inputString.strip()
        batch_input.append(encode_right_truncated(concat_string, bert_tokenizer))
        # More than 3 candidate labels -> emotion task, otherwise sentiment task.
        if (len(label_list) > 3):
            label_ind = label_list.index(emotion)
        else:
            label_ind = label_list.index(sentiment)
        batch_labels.append(label_ind)
    batch_input_tokens = padding(batch_input, bert_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
def get_visible_commands_starting_with(ctx, starts_with):
    """Yield every non-hidden command whose name starts with `starts_with`.

    Parameters:
        ctx: a click-style context whose `command` can list and resolve commands.
        starts_with: required name prefix.

    Yields:
        Command objects (not names) that match the prefix and are not hidden.
    """
    for name in ctx.command.list_commands(ctx):
        if not name.startswith(starts_with):
            continue
        command = ctx.command.get_command(ctx, name)
        if not command.hidden:
            yield command
def add_model_args(parser):
    """Register model-selection command-line arguments.

    Parameters:
        parser: an argparse.ArgumentParser to extend.

    Returns:
        The argparse argument group containing the model options.
    """
    group = parser.add_argument_group('Model configuration')
    # Architecture is mandatory; valid choices come from the model registry.
    group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', required=True, choices=ARCH_MODEL_REGISTRY.keys(), help='Model Architecture')
    group.add_argument('--criterion', default='cross_entropy', metavar='CRIT', choices=CRITERION_REGISTRY.keys(), help='Training Criterion')
    return group
# NOTE(review): the leading '.mpl_image_compare' looks like a truncated
# decorator (presumably `@pytest.mark.mpl_image_compare`) that lost its
# prefix during extraction -- confirm against the original file.
.mpl_image_compare
def test_random_summary_bar_with_data():
    # Image-comparison test: render a SHAP bar summary plot on random data
    # and return the figure for pixel comparison against a stored baseline.
    np.random.seed(0)
    fig = plt.figure()
    shap.summary_plot(np.random.randn(20, 5), np.random.randn(20, 5), plot_type='bar', show=False)
    fig.set_layout_engine('tight')
    return fig
class UniDaTrainer(DefaultTrainer):
    # Trainer for universal domain adaptation: alternates batches from a
    # labeled source loader and an unlabeled target loader, and evaluates
    # with a UniDA evaluator that handles out-of-distribution predictions.
    def __init__(self, cfg):
        super().__init__(cfg)
        (self.source_data_loader, self.target_data_loader, self.test_data_loader, self.val_data_loader) = self.build_data_loaders(cfg)
        self.evaluator = self.build_evaluator(cfg)
        self.max_iter = cfg.max_iter
        self.cfg = cfg
        # When precomputed features are used, the backbone becomes a pass-through.
        if self.use_features:
            self.model.backbone = torch.nn.Identity()
            # NOTE(review): nesting reconstructed from flattened source --
            # presumably last-layer finetuning swaps in the partial model only
            # in the precomputed-features case; confirm against the original.
            if cfg.ft_last_layer:
                self.model.backbone = self.model.partial_model

    def build_data_loaders(self, cfg):
        # Prefer precomputed feature files (fixed backbone / last-layer FT)
        # when they exist; otherwise fall back to loading raw images.
        feature_dir = os.path.join(cfg.feature_dir, f'features-imgAug_{cfg.image_augmentation}', cfg.backbone.replace('/', ''), cfg.dataset)
        source_feature_path = os.path.join(feature_dir, f'{cfg.source_domain}.pth')
        target_feature_path = os.path.join(feature_dir, f'{cfg.target_domain}.pth')
        if ((cfg.fixed_backbone or cfg.ft_last_layer) and os.path.exists(source_feature_path) and os.path.exists(target_feature_path)):
            self.use_features = True
            print('Use pretrained features as dataloader')
            # Feature tensors need no worker processes.
            cfg.num_workers = 0
        else:
            self.use_features = False
            print('Use I/O images as dataloader')
            source_feature_path = None
            target_feature_path = None
        return build_data_loaders(cfg.dataset, cfg.data_dir, cfg.source_domain, cfg.target_domain, cfg.n_share, cfg.n_source_private, cfg.image_augmentation, cfg.backbone, cfg.no_balanced, cfg.batch_size, cfg.num_workers, source_feature_path=source_feature_path, target_feature_path=target_feature_path, test_feature_path=target_feature_path, val_feature_path=source_feature_path)

    def build_evaluator(self, cfg):
        n_source_classes = (cfg.n_share + cfg.n_source_private)
        return UniDAEvaluator(n_source_classes)

    def load(self, checkpoint_path=None):
        # Default to the model's own checkpoint location when none is given.
        if (checkpoint_path is None):
            checkpoint_path = self.model.get_save_checkpoint_dir(self.cfg.fixed_backbone)
        state_dict = torch.load(checkpoint_path)
        self.model.load_state_dict(state_dict, strict=False)

    def train(self, cfg=None):
        self.model.before_training(cfg={'source_data_loader': self.source_data_loader, 'target_data_loader': self.target_data_loader, 'test_data_loader': self.test_data_loader, 'val_data_loader': self.val_data_loader})
        source_loader_iter = iter(self.source_data_loader)
        target_loader_iter = iter(self.target_data_loader)
        for step in range(self.max_iter):
            # Fetch a source batch, restarting the (finite) iterator on exhaustion.
            if self.model.require_source:
                try:
                    source_batch_datas = next(source_loader_iter)
                except StopIteration:
                    source_loader_iter = iter(self.source_data_loader)
                    source_batch_datas = next(source_loader_iter)
                (source_images, source_labels) = (source_batch_datas['img'], source_batch_datas['label'])
            else:
                (source_images, source_labels) = (None, None)
            # Same for the (unlabeled) target batch; indices identify samples.
            if self.model.require_target:
                try:
                    target_batch_datas = next(target_loader_iter)
                except StopIteration:
                    target_loader_iter = iter(self.target_data_loader)
                    target_batch_datas = next(target_loader_iter)
                (target_images, target_indexs) = (target_batch_datas['img'], target_batch_datas['idx'])
            else:
                (target_images, target_indexs) = (None, None)
            batched_inputs = {'source_images': source_images, 'source_labels': source_labels, 'target_images': target_images, 'target_indexs': target_indexs}
            # The Oracle baseline is allowed to see target labels.
            if isinstance(self.model, Oracle):
                batched_inputs['target_labels'] = target_batch_datas['label']
            self.model.before_forward()
            loss_dict = self.model(batched_inputs=batched_inputs)
            # NOTE(review): the update block below is nested under this check in
            # the reconstruction (otherwise `losses` would be unbound when the
            # model returns None) -- confirm against the original indentation.
            if (loss_dict is not None):
                if isinstance(loss_dict, torch.Tensor):
                    # A bare tensor is treated as the total loss.
                    losses = loss_dict
                    loss_dict = {'total_loss': loss_dict}
                else:
                    losses = sum(loss_dict.values())
                '\n            If you need to accumulate gradients or do something similar, you can\n            wrap the optimizer with your custom `zero_grad()` method.\n            '
                self.optimizer.zero_grad()
                losses.backward()
                metrics_dict = {k: v.detach().cpu().item() for (k, v) in loss_dict.items()}
                metrics_dict['step'] = f'{step}/{self.max_iter}'
                metrics_dict['lr'] = self.lr_scheduler.get_last_lr()[(- 1)]
                self._write_metrics(metrics_dict)
                self.optimizer.step()
                self.lr_scheduler.step()
                self.model.after_backward()
        self.model.after_training()

    def test(self, cfg=None):
        self.model.before_predict(cfg={'val_data_loader': self.val_data_loader, 'test_data_loader': self.test_data_loader})
        self.evaluator.reset()
        # cfg carries the current training step for checkpoint-style naming.
        if (cfg is not None):
            current_step = cfg['step']
        else:
            current_step = 'final'
        logits = []
        iid_scores = []
        true_labels = []
        predict_labels = []
        predict_labels_without_ood = []
        for batch_datas in tqdm(self.test_data_loader):
            batched_inputs = {'test_images': batch_datas['img']}
            result_dict = self.model.predict(batched_inputs=batched_inputs)
            self.evaluator.process(batch_datas['label'], result_dict['predict_labels'], result_dict['predict_labels_without_ood'], result_dict['iid_scores'], result_dict['features'])
            true_labels.append(batch_datas['label'].cpu().detach())
            # Each output is optional depending on the method; collect what exists.
            if (result_dict['logits'] is not None):
                logits.append(result_dict['logits'].cpu().detach())
            if (result_dict['iid_scores'] is not None):
                iid_scores.append(result_dict['iid_scores'].cpu().detach())
            if (result_dict['predict_labels'] is not None):
                predict_labels.append(result_dict['predict_labels'].cpu().detach())
            if (result_dict['predict_labels_without_ood'] is not None):
                predict_labels_without_ood.append(result_dict['predict_labels_without_ood'].cpu().detach())
        if (not self.cfg.eval_only):
            # Persist raw scores for offline analysis (skipped in eval-only mode).
            save_scores_pth = get_save_scores_dir(self.cfg.feature_dir, f'{self.cfg.method}_{self.cfg.backbone}-{self.cfg.fixed_backbone}_{self.cfg.classifier_head}_{self.cfg.optimizer}_{self.cfg.batch_size}_{self.cfg.base_lr}_{self.cfg.fixed_BN}_{self.cfg.image_augmentation}_{current_step}-{self.cfg.max_iter}', self.cfg.dataset, self.cfg.source_domain, self.cfg.target_domain, self.cfg.n_share, self.cfg.n_source_private, self.cfg.seed, prefix='scores')
            save_data = {'true_labels': torch.cat(true_labels), 'target_logits': (torch.cat(logits) if (len(logits) != 0) else None), 'iid_scores': (torch.cat(iid_scores) if (len(iid_scores) != 0) else None), 'predict_labels': (torch.cat(predict_labels) if (len(predict_labels) != 0) else None), 'predict_labels_without_ood': (torch.cat(predict_labels_without_ood) if (len(predict_labels_without_ood) != 0) else None)}
            torch.save(save_data, save_scores_pth)
        results = self.evaluator.evaluate()
        self._write_metrics(results)
        save_dir = get_save_dir(self.cfg.result_dir, self.cfg.dataset, self.cfg.method, self.cfg.source_domain, self.cfg.target_domain, self.cfg.n_share, self.cfg.n_source_private, self.cfg.backbone, self.cfg.optimizer, self.cfg.base_lr, self.cfg.classifier_head, self.cfg.fixed_backbone, self.cfg.fixed_BN, self.cfg.image_augmentation, self.cfg.batch_size, f'{current_step}-{self.cfg.max_iter}', self.cfg.seed)
        save_as_json(results, save_dir)
class PostProcessMentionEntityCounts(PipelineJob):
    """Pipeline job that trims the raw mention/entity count indexes down to the
    most popular entities and derives per-mention entity-id probability maps.

    Reads the pickled count indexes produced by earlier jobs and writes four
    derived pickle indexes (see ``provides``).
    """

    def __init__(self, preprocess_jobs: Dict[(str, PipelineJob)], opts):
        super().__init__(
            requires=[
                f'data/versions/{opts.data_version_name}/indexes/mention_entity_counter.pickle',
                f'data/versions/{opts.data_version_name}/indexes/entity_counter.pickle',
                f'data/versions/{opts.data_version_name}/indexes/linked_mention_counter.pickle',
                f'data/versions/{opts.data_version_name}/indexes/found_conll_entities.pickle',
            ],
            provides=[
                f'data/versions/{opts.data_version_name}/indexes/mention_entity_counter_popular_entities.pickle',
                f'data/versions/{opts.data_version_name}/indexes/popular_entity_counter_dict.pickle',
                f'data/versions/{opts.data_version_name}/indexes/popular_entity_to_id_dict.pickle',
                f'data/versions/{opts.data_version_name}/indexes/mention_to_popular_entity_id_probabilies_dicts_dict.pickle',
            ],
            preprocess_jobs=preprocess_jobs,
            opts=opts,
        )

    def _run(self):
        # Load the previously computed count indexes from disk.
        with open(f'data/versions/{self.opts.data_version_name}/indexes/entity_counter.pickle', 'rb') as f:
            all_entity_counter = pickle.load(f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/mention_entity_counter.pickle', 'rb') as f:
            all_mention_entity_counter = pickle.load(f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/found_conll_entities.pickle', 'rb') as f:
            all_found_conll_entities = pickle.load(f)
        # Keep only the globally most frequent entities as the vocabulary.
        popular_entity_counter_dict = dict(all_entity_counter.most_common()[:self.opts.num_most_freq_entities])
        if self.opts.add_missing_conll_entities:
            # Re-add entities seen in the CoNLL data so they stay in the
            # vocabulary even when they fall below the popularity cut-off.
            # NOTE(review): `count` increments for every CoNLL entity found in
            # the counter, including ones already in the popular set.
            count = 0
            for ent in all_found_conll_entities:
                if (ent in all_entity_counter):
                    popular_entity_counter_dict[ent] = all_entity_counter[ent]
                    count += 1
            self.log(f'Added {count} entities from the conll data back to the most popular entities vocabulary.')
        mention_entity_counter_popular_entities = dict()
        for (mention, entities) in tqdm.tqdm(all_mention_entity_counter.items()):
            # Per mention, keep only (entity, count) pairs whose entity is in the
            # popular vocabulary and was counted more than 9 times; store them
            # sorted by count (``most_common`` output).
            mention_entity_counter_popular_entities[mention] = Counter({k: v for (k, v) in filter((lambda t: ((t[0] in popular_entity_counter_dict) and (t[1] > 9))), entities.items())}).most_common()
        # Assign ids by global-popularity rank (0 = most frequent entity).
        popular_entity_to_id_dict = OrderedDict([(k, eid) for (eid, (k, v)) in enumerate(Counter(popular_entity_counter_dict).most_common())])
        # mention -> {entity_id: P(entity | mention)}, normalized over the kept pairs.
        mention_to_popular_entity_id_probabilies_dicts_dict = {m: {popular_entity_to_id_dict[ename]: (count / sum([val for (key, val) in entities])) for (ename, count) in entities if (ename in popular_entity_to_id_dict)} for (m, entities) in mention_entity_counter_popular_entities.items()}
        # Persist the derived indexes.
        with open(f'data/versions/{self.opts.data_version_name}/indexes/mention_entity_counter_popular_entities.pickle', 'wb') as f:
            pickle.dump(mention_entity_counter_popular_entities, f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/popular_entity_counter_dict.pickle', 'wb') as f:
            pickle.dump(popular_entity_counter_dict, f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/popular_entity_to_id_dict.pickle', 'wb') as f:
            pickle.dump(popular_entity_to_id_dict, f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/mention_to_popular_entity_id_probabilies_dicts_dict.pickle', 'wb') as f:
            pickle.dump(mention_to_popular_entity_id_probabilies_dicts_dict, f)
def prior(lower_bound=(- 10.0), upper_bound=10.0, D=2, rng=None):
    """Sample once from a uniform prior over ``[lower_bound, upper_bound)``.

    Args:
        lower_bound: lower edge of the uniform support.
        upper_bound: upper edge of the uniform support.
        D: dimensionality of the returned sample.
        rng: optional ``numpy.random.Generator``; a fresh default generator
            is created when omitted.

    Returns:
        ndarray of shape ``(D,)``.
    """
    generator = np.random.default_rng() if rng is None else rng
    return generator.uniform(low=lower_bound, high=upper_bound, size=D)
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index mapping.

    Each line of the file is one token; the token's index is its
    zero-based line number. Only the trailing newline is stripped, so
    tokens may contain other whitespace.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip('\n')] = index
    return vocab
class TestFromString(object):
    """Tests for constructing numpy scalar types from string literals."""

    def test_floating(self):
        # Every float width must parse a plain decimal literal.
        fsingle = np.single('1.234')
        fdouble = np.double('1.234')
        flongdouble = np.longdouble('1.234')
        assert_almost_equal(fsingle, 1.234)
        assert_almost_equal(fdouble, 1.234)
        assert_almost_equal(flongdouble, 1.234)

    def test_floating_overflow(self):
        # Literals beyond the representable range must saturate to +/-inf;
        # longdouble additionally emits a RuntimeWarning on overflow.
        fhalf = np.half('1e10000')
        assert_equal(fhalf, np.inf)
        fsingle = np.single('1e10000')
        assert_equal(fsingle, np.inf)
        fdouble = np.double('1e10000')
        assert_equal(fdouble, np.inf)
        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
        assert_equal(flongdouble, np.inf)
        fhalf = np.half('-1e10000')
        assert_equal(fhalf, (- np.inf))
        fsingle = np.single('-1e10000')
        assert_equal(fsingle, (- np.inf))
        fdouble = np.double('-1e10000')
        assert_equal(fdouble, (- np.inf))
        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
        assert_equal(flongdouble, (- np.inf))

    # FIX: the decorator was garbled to a bare `.skipif(...)` (a syntax error);
    # restored the standard `@pytest.mark.skipif` form.
    @pytest.mark.skipif(((sys.version_info[0] >= 3) or ((sys.platform == 'win32') and (platform.architecture()[0] == '64bit'))), reason="numpy.intp('0xff', 16) not supported on Py3 or 64 bit Windows")
    def test_intp(self):
        # Widest all-'f' hex literal that still fits in an intp.
        i_width = ((np.int_(0).nbytes * 2) - 1)
        np.intp(('0x' + ('f' * i_width)), 16)
        assert_raises(OverflowError, np.intp, ('0x' + ('f' * (i_width + 1))), 16)
        # Base must be valid (2..36 or 0); 32-char width is not a base here.
        assert_raises(ValueError, np.intp, '0x1', 32)
        assert_equal(255, np.intp('0xFF', 16))
def get_command(id_):
    """Build the training / preprocessing command line registered under ``id_``.

    Command ids are ``{dataset}_{model}`` (e.g. ``qasper_led-4096``) for
    training, with a ``_data`` suffix for preprocess-only runs. Returns the
    result of ``prep_command`` applied to the selected argument list.

    NOTE(review): relies on module-level constants (FB_BART_MAX_LEN,
    ALLEN_AI_* etc.) and helpers (``prep_command``) defined elsewhere in the file.
    """
    # Default DEBUG off unless already set; pin CUDA device ordering to the bus.
    os.environ['DEBUG'] = os.environ.get('DEBUG', 'false')
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    commands_dict = {}
    # Per-step token budget, GPU count, and gradient accumulation steps.
    tokens_bsz = 16384
    num_gpus = 8
    accum_steps = 1
    # Output folder name is derived from these hyperparameter names, '$'-joined.
    folder_suffix_params = ['max_source_length', 'gradient_accumulation_steps', 'learning_rate', 'train_max_tokens']
    folder_suffix = '$'.join(folder_suffix_params)
    # When False, eval skips generation (loss-only) via configs/no_metrics.json.
    generate_in_eval = False
    # BART variants differ only in --max_source_length (256 / 512 / full).
    fb_bart_256_args = [f'--model_name_or_path facebook/bart-base', f'--max_source_length 256', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--train_max_tokens {tokens_bsz}', f'--gradient_accumulation_steps {accum_steps}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}', f'--folder_suffix {folder_suffix}']
    fb_bart_512_args = [f'--model_name_or_path facebook/bart-base', f'--max_source_length 512', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--train_max_tokens {tokens_bsz}', f'--gradient_accumulation_steps {accum_steps}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}', f'--folder_suffix {folder_suffix}']
    fb_bart_1024_args = [f'--model_name_or_path facebook/bart-base', f'--max_source_length {FB_BART_MAX_LEN}', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--train_max_tokens {tokens_bsz}', f'--gradient_accumulation_steps {accum_steps}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}', f'--folder_suffix {folder_suffix}']
    # LED base args; --max_source_length is appended per variant below.
    allenai_led_args = ['--model_name_or_path allenai/led-base-16384', f'--attention_window {ALLEN_AI_ATTENTION_WINDOW}', f'--max_target_length {ALLEN_AI_MAX_TARGET_LEN}', f'--fp16 {ALLEN_AI_FP16}', f'--train_max_tokens {tokens_bsz}', f'--gradient_accumulation_steps {accum_steps}', f'--per_device_eval_batch_size {ALLEN_AI_per_device_eval_batch_size}']
    # Tuned learning rate per (dataset, model variant).
    all_learning_rates = {'qasper': {'256-bart': 5e-05, '512-bart': 5e-05, '1024-bart': 5e-05, 'led-1024': 2e-05, 'led-4096': 5e-05, 'led-16384': 2e-05}, 'narrative_qa': {'256-bart': 0.0001, '512-bart': 5e-05, '1024-bart': 5e-05, 'led-1024': 2e-05, 'led-4096': 1e-05, 'led-16384': 1e-05}, 'gov_report': {'256-bart': 0.0005, '512-bart': 0.0005, '1024-bart': 0.0002, 'led-1024': 0.0001, 'led-4096': 0.0001, 'led-16384': 5e-05}, 'summ_screen_fd': {'256-bart': 0.0002, '512-bart': 0.0002, '1024-bart': 0.0001, 'led-1024': 5e-05, 'led-4096': 2e-05, 'led-16384': 1e-05}, 'qmsum': {'256-bart': 0.0001, '512-bart': 5e-05, '1024-bart': 5e-05, 'led-1024': 5e-05, 'led-4096': 1e-05, 'led-16384': 1e-05}, 'contract_nli': {'256-bart': 0.0001, '512-bart': 0.0001, '1024-bart': 5e-05, 'led-1024': 5e-05, 'led-4096': 2e-05, 'led-16384': 1e-05}, 'quality': {'256-bart': 5e-05, '512-bart': 2e-05, '1024-bart': 1e-05, 'led-1024': 2e-05, 'led-4096': 1e-05, 'led-16384': 1e-05}}
    # Use torch.distributed.run only when training on multiple GPUs.
    distributed_str = (f'-m torch.distributed.run --nproc_per_node={num_gpus}' if (num_gpus > 1) else '')
    for dataset in ['qasper', 'narrative_qa', 'gov_report', 'summ_screen_fd', 'qmsum', 'contract_nli', 'quality']:
        # Shared trainer flags; metric-free eval flags appended when not generating.
        base_args = ([f'python {distributed_str} src/run.py configs/train.json', f'--m configs/datasets/{dataset}.json', '--adam_epsilon 1e-6', '--adam_beta1 0.9', '--adam_beta2 0.98', '--weight_decay 0.001', '--logging_steps 10', '--gradient_checkpointing true', '--save_total_limit 2', '--preprocessing_num_workers 1', '--group_by_length true', '--do_eval True', '--load_best_model_at_end True', '--lr_scheduler linear', '--warmup_ratio 0.1'] + (['--m configs/no_metrics.json', '--predict_with_generate False', '--prediction_loss_only True'] if (not generate_in_eval) else []))
        if (dataset == 'narrative_qa'):
            base_args.append('--trim_very_long_strings')
        dataset_learning_rates = all_learning_rates[dataset]
        # Training commands: BART at three source lengths, LED at three windows.
        commands_dict[f'{dataset}_256-bart'] = ((base_args + fb_bart_256_args) + [f"--learning_rate {dataset_learning_rates['256-bart']}"])
        commands_dict[f'{dataset}_512-bart'] = ((base_args + fb_bart_512_args) + [f"--learning_rate {dataset_learning_rates['512-bart']}"])
        commands_dict[f'{dataset}_1024-bart'] = ((base_args + fb_bart_1024_args) + [f"--learning_rate {dataset_learning_rates['1024-bart']}"])
        commands_dict[f'{dataset}_led-1024'] = ((base_args + allenai_led_args) + [f"--learning_rate {dataset_learning_rates['led-1024']}", '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', f'--max_source_length 1024'])
        commands_dict[f'{dataset}_led-4096'] = ((base_args + allenai_led_args) + [f"--learning_rate {dataset_learning_rates['led-4096']}", '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', f'--max_source_length 4096'])
        commands_dict[f'{dataset}_led-16384'] = ((base_args + allenai_led_args) + [f"--learning_rate {dataset_learning_rates['led-16384']}", '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', '--max_source_length 16384'])
        # Preprocess-only commands run single-process: drop the distributed launcher.
        prepro_args = base_args[:]
        prepro_args[0] = prepro_args[0].replace(distributed_str, '')
        commands_dict[f'{dataset}_256-bart_data'] = ((prepro_args + fb_bart_256_args) + ['--preprocess_only', '--learning_rate 1e-3'])
        commands_dict[f'{dataset}_512-bart_data'] = ((prepro_args + fb_bart_512_args) + ['--preprocess_only', '--learning_rate 1e-3'])
        commands_dict[f'{dataset}_1024-bart_data'] = ((prepro_args + fb_bart_1024_args) + ['--preprocess_only', '--learning_rate 1e-3'])
        commands_dict[f'{dataset}_led-1024_data'] = ((prepro_args + allenai_led_args) + ['--preprocess_only', '--learning_rate 1e-3', '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', '--max_source_length 1024'])
        commands_dict[f'{dataset}_led-4096_data'] = ((prepro_args + allenai_led_args) + ['--preprocess_only', '--learning_rate 1e-3', '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', '--max_source_length 4096'])
        commands_dict[f'{dataset}_led-16384_data'] = ((prepro_args + allenai_led_args) + ['--preprocess_only', '--learning_rate 1e-3', '--global_attention_first_token True', f'--folder_suffix global_attention_first_token${folder_suffix}', '--max_source_length 16384'])
    command_parts = commands_dict[id_]
    return prep_command(command_parts)
class VideoKeyframeDataset(Dataset):
    """Dataset that decodes the keyframes of each video into an RGB tensor batch."""

    # Returned when a video has no keyframes or none could be decoded:
    # an empty NCHW batch so downstream code can rely on the rank.
    _EMPTY_FRAMES = torch.empty((0, 3, 1, 1))

    def __init__(self, video_list: List[str], frame_selector: Optional[FrameSelector]=None, transform: Optional[FrameTransform]=None):
        """Store the video paths and optional keyframe selector / frame transform."""
        self.video_list = video_list
        self.frame_selector = frame_selector
        self.transform = transform

    def __getitem__(self, idx: int) -> torch.Tensor:
        """Decode the (optionally filtered) keyframes of video ``idx``."""
        video_path = self.video_list[idx]
        selected = list_keyframes(video_path)
        if not selected:
            return self._EMPTY_FRAMES
        if self.frame_selector is not None:
            selected = self.frame_selector(selected)
        decoded = read_keyframes(video_path, selected)
        if not decoded:
            return self._EMPTY_FRAMES
        batch = np.stack([frame.to_rgb().to_ndarray() for frame in decoded])
        batch = torch.as_tensor(batch, device=torch.device('cpu'))
        return batch if self.transform is None else self.transform(batch)

    def __len__(self):
        """Number of videos in the dataset."""
        return len(self.video_list)
def load_examples_copa_rev(path):
    """Load COPA items from an XML file in the *reversed* format, where each
    alternative is used as the premise and the original premise as the hypothesis.

    Args:
        path: path to a COPA XML file whose root contains ``<item>`` elements
            with ``most-plausible-alternative`` and ``asks-for`` attributes and
            three children: premise, alternative 1, alternative 2.

    Returns:
        List of dicts with ``options`` (two premise/hypothesis pairs) and the
        zero-based ``label`` of the correct alternative.

    Raises:
        ValueError: if an item's ``asks-for`` is neither 'effect' nor 'cause'.
    """
    root = ET.parse(path).getroot()
    examples_copa = []
    for type_tag in root.findall('item'):
        value = type_tag.get('most-plausible-alternative')
        asks_for = type_tag.get('asks-for')
        children = list(type_tag)
        # Lower-case the first character of each text; the alternatives also
        # drop their final character (the trailing period).
        p = (children[0].text[:1].lower() + children[0].text[1:])
        a1 = (children[1].text[:1].lower() + children[1].text[1:(- 1)])
        a2 = (children[2].text[:1].lower() + children[2].text[1:(- 1)])
        # Connective chosen for the reversed order (alternative precedes premise).
        if (asks_for == 'effect'):
            bridge = ' because'
        elif (asks_for == 'cause'):
            bridge = ' so'
        else:
            # FIX: was `assert False`, which is silently stripped under `python -O`;
            # raise an explicit error for malformed data instead.
            raise ValueError(f'unexpected asks-for value: {asks_for!r}')
        examples_copa += [{'options': [{'premise': ((' ' + a1) + bridge), 'hypothesis': (' ' + p), 'uncond_premise': bridge, 'uncond_hypothesis': (' ' + p)}, {'premise': ((' ' + a2) + bridge), 'hypothesis': (' ' + p), 'uncond_premise': bridge, 'uncond_hypothesis': (' ' + p)}], 'label': (int(value) - 1)}]
    return examples_copa
# FIX: the source had a bare `_numpy_output(check_dtype=True)` before the `def`
# (a syntax error) — restored as a decorator. NOTE(review): the decorator name
# appears truncated by extraction; confirm the original identifier
# (e.g. `compare_numpy_output`) in this test module.
@_numpy_output(check_dtype=True)
def test_ufunc_invert_f(A: dace.float32[10]):
    """Bitwise inversion of a float32 array; result is checked against NumPy
    (including dtype) by the decorator."""
    return np.invert(A)
# FIX: the source had a garbled bare `('categorical_accuracy')` before the
# class — restored as the metric-registry decorator. NOTE(review): confirm
# the registry API (`Metric.register`) matches this project's base class.
@Metric.register('categorical_accuracy')
class CategoricalAccuracy(Metric):
    """Top-K categorical accuracy: a prediction is correct when the gold label
    is among the K highest-scoring classes."""

    def __init__(self, top_k: int=1) -> None:
        self._top_k = top_k
        self.correct_count = 0.0
        self.total_count = 0.0

    def __call__(self, predictions: torch.Tensor, gold_labels: torch.Tensor, mask: Optional[torch.Tensor]=None):
        """Accumulate correct/total counts for one batch.

        Args:
            predictions: score tensor whose last dimension is the classes.
            gold_labels: integer labels, shaped like ``predictions`` minus the
                class dimension.
            mask: optional mask over ``gold_labels``; masked-out positions are
                excluded from both counts.

        Raises:
            ConfigurationError: on shape mismatch or out-of-range labels.
        """
        (predictions, gold_labels, mask) = self.unwrap_to_tensors(predictions, gold_labels, mask)
        num_classes = predictions.size((- 1))
        if (gold_labels.dim() != (predictions.dim() - 1)):
            raise ConfigurationError('gold_labels must have dimension == predictions.size() - 1 but found tensor of shape: {}'.format(predictions.size()))
        if (gold_labels >= num_classes).any():
            raise ConfigurationError('A gold label passed to Categorical Accuracy contains an id >= {}, the number of classes.'.format(num_classes))
        # Clamp K to the class count so topk never fails for small class spaces.
        top_k = predictions.topk(min(self._top_k, predictions.shape[(- 1)]), (- 1))[1]
        correct = top_k.eq(gold_labels.long().unsqueeze((- 1))).float()
        if (mask is not None):
            correct *= mask.unsqueeze((- 1))
            self.total_count += mask.sum()
        else:
            self.total_count += gold_labels.numel()
        self.correct_count += correct.sum()

    def get_metric(self, reset: bool=False):
        """Return the accumulated accuracy (0.0 if no batches were processed)."""
        if self.total_count > 0:
            accuracy = (float(self.correct_count) / float(self.total_count))
        else:
            # FIX: original divided unconditionally and raised ZeroDivisionError
            # when called before any update.
            accuracy = 0.0
        if reset:
            self.reset()
        return accuracy

    def reset(self):
        """Clear the accumulated counts."""
        self.correct_count = 0.0
        self.total_count = 0.0
def compare_headers(request, serialized):
    """Assert that each header in ``serialized`` matches the request's headers.

    Rebuilds a multi-valued header mapping from the serialized form and
    compares it name-by-name against ``request.headers``.
    """
    rebuilt = HTTPHeaderDict()
    for name, values in serialized.items():
        for item in values:
            rebuilt.add(name, item)
        assert request.headers[name] == rebuilt[name]
class FileOperator(object):
    """File-system helper that supports dry runs and change recording, so a
    sequence of writes can be committed or rolled back."""

    def __init__(self, dry_run=False):
        # When dry_run is True, mutating operations only log.
        self.dry_run = dry_run
        # Directories already verified/created by ensure_dir.
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        # Reset change tracking; recording is off until enabled externally.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        """Track ``path`` as written, if recording is enabled."""
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Return True if ``source`` is newer than ``target`` (or target is missing).

        Raises DistlibException if ``source`` does not exist.
        """
        if (not os.path.exists(source)):
            raise DistlibException(("file '%r' does not exist" % os.path.abspath(source)))
        if (not os.path.exists(target)):
            return True
        return (os.stat(source).st_mtime > os.stat(target).st_mtime)

    def copy_file(self, infile, outfile, check=True):
        """Copy ``infile`` to ``outfile``, refusing to overwrite symlinks or
        non-regular files when ``check`` is True."""
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if (not self.dry_run):
            msg = None
            if check:
                if os.path.islink(outfile):
                    msg = ('%s is a symlink' % outfile)
                elif (os.path.exists(outfile) and (not os.path.isfile(outfile))):
                    msg = ('%s is a non-regular file' % outfile)
            if msg:
                raise ValueError((msg + ' which would be overwritten'))
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        """Copy a stream to ``outfile``; binary when ``encoding`` is None,
        text with the given encoding otherwise."""
        assert (not os.path.isdir(outfile))
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if (not self.dry_run):
            if (encoding is None):
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        """Write ``data`` bytes to ``path``, replacing any existing file."""
        self.ensure_dir(os.path.dirname(path))
        if (not self.dry_run):
            if os.path.exists(path):
                os.remove(path)
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        """Write text ``data`` to ``path`` encoded with ``encoding``."""
        self.write_binary_file(path, data.encode(encoding))

    def set_mode(self, bits, mask, files):
        """OR ``bits`` into each file's mode and AND with ``mask`` (POSIX only)."""
        if ((os.name == 'posix') or ((os.name == 'java') and (os._name == 'posix'))):
            for f in files:
                if self.dry_run:
                    logger.info('changing mode of %s', f)
                else:
                    mode = ((os.stat(f).st_mode | bits) & mask)
                    logger.info('changing mode of %s to %o', f, mode)
                    os.chmod(f, mode)
    # 365 == 0o555 (r-xr-xr-x), 4095 == 0o7777 (full permission mask).
    set_executable_mode = (lambda s, f: s.set_mode(365, 4095, f))

    def ensure_dir(self, path):
        """Create ``path`` (and parents, recursively) if missing, once per path."""
        path = os.path.abspath(path)
        if ((path not in self.ensured) and (not os.path.exists(path))):
            self.ensured.add(path)
            (d, f) = os.path.split(path)
            self.ensure_dir(d)
            logger.info(('Creating %s' % path))
            if (not self.dry_run):
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
        """Byte-compile ``path`` to its cache location and return that location.

        Compiles only when ``force`` or the source is newer than the cache;
        ``prefix`` is stripped from the path reported in error diagnostics;
        ``hashed_invalidation`` opts into hash-based pyc invalidation where
        supported (Python 3.7+).
        """
        dpath = cache_from_source(path, (not optimize))
        logger.info('Byte-compiling %s to %s', path, dpath)
        if (not self.dry_run):
            if (force or self.newer(path, dpath)):
                if (not prefix):
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
                compile_kwargs = {}
                if (hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode')):
                    compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
                # Fourth positional arg (doraise=True) raises on compile errors.
                py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        """Remove a file, symlink, or directory tree at ``path`` if present."""
        if os.path.exists(path):
            if (os.path.isdir(path) and (not os.path.islink(path))):
                logger.debug('Removing directory tree at %s', path)
                if (not self.dry_run):
                    shutil.rmtree(path)
                if self.record:
                    if (path in self.dirs_created):
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if (not self.dry_run):
                    os.remove(path)
                if self.record:
                    if (path in self.files_written):
                        self.files_written.remove(path)

    def is_writable(self, path):
        """Return True if ``path`` (or its nearest existing ancestor) is writable."""
        result = False
        while (not result):
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if (parent == path):
                # Reached the filesystem root without finding an existing path.
                break
            path = parent
        return result

    def commit(self):
        """Return the recorded (files_written, dirs_created) and reset recording."""
        assert self.record
        result = (self.files_written, self.dirs_created)
        self._init_record()
        return result

    def rollback(self):
        """Undo recorded changes: remove written files and created directories
        (deepest first; a leftover __pycache__ child is removed too)."""
        if (not self.dry_run):
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # Sort reversed so children are removed before their parents.
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert (flist == ['__pycache__'])
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)
        self._init_record()
class FFN(nn.Module):
    """Position-wise feed-forward block: a two-layer MLP that expands the
    hidden size to the feed-forward size and projects back."""

    def __init__(self, __C):
        super(FFN, self).__init__()
        # hidden -> ff -> hidden, with dropout and ReLU handled inside MLP.
        self.mlp = MLP(
            in_size=__C.HIDDEN_SIZE,
            mid_size=__C.FF_SIZE,
            out_size=__C.HIDDEN_SIZE,
            dropout_r=__C.DROPOUT_R,
            use_relu=True,
        )

    def forward(self, x):
        """Apply the feed-forward MLP to ``x``."""
        return self.mlp(x)
class IdentificationClassificationModelOutput(ModelOutput):
    """Model output bundling losses and logits for a joint classification +
    span-identification model."""

    # Combined training loss; None at inference time.
    loss: Optional[torch.FloatTensor] = None
    # Classification-head loss component; None at inference time.
    loss_cls: Optional[torch.FloatTensor] = None
    # Span-head loss component; None at inference time.
    loss_span: Optional[torch.FloatTensor] = None
    # Classification scores. NOTE(review): shape not visible here — presumably
    # (batch, num_classes); confirm against the producing model.
    class_logits: torch.FloatTensor = None
    # Span scores. NOTE(review): shape not visible here; confirm against the
    # producing model.
    span_logits: torch.FloatTensor = None
def split_files(org_dir, split_dir, short_name=None, train_size=0.7, dev_size=0.15, rotation=None):
    """Split the trees found in ``org_dir``'s ``.mrg`` files into train/dev/test
    files under ``split_dir``.

    Args:
        org_dir: directory of ``.mrg`` files, one tree per non-empty line.
        split_dir: output directory (created if missing).
        short_name: passed to ``create_paths`` to name the output files.
        train_size / dev_size: fractions of the data. If they sum to >= 1.0 no
            test slice is made; if both are 0 everything goes to test.
        rotation: optional (i, k) pair; rotates the train+dev portion by i/k of
            the data (the test tail is left in place) for cross-validation.
    """
    os.makedirs(split_dir, exist_ok=True)
    if ((train_size + dev_size) >= 1.0):
        print('Not making a test slice with the given ratios: train {} dev {}'.format(train_size, dev_size))
    # Shuffled file order and output paths come from project helpers.
    file_names = create_shuffle_list(org_dir)
    (train_path, dev_path, test_path) = create_paths(split_dir, short_name)
    num_samples = get_num_samples(org_dir, file_names)
    print('Found {} total samples in {}'.format(num_samples, org_dir))
    stop_train = int((num_samples * train_size))
    # Choose which output files to write and their cumulative cut-offs.
    if ((train_size + dev_size) >= 1.0):
        stop_dev = num_samples
        output_limits = (stop_train, stop_dev)
        output_names = (train_path, dev_path)
        print('Splitting {} train, {} dev'.format(stop_train, (stop_dev - stop_train)))
    elif ((train_size + dev_size) > 0.0):
        stop_dev = int((num_samples * (train_size + dev_size)))
        output_limits = (stop_train, stop_dev, num_samples)
        output_names = (train_path, dev_path, test_path)
        print('Splitting {} train, {} dev, {} test'.format(stop_train, (stop_dev - stop_train), (num_samples - stop_dev)))
    else:
        stop_dev = 0
        output_limits = (num_samples,)
        output_names = (test_path,)
        print('Copying all {} trees to test'.format(num_samples))
    count = 0
    trees = []
    # Collect all trees, one per non-empty line, in shuffled file order.
    for filename in file_names:
        if (not filename.endswith('.mrg')):
            continue
        with open(os.path.join(org_dir, filename), encoding='utf-8') as reader:
            new_trees = reader.readlines()
            new_trees = [x.strip() for x in new_trees]
            new_trees = [x for x in new_trees if x]
        trees.extend(new_trees)
    if ((rotation is not None) and (rotation[0] > 0)):
        # Rotate only the train+dev region [0, stop_dev); the tail keeps its place.
        rotation_start = ((len(trees) * rotation[0]) // rotation[1])
        rotation_end = stop_dev
        trees = ((trees[rotation_start:rotation_end] + trees[:rotation_start]) + trees[rotation_end:])
    tree_iter = iter(trees)
    # Write each slice up to its cumulative limit, one tree per line.
    for (write_path, count_limit) in zip(output_names, output_limits):
        with open(write_path, 'w', encoding='utf-8') as writer:
            while (count < count_limit):
                next_tree = next(tree_iter, None)
                if (next_tree is None):
                    raise RuntimeError('Ran out of trees before reading all of the expected trees')
                writer.write(next_tree)
                writer.write('\n')
                count += 1
def read_pretrain_eval_data(pretrain_data_dir):
    """Read per-language validation JSONL files from ``pretrain_data_dir``.

    Each ``<lang>_valid.jsonl`` file contributes a list of ``Example`` objects
    (one per line, with the line index as ``idx``).

    Returns:
        dict mapping language name -> list of Example.
    """
    all_valid_files = [f for f in os.listdir(pretrain_data_dir) if f.endswith('_valid.jsonl')]
    # Strip the '_valid.jsonl' suffix to recover the language name.
    languages = [f[:(- len('_valid.jsonl'))] for f in all_valid_files]
    print(f'Found Languages : {languages}')
    examples_dict = {}
    for lang in languages:
        examples = []
        # FIX: the original opened the file without ever closing it; use a
        # context manager so the handle is released per language.
        with open(os.path.join(pretrain_data_dir, (lang + '_valid.jsonl'))) as fp:
            for (li, line) in enumerate(fp):
                d = json.loads(line.strip())
                examples.append(Example(idx=li, source=d['source'], target=d['target'], meta_data={'transformer': d['transformer'], 'lang': lang}))
        examples_dict[lang] = examples
    return examples_dict
def simulator(theta, n_obs=4, flatten=True, rng=None):
    """Sample ``n_obs`` points from a bivariate Gaussian parameterized by theta.

    Args:
        theta: sequence of 5 parameters — (mean_x, mean_y, scale1, scale2,
            correlation parameter). Scales enter squared; the correlation is
            mapped through tanh so it stays in (-1, 1).
        n_obs: number of observations to draw.
        flatten: when True, return a flat vector of length ``2 * n_obs``;
            otherwise return an ``(n_obs, 2)`` array.
        rng: optional ``numpy.random.Generator``.
    """
    if rng is None:
        rng = np.random.default_rng()
    mean = np.array([theta[0], theta[1]])
    scale1 = theta[2] ** 2
    scale2 = theta[3] ** 2
    corr = np.tanh(theta[4])
    off_diag = corr * scale1 * scale2
    # Covariance built from the squared scales and the tanh-mapped correlation.
    cov = np.array([[scale1 ** 2, off_diag], [off_diag, scale2 ** 2]])
    samples = rng.multivariate_normal(mean, cov, size=n_obs)
    return samples.flatten() if flatten else samples
def extract_sentence_transformer_embedding(sentence_transformer, utterances, intent):
    """Encode ``utterances`` with a sentence-transformer and pair every
    resulting row with the given ``intent`` label.

    Returns:
        (embedding tensor of shape (len(utterances), dim), list of intent labels).
    """
    encoded = sentence_transformer.encode(utterances, convert_to_tensor=True)
    intent_labels = [intent for _ in range(encoded.shape[0])]
    return (encoded, intent_labels)
class Partition10(nn.Module):
    """Machine-generated pipeline-parallel partition holding T5 decoder blocks 6-8.

    Each entry of LAYER_SCOPES names one leaf module of the traced
    T5ForConditionalGeneration; the constructor re-registers those modules on
    this partition as ``l_0`` .. ``l_50`` (see ``self.lookup`` for the mapping
    back to original parameter paths). ``forward`` is an unrolled, register-style
    trace of the three decoder blocks (self-attention, cross-attention, FF) —
    statement order is significant; do not reorder by hand.
    """

    # Traced scopes for decoder blocks 6, 7 and 8: per block, the
    # self-attention sub-layer (layer_norm, q/k/v/o projections, dropout),
    # the cross-attention sub-layer, and the feed-forward sub-layer.
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]']
    # No free-standing parameters/buffers were assigned to this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:10'):
        """Register the traced sub-modules/tensors of this partition and move them to `device`.

        Args:
            layers: mapping from LAYER_SCOPES entries to the corresponding nn.Module.
            tensors: mapping from TENSORS entries to free parameters/buffers (empty here).
            device: target device for the whole partition.
        """
        super().__init__()
        # Register each traced layer as l_<idx>, in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Free tensors become p_<i> (parameters) or b_<i> (buffers).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward receives 4 flattened inputs (see unflatten call in forward).
        self.input_structure = [1, 1, 1, 1]
        # Map from local module names back to the original T5 state-dict paths.
        self.lookup = {'l_0': 'decoder.block.6.layer.0.layer_norm',
                       'l_1': 'decoder.block.6.layer.0.SelfAttention.q',
                       'l_2': 'decoder.block.6.layer.0.SelfAttention.k',
                       'l_3': 'decoder.block.6.layer.0.SelfAttention.v',
                       'l_4': 'decoder.block.6.layer.0.SelfAttention.o',
                       'l_5': 'decoder.block.6.layer.0.dropout',
                       'l_6': 'decoder.block.6.layer.1.layer_norm',
                       'l_7': 'decoder.block.6.layer.1.EncDecAttention.q',
                       'l_8': 'decoder.block.6.layer.1.EncDecAttention.k',
                       'l_9': 'decoder.block.6.layer.1.EncDecAttention.v',
                       'l_10': 'decoder.block.6.layer.1.EncDecAttention.o',
                       'l_11': 'decoder.block.6.layer.1.dropout',
                       'l_12': 'decoder.block.6.layer.2.layer_norm',
                       'l_13': 'decoder.block.6.layer.2.DenseReluDense.wi',
                       'l_14': 'decoder.block.6.layer.2.DenseReluDense.dropout',
                       'l_15': 'decoder.block.6.layer.2.DenseReluDense.wo',
                       'l_16': 'decoder.block.6.layer.2.dropout',
                       'l_17': 'decoder.block.7.layer.0.layer_norm',
                       'l_18': 'decoder.block.7.layer.0.SelfAttention.q',
                       'l_19': 'decoder.block.7.layer.0.SelfAttention.k',
                       'l_20': 'decoder.block.7.layer.0.SelfAttention.v',
                       'l_21': 'decoder.block.7.layer.0.SelfAttention.o',
                       'l_22': 'decoder.block.7.layer.0.dropout',
                       'l_23': 'decoder.block.7.layer.1.layer_norm',
                       'l_24': 'decoder.block.7.layer.1.EncDecAttention.q',
                       'l_25': 'decoder.block.7.layer.1.EncDecAttention.k',
                       'l_26': 'decoder.block.7.layer.1.EncDecAttention.v',
                       'l_27': 'decoder.block.7.layer.1.EncDecAttention.o',
                       'l_28': 'decoder.block.7.layer.1.dropout',
                       'l_29': 'decoder.block.7.layer.2.layer_norm',
                       'l_30': 'decoder.block.7.layer.2.DenseReluDense.wi',
                       'l_31': 'decoder.block.7.layer.2.DenseReluDense.dropout',
                       'l_32': 'decoder.block.7.layer.2.DenseReluDense.wo',
                       'l_33': 'decoder.block.7.layer.2.dropout',
                       'l_34': 'decoder.block.8.layer.0.layer_norm',
                       'l_35': 'decoder.block.8.layer.0.SelfAttention.q',
                       'l_36': 'decoder.block.8.layer.0.SelfAttention.k',
                       'l_37': 'decoder.block.8.layer.0.SelfAttention.v',
                       'l_38': 'decoder.block.8.layer.0.SelfAttention.o',
                       'l_39': 'decoder.block.8.layer.0.dropout',
                       'l_40': 'decoder.block.8.layer.1.layer_norm',
                       'l_41': 'decoder.block.8.layer.1.EncDecAttention.q',
                       'l_42': 'decoder.block.8.layer.1.EncDecAttention.k',
                       'l_43': 'decoder.block.8.layer.1.EncDecAttention.v',
                       'l_44': 'decoder.block.8.layer.1.EncDecAttention.o',
                       'l_45': 'decoder.block.8.layer.1.dropout',
                       'l_46': 'decoder.block.8.layer.2.layer_norm',
                       'l_47': 'decoder.block.8.layer.2.DenseReluDense.wi',
                       'l_48': 'decoder.block.8.layer.2.DenseReluDense.wo',
                       'l_49': 'decoder.block.8.layer.2.DenseReluDense.wo',
                       'l_50': 'decoder.block.8.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run decoder blocks 6-8 on the flattened pipeline inputs.

        Generated register-style trace: temporaries t_0..t_12 are aggressively
        reused, so statement order matters. x0 is reused as the encoder hidden
        states for all cross-attention k/v projections; x1 is the decoder
        hidden states entering block 6; x2/x3 carry the (extended) attention
        bias/mask tensors added to the attention scores.
        """
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Precompute cross-attention key/value projections of x0 for all 3 blocks.
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        # ---- block 6, self-attention (layer_norm -> q/k/v, 32 heads x 128 dims) ----
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)  # residual connection
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        # ---- block 6, cross-attention ----
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)  # residual connection
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        # ---- block 6, feed-forward (layer_norm -> wi -> relu -> dropout -> wo) ----
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)  # residual connection
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        # ---- block 7, self-attention ----
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)  # residual connection
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        # ---- block 7, cross-attention ----
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)  # residual connection
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        # ---- block 7, feed-forward ----
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)  # residual connection
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        # ---- block 8, self-attention ----
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)  # residual connection
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        # ---- block 8, cross-attention ----
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)  # residual connection
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        # ---- block 8, feed-forward ----
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)  # residual connection
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        # Outputs: encoder states (passed through) + block-8 hidden states + masks.
        return list(flatten((x0, t_6, t_12, t_10)))

    # The wrappers below delegate to the module-level pipeline helpers of the
    # same name (defined elsewhere in this file) instead of nn.Module's own.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def aggregate_emb_scores(q_ids_w_emb: dict): p_ids_avg_emb = {} for (key, value) in q_ids_w_emb.items(): list_emb = [emb[0] for emb in value] list_weights = [emb[1] for emb in value] p_ids_avg_emb.update({key: np.dot(list_weights, list_emb)}) return p_ids_avg_emb
def hf_preprocess_encodings(src: Dict[(str, List)]) -> Dict[(str, List)]: enc = preprocess_encodings(src['audio_encoding'], src['audio_encoding_shape']) src['audio_encoding'] = enc return src
class IncNpzFile(): def __init__(self, file: str): self.fn = file self.zip = zipfile.ZipFile(file, mode='a', compression=zipfile.ZIP_DEFLATED) self.keys = set() def __setitem__(self, key: str, data) -> None: if (key in self.keys): return self.keys.add(key) kwargs = {'mode': 'w', 'force_zip64': True} if ((self.zip is None) or (self.zip.fp is None)): self.zip = zipfile.ZipFile(self.fn, mode='a', compression=zipfile.ZIP_DEFLATED) with self.zip.open(key, **kwargs) as fid: val = np.asanyarray(data) format.write_array(fid, val, allow_pickle=True) def __getitem__(self, key: str): self.zip.close() return np.load(self.fn, allow_pickle=True)[key] def close(self): if (self.zip is not None): self.zip.close() self.zip = None def return_npz(self): self.zip.close() return np.load(self.fn, allow_pickle=True)
class MLP(nn.Module): hidden_dims: Sequence[int] activations: Callable[([jnp.ndarray], jnp.ndarray)] = nn.relu activate_final: int = False kernel_init: Callable[([PRNGKey, Shape, Dtype], Array)] = default_init() def setup(self): self.layers = [nn.Dense(size, kernel_init=self.kernel_init) for size in self.hidden_dims] def __call__(self, x: jnp.ndarray) -> jnp.ndarray: for (i, layer) in enumerate(self.layers): x = layer(x) if (((i + 1) < len(self.layers)) or self.activate_final): x = self.activations(x) return x
def check_jieba(): try: import jieba except ImportError: raise ImportError('Jieba is used but not installed on your machine. Go to for installation instructions.') return True
def _save(im, fp, filename): if (im.mode != '1'): raise OSError(('cannot write mode %s as XBM' % im.mode)) fp.write(('#define im_width %d\n' % im.size[0]).encode('ascii')) fp.write(('#define im_height %d\n' % im.size[1]).encode('ascii')) hotspot = im.encoderinfo.get('hotspot') if hotspot: fp.write(('#define im_x_hot %d\n' % hotspot[0]).encode('ascii')) fp.write(('#define im_y_hot %d\n' % hotspot[1]).encode('ascii')) fp.write(b'static char im_bits[] = {\n') ImageFile._save(im, fp, [('xbm', ((0, 0) + im.size), 0, None)]) fp.write(b'};\n')
class Quantization(nn.Module):
    """Differentiable product quantization (PQ) module.

    The embedding space is split into ``subvector_num`` sub-spaces, each with
    a codebook of ``2 ** subvector_bits`` centroids of dimension
    ``emb_size // subvector_num``. Soft assignments are computed from negative
    squared distances; hard assignments use a straight-through estimator so
    gradients flow to the codebook.
    """

    def __init__(self, emb_size: int=768, subvector_num: int=96, subvector_bits: int=8, rotate: np.ndarray=None, codebook: np.ndarray=None):
        """Build the codebook (and optional fixed rotation matrix).

        Args:
            emb_size: full embedding dimensionality.
            subvector_num: number of PQ sub-spaces (M).
            subvector_bits: bits per sub-space code (ksub = 2 ** bits).
            rotate: optional fixed rotation matrix applied before quantization.
            codebook: optional pre-trained codebook of shape (M, ksub, dsub);
                when given, emb_size/subvector_num/subvector_bits are inferred
                from it.
        """
        super(Quantization, self).__init__()
        if (codebook is not None):
            self.codebook = nn.Parameter(torch.FloatTensor(codebook), requires_grad=True)
        else:
            # BUGFIX: the original wrapped the Parameter in
            # ``.type(torch.FloatTensor)``, which returns a plain Tensor (and
            # silently drops the Parameter registration) whenever a dtype cast
            # actually occurs, e.g. under a float64 default dtype. Allocate
            # with an explicit dtype instead.
            self.codebook = nn.Parameter(
                torch.empty(subvector_num, (2 ** subvector_bits), (emb_size // subvector_num),
                            dtype=torch.float32).uniform_((- 0.1), 0.1))
        # Derive the effective geometry from the codebook so the inferred
        # values stay correct when a pre-trained codebook is supplied.
        self.subvector_num = self.codebook.size(0)
        self.subvector_bits = int(math.log2(self.codebook.size(1)))
        if (rotate is not None):
            # Fixed (non-trainable) rotation, e.g. an OPQ transform.
            self.rotate = nn.Parameter(torch.FloatTensor(rotate), requires_grad=False)
        else:
            self.rotate = None

    # BUGFIX: this factory takes ``cls`` but was missing @classmethod, so
    # ``Quantization.from_faiss_index(path)`` bound the path to ``cls`` and
    # failed. Register it as a proper alternate constructor.
    @classmethod
    def from_faiss_index(cls, index_file: str):
        """Build a Quantization module from a trained Faiss PQ index file."""
        print(f'loading PQ from Faiss index: {index_file}')
        index = faiss.read_index(index_file)
        if isinstance(index, faiss.IndexPreTransform):
            # OPQ-style index: extract the linear transform plus the inner PQ.
            vt = faiss.downcast_VectorTransform(index.chain.at(0))
            assert isinstance(vt, faiss.LinearTransform)
            rotate = faiss.vector_to_array(vt.A).reshape(vt.d_out, vt.d_in)
            pq_index = faiss.downcast_index(index.index)
        else:
            pq_index = index
            rotate = None
        centroid_embeds = faiss.vector_to_array(pq_index.pq.centroids)
        codebook = centroid_embeds.reshape(pq_index.pq.M, pq_index.pq.ksub, pq_index.pq.dsub)
        subvector_num = pq_index.pq.M
        pq = cls(subvector_num=subvector_num, rotate=rotate, codebook=codebook)
        return pq

    def rotate_vec(self, vecs):
        """Apply the fixed rotation when configured; identity otherwise."""
        if (self.rotate is None):
            return vecs
        return torch.matmul(vecs, self.rotate.T)

    def code_selection(self, vecs):
        """Soft codeword assignment: softmax over negative squared distances.

        Returns a tensor of shape (batch, subvector_num, ksub).
        """
        vecs = vecs.view(vecs.size(0), self.subvector_num, (- 1))
        codebook = self.codebook.unsqueeze(0).expand(vecs.size(0), (- 1), (- 1), (- 1))
        proba = (- torch.sum(((vecs.unsqueeze((- 2)) - codebook) ** 2), (- 1)))
        assign = F.softmax(proba, (- 1))
        return assign

    def STEstimator(self, assign):
        """Straight-through estimator: hard one-hot forward, soft gradients."""
        index = assign.max(dim=(- 1), keepdim=True)[1]
        assign_hard = torch.zeros_like(assign, device=assign.device, dtype=assign.dtype).scatter_((- 1), index, 1.0)
        return ((assign_hard.detach() - assign.detach()) + assign)

    def quantized_vecs(self, assign):
        """Reconstruct vectors from (soft) assignments via the codebook."""
        assign = self.STEstimator(assign)
        assign = assign.unsqueeze(2)
        codebook = self.codebook.unsqueeze(0).expand(assign.size(0), (- 1), (- 1), (- 1))
        quantized_vecs = torch.matmul(assign, codebook).squeeze(2)
        quantized_vecs = quantized_vecs.view(assign.size(0), (- 1))
        return quantized_vecs

    def quantization(self, vecs):
        """Quantize ``vecs``: soft assignment followed by reconstruction."""
        assign = self.code_selection(vecs)
        quantized_vecs = self.quantized_vecs(assign)
        return quantized_vecs

    def quantization_loss(self, vec, quantized_vecs):
        """Mean squared reconstruction error between inputs and their quantization."""
        return torch.mean(torch.sum(((vec - quantized_vecs) ** 2), dim=(- 1)))

    def save(self, save_path):
        """Persist the rotation matrix (if any) and the codebook as .npy files."""
        if (self.rotate is not None):
            np.save(os.path.join(save_path, 'rotate_matrix'), self.rotate.detach().cpu().numpy())
        np.save(os.path.join(save_path, 'codebook'), self.codebook.detach().cpu().numpy())
class Network(): def __init__(self, name: str=None, func_name: Any=None, **static_kwargs): tfutil.assert_tf_initialized() assert (isinstance(name, str) or (name is None)) assert (func_name is not None) assert (isinstance(func_name, str) or util.is_top_level_function(func_name)) assert util.is_pickleable(static_kwargs) self._init_fields() self.name = name self.static_kwargs = util.EasyDict(static_kwargs) if util.is_top_level_function(func_name): func_name = util.get_top_level_function_name(func_name) (module, self._build_func_name) = util.get_module_from_obj_name(func_name) self._build_func = util.get_obj_from_module(module, self._build_func_name) assert callable(self._build_func) self._build_module_src = _import_module_src.get(module, None) if (self._build_module_src is None): self._build_module_src = inspect.getsource(module) self._init_graph() self.reset_own_vars() def _init_fields(self) -> None: self.name = None self.scope = None self.static_kwargs = util.EasyDict() self.components = util.EasyDict() self.num_inputs = 0 self.num_outputs = 0 self.input_shapes = [[]] self.output_shapes = [[]] self.input_shape = [] self.output_shape = [] self.input_templates = [] self.output_templates = [] self.input_names = [] self.output_names = [] self.own_vars = OrderedDict() self.vars = OrderedDict() self.trainables = OrderedDict() self.var_global_to_local = OrderedDict() self._build_func = None self._build_func_name = None self._build_module_src = None self._run_cache = dict() def _init_graph(self) -> None: self.input_names = [] for param in inspect.signature(self._build_func).parameters.values(): if ((param.kind == param.POSITIONAL_OR_KEYWORD) and (param.default is param.empty)): self.input_names.append(param.name) self.num_inputs = len(self.input_names) assert (self.num_inputs >= 1) if (self.name is None): self.name = self._build_func_name assert re.match('^[A-Za-z0-9_.\\-]*$', self.name) with tf.compat.v1.name_scope(None): self.scope = 
tf.get_default_graph().unique_name(self.name, mark_as_used=True) build_kwargs = dict(self.static_kwargs) build_kwargs['is_template_graph'] = True build_kwargs['components'] = self.components with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope): assert (tf.get_variable_scope().name == self.scope) assert (tf.get_default_graph().get_name_scope() == self.scope) with tf.control_dependencies(None): self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] out_expr = self._build_func(*self.input_templates, **build_kwargs) assert (tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)) self.output_templates = ([out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)) self.num_outputs = len(self.output_templates) assert (self.num_outputs >= 1) assert all((tfutil.is_tf_expression(t) for t in self.output_templates)) if any(((t.shape.ndims is None) for t in self.input_templates)): raise ValueError('Network input shapes not defined. Please call x.set_shape() for each input.') if any(((t.shape.ndims is None) for t in self.output_templates)): raise ValueError('Network output shapes not defined. 
Please call x.set_shape() where applicable.')
        # Every component must itself be a Network, and component names must be unique
        # because they become prefixes in the merged variable namespace below.
        if any(((not isinstance(comp, Network)) for comp in self.components.values())):
            raise ValueError('Components of a Network must be Networks themselves.')
        if (len(self.components) != len(set((comp.name for comp in self.components.values())))):
            raise ValueError('Components of a Network must have unique names.')
        # Cache input/output shapes and names from the template graph.
        self.input_shapes = [t.shape.as_list() for t in self.input_templates]
        self.output_shapes = [t.shape.as_list() for t in self.output_templates]
        self.input_shape = self.input_shapes[0]
        self.output_shape = self.output_shapes[0]
        self.output_names = [t.name.split('/')[(- 1)].split(':')[0] for t in self.output_templates]
        # own_vars: variables created directly under this network's scope,
        # keyed by scope-relative name without the ':0' suffix.
        self.own_vars = OrderedDict(((var.name[(len(self.scope) + 1):].split(':')[0], var) for var in tf.global_variables((self.scope + '/'))))
        # vars: own variables plus every component's variables, prefixed by component name.
        self.vars = OrderedDict(self.own_vars)
        self.vars.update(((((comp.name + '/') + name), var) for comp in self.components.values() for (name, var) in comp.vars.items()))
        self.trainables = OrderedDict(((name, var) for (name, var) in self.vars.items() if var.trainable))
        # Reverse lookup: TF global variable name -> local (scope-relative) name.
        self.var_global_to_local = OrderedDict(((var.name.split(':')[0], name) for (name, var) in self.vars.items()))

    def reset_own_vars(self) -> None:
        """Re-run initializers for variables owned directly by this network (not components)."""
        tfutil.run([var.initializer for var in self.own_vars.values()])

    def reset_vars(self) -> None:
        """Re-run initializers for all variables, including those of components."""
        tfutil.run([var.initializer for var in self.vars.values()])

    def reset_trainables(self) -> None:
        """Re-run initializers for trainable variables only."""
        tfutil.run([var.initializer for var in self.trainables.values()])

    def get_output_for(self, *in_expr: TfExpression, return_as_list: bool=False, **dynamic_kwargs) -> Union[(TfExpression, List[TfExpression])]:
        """Construct a TensorFlow expression that evaluates this network on the given input expressions.

        `None` inputs are replaced by zeros batched to match the first non-None input.
        """
        assert (len(in_expr) == self.num_inputs)
        assert (not all(((expr is None) for expr in in_expr)))
        # Merge static kwargs with per-call dynamic kwargs for the build function.
        build_kwargs = dict(self.static_kwargs)
        build_kwargs.update(dynamic_kwargs)
        build_kwargs['is_template_graph'] = False
        build_kwargs['components'] = self.components
        # Reuse the template graph's variables under the network's absolute scope.
        with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
            assert (tf.get_variable_scope().name == self.scope)
            valid_inputs = [expr for expr in in_expr if (expr is not None)]
            final_inputs = []
            for (expr, name, shape) in zip(in_expr, self.input_names, self.input_shapes):
                if (expr is not None):
                    expr = tf.identity(expr, name=name)
                else:
                    # Missing input: substitute zeros with the template shape and the
                    # dynamic batch size of the first valid input.
                    expr = tf.zeros(([tf.shape(valid_inputs[0])[0]] + shape[1:]), name=name)
                final_inputs.append(expr)
            out_expr = self._build_func(*final_inputs, **build_kwargs)
        # Propagate any shape information gained inside the build function back
        # onto the caller's tensors.
        for (expr, final) in zip(in_expr, final_inputs):
            if isinstance(expr, tf.Tensor):
                expr.set_shape(final.shape)
        assert (tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple))
        if return_as_list:
            out_expr = ([out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr))
        return out_expr

    def get_var_local_name(self, var_or_global_name: Union[(TfExpression, str)]) -> str:
        """Map a TF variable (or its global name) to this network's local variable name."""
        assert (tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str))
        global_name = (var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name)
        return self.var_global_to_local[global_name]

    def find_var(self, var_or_local_name: Union[(TfExpression, str)]) -> TfExpression:
        """Resolve a local variable name to its TF variable; pass TF expressions through."""
        assert (tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str))
        return (self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name)

    def get_var(self, var_or_local_name: Union[(TfExpression, str)]) -> np.ndarray:
        """Fetch a variable's current value as a NumPy array (uses the default session)."""
        return self.find_var(var_or_local_name).eval()

    def set_var(self, var_or_local_name: Union[(TfExpression, str)], new_value: Union[(int, float, np.ndarray)]) -> None:
        """Assign a new value to the given variable."""
        tfutil.set_vars({self.find_var(var_or_local_name): new_value})

    def __getstate__(self) -> dict:
        """Pickle support: capture name, kwargs, components, build source, and variable values."""
        state = dict()
        state['version'] = 4
        state['name'] = self.name
        state['static_kwargs'] = dict(self.static_kwargs)
        state['components'] = dict(self.components)
        state['build_module_src'] = self._build_module_src
        state['build_func_name'] = self._build_func_name
        # Evaluate all owned variables so their values travel with the pickle.
        state['variables'] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
        return state

    def __setstate__(self, state: dict) -> None:
        """Unpickle support: re-exec the saved build source and restore variable values."""
        tfutil.assert_tf_initialized()
        self._init_fields()
        # Allow registered handlers to migrate legacy pickle formats.
        for handler in _import_handlers:
            state = handler(state)
        assert (state['version'] in [2, 3, 4])
        self.name = state['name']
        self.static_kwargs = util.EasyDict(state['static_kwargs'])
        self.components = util.EasyDict(state.get('components', {}))
        self._build_module_src = state['build_module_src']
        self._build_func_name = state['build_func_name']
        # Re-create the build function by executing the pickled source in a
        # fresh, uniquely named module.
        module_name = ('_tflib_network_import_' + uuid.uuid4().hex)
        module = types.ModuleType(module_name)
        sys.modules[module_name] = module
        _import_module_src[module] = self._build_module_src
        exec(self._build_module_src, module.__dict__)
        self._build_func = util.get_obj_from_module(module, self._build_func_name)
        assert callable(self._build_func)
        self._init_graph()
        self.reset_own_vars()
        tfutil.set_vars({self.find_var(name): value for (name, value) in state['variables']})

    def clone(self, name: str=None, **new_static_kwargs) -> 'Network':
        """Create an independent copy of this network with its own variables (values copied)."""
        net = object.__new__(Network)
        net._init_fields()
        net.name = (name if (name is not None) else self.name)
        net.static_kwargs = util.EasyDict(self.static_kwargs)
        net.static_kwargs.update(new_static_kwargs)
        net._build_module_src = self._build_module_src
        net._build_func_name = self._build_func_name
        net._build_func = self._build_func
        net._init_graph()
        net.copy_vars_from(self)
        return net

    def copy_own_vars_from(self, src_net: 'Network') -> None:
        """Copy values of variables owned by src_net into same-named variables here."""
        names = [name for name in self.own_vars.keys() if (name in src_net.own_vars)]
        tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))

    def copy_vars_from(self, src_net: 'Network') -> None:
        """Copy values of all matching variables (including components) from src_net."""
        names = [name for name in self.vars.keys() if (name in src_net.vars)]
        tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))

    def copy_trainables_from(self, src_net: 'Network') -> None:
        """Copy values of matching trainable variables from src_net."""
        names = [name for name in self.trainables.keys() if (name in src_net.trainables)]
        tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))

    def convert(self, new_func_name: str, new_name: str=None, **new_static_kwargs) -> 'Network':
        """Build a new network with a different build function, copying matching variables."""
        if (new_name is None):
            new_name = self.name
        static_kwargs = dict(self.static_kwargs)
        static_kwargs.update(new_static_kwargs)
        net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
        net.copy_vars_from(self)
        return net

    def setup_as_moving_average_of(self, src_net: 'Network', beta: TfExpressionEx=0.99, beta_nontrainable: TfExpressionEx=0.0) -> tf.Operation:
        """Return an op that updates this network's variables toward src_net's.

        Trainables use decay `beta`; non-trainables use `beta_nontrainable`
        (default 0.0, i.e. copied outright each update).
        """
        with tfutil.absolute_name_scope((self.scope + '/_MovingAvg')):
            ops = []
            for (name, var) in self.vars.items():
                if (name in src_net.vars):
                    cur_beta = (beta if (name in self.trainables) else beta_nontrainable)
                    new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
                    ops.append(var.assign(new_value))
            return tf.group(*ops)

    def run(self, *in_arrays: Tuple[(Union[(np.ndarray, None)], ...)], input_transform: dict=None, output_transform: dict=None, return_as_list: bool=False, print_progress: bool=False, minibatch_size: int=None, num_gpus: int=1, assume_frozen: bool=False, **dynamic_kwargs) -> Union[(np.ndarray, Tuple[(np.ndarray, ...)], List[np.ndarray])]:
        """Evaluate the network on NumPy arrays, in minibatches, optionally across GPUs.

        Built run-graphs are cached per configuration in self._run_cache so
        repeated calls with the same options reuse the same placeholders/ops.
        """
        assert (len(in_arrays) == self.num_inputs)
        assert (not all(((arr is None) for arr in in_arrays)))
        # Transforms must be top-level functions so they can form a stable cache key.
        assert ((input_transform is None) or util.is_top_level_function(input_transform['func']))
        assert ((output_transform is None) or util.is_top_level_function(output_transform['func']))
        (output_transform, dynamic_kwargs) = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
        num_items = in_arrays[0].shape[0]
        if (minibatch_size is None):
            minibatch_size = num_items
        # Build a deterministic string key from all graph-affecting options.
        key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)

        def unwind_key(obj):
            # Recursively turn dicts into sorted item lists and callables into
            # their qualified names, so repr() is stable across runs.
            if isinstance(obj, dict):
                return [(key, unwind_key(value)) for (key, value) in sorted(obj.items())]
            if callable(obj):
                return util.get_top_level_function_name(obj)
            return obj
        key = repr(unwind_key(key))
        # Build the evaluation graph on first use of this configuration.
        if (key not in self._run_cache):
            with tfutil.absolute_name_scope((self.scope + '/_Run')), tf.control_dependencies(None):
                with tf.device('/cpu:0'):
                    in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    # Split each input across GPUs; in_split[gpu] is that GPU's input tuple.
                    in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device(('/gpu:%d' % gpu)):
                        # assume_frozen clones so later variable edits don't affect cached graphs.
                        net_gpu = (self.clone() if assume_frozen else self)
                        in_gpu = in_split[gpu]
                        if (input_transform is not None):
                            in_kwargs = dict(input_transform)
                            in_gpu = in_kwargs.pop('func')(*in_gpu, **in_kwargs)
                        in_gpu = ([in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu))
                        assert (len(in_gpu) == self.num_inputs)
                        out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
                        if (output_transform is not None):
                            out_kwargs = dict(output_transform)
                            out_gpu = out_kwargs.pop('func')(*out_gpu, **out_kwargs)
                        out_gpu = ([out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu))
                        assert (len(out_gpu) == self.num_outputs)
                        out_split.append(out_gpu)
                with tf.device('/cpu:0'):
                    # Re-concatenate per-GPU outputs along the batch axis.
                    out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
                    self._run_cache[key] = (in_expr, out_expr)
        (in_expr, out_expr) = self._run_cache[key]
        # Pre-allocate result arrays, then fill them one minibatch at a time.
        out_arrays = [np.empty(([num_items] + expr.shape.as_list()[1:]), expr.dtype.name) for expr in out_expr]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print(('\r%d / %d' % (mb_begin, num_items)), end='')
            mb_end = min((mb_begin + minibatch_size), num_items)
            mb_num = (mb_end - mb_begin)
            # None inputs are fed as zeros of the template shape.
            mb_in = [(src[mb_begin:mb_end] if (src is not None) else np.zeros(([mb_num] + shape[1:]))) for (src, shape) in zip(in_arrays, self.input_shapes)]
            mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
            for (dst, src) in zip(out_arrays, mb_out):
                dst[mb_begin:mb_end] = src
        if print_progress:
            print(('\r%d / %d' % (num_items, num_items)))
        if (not return_as_list):
            out_arrays = (out_arrays[0] if (len(out_arrays) == 1) else tuple(out_arrays))
        return out_arrays

    def list_ops(self) -> List[TfExpression]:
        """List graph ops under this network's scope, excluding internal '_' subscopes."""
        include_prefix = (self.scope + '/')
        exclude_prefix = (include_prefix + '_')
        ops = tf.get_default_graph().get_operations()
        ops = [op for op in ops if op.name.startswith(include_prefix)]
        ops = [op for op in ops if (not op.name.startswith(exclude_prefix))]
        return ops

    def list_layers(self) -> List[Tuple[(str, TfExpression, List[TfExpression])]]:
        """Heuristically group ops/vars into layers: (layer_name, output_tensor, trainables)."""
        layers = []

        def recurse(scope, parent_ops, parent_vars, level):
            # Skip bookkeeping subscopes that are not real layers.
            if any(((p in scope) for p in ['/Shape', '/strided_slice', '/Cast', '/concat', '/Assign'])):
                return
            global_prefix = (scope + '/')
            local_prefix = global_prefix[(len(self.scope) + 1):]
            cur_ops = [op for op in parent_ops if (op.name.startswith(global_prefix) or (op.name == global_prefix[:(- 1)]))]
            cur_vars = [(name, var) for (name, var) in parent_vars if (name.startswith(local_prefix) or (name == local_prefix[:(- 1)]))]
            if ((not cur_ops) and (not cur_vars)):
                return
            # Drop ops that merely implement a Variable's internals.
            for var in [op for op in cur_ops if op.type.startswith('Variable')]:
                var_prefix = (var.name + '/')
                cur_ops = [op for op in cur_ops if (not op.name.startswith(var_prefix))]
            # Descend further if this scope has no "real" direct ops of its own.
            contains_direct_ops = any(((('/' not in op.name[len(global_prefix):]) and (op.type not in ['Identity', 'Cast', 'Transpose'])) for op in cur_ops))
            if (((level == 0) or (not contains_direct_ops)) and ((len(cur_ops) + len(cur_vars)) > 1)):
                visited = set()
                for rel_name in ([op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for (name, _var) in cur_vars]):
                    token = rel_name.split('/')[0]
                    if (token not in visited):
                        recurse((global_prefix + token), cur_ops, cur_vars, (level + 1))
                        visited.add(token)
                return
            # Leaf scope: record it as one layer.
            layer_name = scope[(len(self.scope) + 1):]
            layer_output = (cur_ops[(- 1)].outputs[0] if cur_ops else cur_vars[(- 1)][1])
            layer_trainables = [var for (_name, var) in cur_vars if var.trainable]
            layers.append((layer_name, layer_output, layer_trainables))
        recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
        return layers

    def print_layers(self, title: str=None, hide_layers_with_no_params: bool=False) -> None:
        """Print a formatted table of layers with parameter counts and shapes."""
        rows = [[(title if (title is not None) else self.name), 'Params', 'OutputShape', 'WeightShape']]
        rows += [(['---'] * 4)]
        total_params = 0
        for (layer_name, layer_output, layer_trainables) in self.list_layers():
            num_params = sum((int(np.prod(var.shape.as_list())) for var in layer_trainables))
            # Prefer the shortest-named '/weight:0' variable as the representative weight.
            weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
            weights.sort(key=(lambda x: len(x.name)))
            if ((len(weights) == 0) and (len(layer_trainables) == 1)):
                weights = layer_trainables
            total_params += num_params
            if ((not hide_layers_with_no_params) or (num_params != 0)):
                num_params_str = (str(num_params) if (num_params > 0) else '-')
                output_shape_str = str(layer_output.shape)
                weight_shape_str = (str(weights[0].shape) if (len(weights) >= 1) else '-')
                rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
        rows += [(['---'] * 4)]
        rows += [['Total', str(total_params), '', '']]
        # Left-pad every column to the widest cell in that column.
        widths = [max((len(cell) for cell in column)) for column in zip(*rows)]
        print()
        for row in rows:
            print(' '.join(((cell + (' ' * (width - len(cell)))) for (cell, width) in zip(row, widths))))
        print()

    def setup_weight_histograms(self, title: str=None) -> None:
        """Register TensorBoard histogram summaries for all trainable variables."""
        if (title is None):
            title = self.name
        with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
            for (local_name, var) in self.trainables.items():
                if ('/' in local_name):
                    # Group by innermost name so e.g. all '/weight's share a tab.
                    p = local_name.split('/')
                    name = ((((title + '_') + p[(- 1)]) + '/') + '_'.join(p[:(- 1)]))
                else:
                    name = ((title + '_toplevel/') + local_name)
                tf.summary.histogram(name, var)
def patch_blendmask(cfg, model, output_names): def forward(self, tensor): images = None gt_instances = None basis_sem = None features = self.backbone(tensor) (basis_out, basis_losses) = self.basis_module(features, basis_sem) (proposals, proposal_losses) = self.proposal_generator(images, features, gt_instances, self.top_layer) return (basis_out['bases'], proposals) model.forward = types.MethodType(forward, model) output_names.extend(['bases']) for item in ['logits', 'bbox_reg', 'centerness', 'top_feats']: for l in range(len(cfg.MODEL.FCOS.FPN_STRIDES)): fpn_name = 'P{}'.format((3 + l)) output_names.extend([(fpn_name + item)])
class DegenerateCH4Tests(unittest.TestCase): def setUpClass(cls): cls.degenerate_CH4_manifold = load_degenerate_CH4_manifold() def test_load_degenerate_CH4_manifold_power_spectrum_shape(self): self.assertTrue((self.degenerate_CH4_manifold.data.SOAP_power_spectrum.shape == (162, 12))) def test_load_degenerate_CH4_manifold_bispectrum_shape(self): self.assertTrue((self.degenerate_CH4_manifold.data.SOAP_bispectrum.shape == (162, 12))) def test_load_degenerate_CH4_manifold_access_descr(self): self.degenerate_CH4_manifold.DESCR
def test_argsort(): array = ak.Array(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight']) assert (ak.operations.argsort(array, axis=(- 1)).to_list() == [7, 4, 3, 0, 6, 5, 2, 1]) array = ak.Array([['twotwo', 'two', 'three'], ['four', 'five'], [], ['six', 'seven', 'eight']]) assert (ak.operations.argsort(array, axis=(- 1)).to_list() == [[2, 1, 0], [1, 0], [], [2, 1, 0]]) array = ak.Array([[['twotwo', 'two'], ['three']], [['four', 'five']], [], [['six'], ['seven', 'eight']]]) assert (ak.operations.argsort(array, axis=(- 1)).to_list() == [[[1, 0], [0]], [[1, 0]], [], [[0], [1, 0]]])
class GraphSAGE():
    """GraphSAGE model builder (Keras): stacks neighbourhood aggregators.

    Sizes are taken either from a GraphSAGE node/link generator, or supplied
    explicitly via `n_samples`, `input_dim`, and `multiplicity`.
    """

    def __init__(self, layer_sizes, generator=None, aggregator=None, bias=True, dropout=0.0, normalize='l2', activations=None, kernel_initializer='glorot_uniform', kernel_regularizer=None, kernel_constraint=None, bias_initializer='zeros', bias_regularizer=None, bias_constraint=None, n_samples=None, input_dim=None, multiplicity=None):
        self.layer_sizes = layer_sizes
        self.max_hops = len(layer_sizes)
        self.bias = bias
        self.dropout = dropout
        # Output normalization: L2-normalize, or identity for 'none'/None.
        if (normalize == 'l2'):
            self._normalization = Lambda((lambda x: K.l2_normalize(x, axis=(- 1))))
        elif ((normalize is None) or (normalize == 'none') or (normalize == 'None')):
            self._normalization = Lambda((lambda x: x))
        else:
            raise ValueError("Normalization should be either 'l2' or 'none'; received '{}'".format(normalize))
        # Model sizes come from the generator when given, otherwise from
        # explicit keyword arguments (all three are then required).
        if (generator is not None):
            self._get_sizes_from_generator(generator)
        else:
            self.n_samples = _require_without_generator(n_samples, 'n_samples')
            self.input_feature_size = _require_without_generator(input_dim, 'input_dim')
            self.multiplicity = _require_without_generator(multiplicity, 'multiplicity')
            if (len(self.n_samples) != self.max_hops):
                raise ValueError(f'n_samples: expected one sample size for each of the {self.max_hops} layers, found {len(self.n_samples)} sample sizes')
        # Feature dimension at each hop: input size followed by layer sizes.
        self.dims = ([self.input_feature_size] + layer_sizes)
        self._compute_neighbourhood_sizes()
        if (aggregator is None):
            self._aggregator = MeanAggregator
        elif issubclass(aggregator, Layer):
            self._aggregator = aggregator
        else:
            raise TypeError('Aggregator should be a subclass of Keras Layer')
        # Default activations: ReLU for hidden layers, linear for the last.
        if (activations is None):
            activations = ((['relu'] * (self.max_hops - 1)) + ['linear'])
        elif (len(activations) != self.max_hops):
            raise ValueError('Invalid number of activations; require one function per layer')
        self.activations = activations
        # One aggregator instance per hop; weights are shared across that hop.
        self._aggs = [self._aggregator(output_dim=self.layer_sizes[layer], bias=self.bias, act=self.activations[layer], kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, kernel_constraint=kernel_constraint, bias_initializer=bias_initializer, bias_regularizer=bias_regularizer, bias_constraint=bias_constraint) for layer in range(self.max_hops)]

    def _get_sizes_from_generator(self, generator):
        """Read n_samples, multiplicity, and input feature size from a generator."""
        if (not isinstance(generator, (GraphSAGENodeGenerator, GraphSAGELinkGenerator, Neo4jGraphSAGENodeGenerator))):
            errmsg = 'Generator should be an instance of GraphSAGENodeGenerator or GraphSAGELinkGenerator'
            if isinstance(generator, (NodeSequence, LinkSequence)):
                errmsg = ('Passing a Sequence object as the generator to GraphSAGE is no longer supported. ' + errmsg)
            raise TypeError(errmsg)
        self.n_samples = generator.num_samples
        if (len(self.n_samples) != self.max_hops):
            raise ValueError('Mismatched lengths: neighbourhood sample sizes {} versus layer sizes {}'.format(self.n_samples, self.layer_sizes))
        self.multiplicity = generator.multiplicity
        feature_sizes = generator.graph.node_feature_sizes()
        # GraphSAGE is homogeneous: exactly one node type is supported.
        if (len(feature_sizes) > 1):
            raise RuntimeError('GraphSAGE called on graph with more than one node type.')
        self.input_feature_size = feature_sizes.popitem()[1]

    def _compute_neighbourhood_sizes(self):
        """Total sampled nodes at each hop: the cumulative product of n_samples."""
        def size_at(i):
            # NOTE(review): np.product is a deprecated alias (removed in
            # NumPy 2.0); np.prod is the supported spelling — consider updating.
            return np.product(self.n_samples[:i], dtype=int)
        self.neighbourhood_sizes = [size_at(i) for i in range((self.max_hops + 1))]

    def __call__(self, xin: List):
        """Apply the aggregator stack to per-hop input tensors.

        `xin` must be a list of max_hops+1 tensors (node features at each hop).
        """
        def apply_layer(x: List, num_hops: int):
            # One aggregation step: combine each hop's heads with their
            # sampled neighbours from the next hop out.
            layer_out = []
            for i in range((self.max_hops - num_hops)):
                head_shape = K.int_shape(x[i])[1]
                neigh_in = Dropout(self.dropout)(Reshape((head_shape, self.n_samples[i], self.dims[num_hops]))(x[(i + 1)]))
                layer_out.append(self._aggs[num_hops]([Dropout(self.dropout)(x[i]), neigh_in]))
            return layer_out
        if (not isinstance(xin, list)):
            raise TypeError('Input features to GraphSAGE must be a list')
        if (len(xin) != (self.max_hops + 1)):
            raise ValueError('Length of input features should equal the number of GraphSAGE layers plus one')
        h_layer = xin
        for layer in range(0, self.max_hops):
            h_layer = apply_layer(h_layer, layer)
        # Squeeze out a singleton neighbourhood dimension, if present.
        h_layer = [(Reshape(K.int_shape(x)[2:])(x) if (K.int_shape(x)[1] == 1) else x) for x in h_layer]
        return (self._normalization(h_layer[0]) if (len(h_layer) == 1) else [self._normalization(xi) for xi in h_layer])

    def _node_model(self):
        """Input placeholders and output tensor for node inference."""
        x_inp = [Input(shape=(s, self.input_feature_size)) for s in self.neighbourhood_sizes]
        x_out = self(x_inp)
        return (x_inp, x_out)

    def _link_model(self):
        """Inputs/outputs for link inference: two node models, inputs interleaved."""
        (x_inp_src, x_out_src) = self._node_model()
        (x_inp_dst, x_out_dst) = self._node_model()
        x_inp = [x for ab in zip(x_inp_src, x_inp_dst) for x in ab]
        x_out = [x_out_src, x_out_dst]
        return (x_inp, x_out)

    def in_out_tensors(self, multiplicity=None):
        """Build (inputs, outputs) for a node (multiplicity 1) or link (2) model."""
        if (multiplicity is None):
            multiplicity = self.multiplicity
        if (multiplicity == 1):
            return self._node_model()
        elif (multiplicity == 2):
            return self._link_model()
        else:
            raise RuntimeError('Currently only multiplicities of 1 and 2 are supported. Consider using node_model or link_model method explicitly to build node or link prediction model, respectively.')

    def default_model(self, flatten_output=True):
        # Deprecated entry point kept for backward compatibility.
        warnings.warn('The .default_model() method is deprecated. Please use .in_out_tensors() method instead.', DeprecationWarning, stacklevel=2)
        return self.in_out_tensors()

    # Deprecated aliases for the private/new-style builders.
    node_model = deprecated_model_function(_node_model, 'node_model')
    link_model = deprecated_model_function(_link_model, 'link_model')
    build = deprecated_model_function(in_out_tensors, 'build')
def tf_efficientnet_b3(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model
def register_Ns3EpcX2SapSwitchConnectionParams_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::EpcX2Sap::SwitchConnectionParams const &', 'arg0')]) cls.add_instance_attribute('drbid', 'uint8_t', is_const=False) cls.add_instance_attribute('mmWaveCellId', 'uint16_t', is_const=False) cls.add_instance_attribute('mmWaveRnti', 'uint32_t', is_const=False) cls.add_instance_attribute('useMmWaveConnection', 'bool', is_const=False) return
class _MemoryEfficientFP16OptimizerMixin(object):
    """Mixin implementing dynamic loss scaling around a wrapped optimizer.

    Expects the host class to provide: `wrapped_optimizer`, `scaler`
    (with `loss_scale` / `update_scale`), `min_loss_scale`, and — in
    `load_state_dict` — `optimizer` (presumably an alias of the wrapped
    optimizer; TODO confirm against the host class).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def has_flat_params(self):
        # Memory-efficient variant keeps params in their original storage.
        return False

    def state_dict(self):
        """Wrapped optimizer's state dict plus the current loss scale."""
        state_dict = self.wrapped_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Restore optimizer state and loss scale.

        After delegating to the wrapped optimizer, per-parameter state is
        re-keyed: saved integer param ids are mapped onto the current
        parameter objects by position within the param groups.
        """
        if ('loss_scale' in state_dict):
            self.scaler.loss_scale = state_dict['loss_scale']
        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
        groups = self.optimizer.param_groups
        saved_groups = state_dict['param_groups']
        # Positional mapping: i-th saved param id -> i-th live parameter.
        id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
        for (k, v) in state_dict['state'].items():
            if (k in id_map):
                param = id_map[k]
                self.optimizer.state[param] = v

    def backward(self, loss):
        """Scale the loss, backprop, and mark gradients as scaled."""
        loss = (loss * self.scaler.loss_scale)
        loss.backward()
        self._grads_are_scaled = True

    def _unscale_grads(self, multiply_grads=1.0):
        # Undo loss scaling exactly once; folds an extra multiplier in for free.
        if self._grads_are_scaled:
            self._grads_are_scaled = False
            self.wrapped_optimizer.multiply_grads((multiply_grads / self.scaler.loss_scale))
        else:
            # Already unscaled: a non-trivial multiplier here would be a caller bug.
            assert (multiply_grads == 1.0)

    def multiply_grads(self, c):
        """Multiply gradients by `c`, unscaling first if still scaled."""
        if self._grads_are_scaled:
            self._unscale_grads(c)
        else:
            self.wrapped_optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Unscale, clip, and adjust the dynamic loss scale on overflow.

        Raises OverflowError to signal the caller to retry the step with a
        lower scale, or FloatingPointError once the minimum scale is reached.
        """
        self._unscale_grads()
        grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        prev_scale = self.scaler.loss_scale
        self.scaler.update_scale(overflow)
        if overflow:
            if (self.scaler.loss_scale <= self.min_loss_scale):
                # Don't drop below the floor; restore and give up.
                self.scaler.loss_scale = prev_scale
                raise FloatingPointError('Minimum loss scale reached ({}). Your loss is probably exploding. Try lowering the learning rate, using gradient clipping or increasing the batch size.'.format(self.min_loss_scale))
            raise OverflowError(('setting loss scale to: ' + str(self.scaler.loss_scale)))
        return grad_norm

    def step(self, closure=None):
        """Unscale gradients (if needed) and take an optimizer step."""
        self._unscale_grads()
        self.wrapped_optimizer.step(closure)

    def zero_grad(self):
        """Clear gradients and reset the scaled-gradients flag."""
        self.wrapped_optimizer.zero_grad()
        self._grads_are_scaled = False
def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_backward, bwd_input): test_attrs = copy.deepcopy(orig_test_attrs) test_attrs = {k: str(v) for (k, v) in test_attrs.items()} ascii_test_attrs = ast.literal_eval(json.dumps(test_attrs)) input_config = str(ascii_test_attrs)[1:(- 1)].replace("'", '') if bwd_input: test_attrs.update({'bwd': bwd_input}) test_name = bench_op_obj.test_name(**test_attrs) test_config = TestConfig(test_name, input_config, tags, run_backward) return OperatorTestCase(bench_op_obj, test_config)
def test_rpad_and_clip_listoffset_array():
    """pad_none with clip=True on ListOffsetArray, at axis 0 and axis 1,
    including an IndexedOptionArray content with pre-existing missing values.
    Typetracer forms must match the concrete forms throughout."""
    # Plain float content: [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []]
    content = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
    listoffsetarray = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)
    assert (to_list(listoffsetarray) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []])
    # axis=0, clip shorter than length: truncates the outer dimension.
    assert (to_list(ak._do.pad_none(listoffsetarray, 3, 0, clip=True)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 3, 0, clip=True).form == ak._do.pad_none(listoffsetarray, 3, 0, clip=True).form)
    # Padding at axis 0 wraps the list type in an option.
    assert ((('option[' + str(listoffsetarray.form.type)) + ']') == str(ak._do.pad_none(listoffsetarray, 3, 0, clip=True).form.type))
    # axis=0, clip longer than length: appends None entries.
    assert (to_list(ak._do.pad_none(listoffsetarray, 7, 0, clip=True)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [], None])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 7, 0, clip=True).form == ak._do.pad_none(listoffsetarray, 7, 0, clip=True).form)
    assert ((('option[' + str(listoffsetarray.form.type)) + ']') == str(ak._do.pad_none(listoffsetarray, 7, 0, clip=True).form.type))
    # axis=1, clip=True: every inner list becomes exactly length 5.
    assert (to_list(ak._do.pad_none(listoffsetarray, 5, 1, clip=True)) == [[0.0, 1.1, 2.2, None, None], [None, None, None, None, None], [3.3, 4.4, None, None, None], [5.5, None, None, None, None], [6.6, 7.7, 8.8, 9.9, None], [None, None, None, None, None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 5, 1, clip=True).form == ak._do.pad_none(listoffsetarray, 5, 1, clip=True).form)
    # Without clip the length stays variable; with clip it becomes regular (5 *).
    assert (str(ak._do.pad_none(listoffsetarray, 5, 1).form.type) == 'var * ?float64')
    assert (str(ak._do.pad_none(listoffsetarray, 5, 1, clip=True).form.type) == '5 * ?float64')
    # axis=1, clip to a single element per list.
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 1, clip=True)) == [[0.0], [None], [3.3], [5.5], [6.6], [None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 1, clip=True).form == ak._do.pad_none(listoffsetarray, 1, 1, clip=True).form)
    # Content with existing missing values (negative index -> None).
    content = ak.contents.numpyarray.NumpyArray(np.array([1.5, 3.3]))
    index = ak.index.Index64(np.array([0, (- 3), 1, (- 2), 1, 0, 0, (- 3), (- 13), 0, 1, 1, 0, 1, 1, 1, 1, (- 10), 0, (- 1), 0, 0, 0, 1, (- 1), 1, 1]))
    indexedarray = ak.contents.indexedoptionarray.IndexedOptionArray(index, content)
    offsets = ak.index.Index64(np.array([14, 15, 15, 15, 26, 26, 26]))
    listoffsetarray = ak.contents.listoffsetarray.ListOffsetArray(offsets, indexedarray)
    assert (to_list(listoffsetarray) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []])
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 0, clip=True)) == [[3.3]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 0, clip=True).form == ak._do.pad_none(listoffsetarray, 1, 0, clip=True).form)
    assert (to_list(ak._do.pad_none(listoffsetarray, 1, 1, clip=True)) == [[3.3], [None], [None], [3.3], [None], [None]])
    assert (ak._do.pad_none(listoffsetarray.to_typetracer(), 1, 1, clip=True).form == ak._do.pad_none(listoffsetarray, 1, 1, clip=True).form)
class GeneratorDynamicItem(DynamicItem): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.current_generator = None self.num_provided_items = 0 def __call__(self, *args): if (self.num_provided_items == len(self.provides)): raise RuntimeError('DynamicItemPipeline called too many times!') if (not self.current_generator): self.current_generator = self.func(*args) out = next(self.current_generator) self.num_provided_items += 1 return out def next_takes(self): if (not self.current_generator): return self.takes else: return [] def next_provides(self): keys = self.provides[self.num_provided_items] if isinstance(keys, str): return [keys] else: return keys def provided_in_order(self): in_order = [] for keys in self.provides: if isinstance(keys, str): in_order.append([keys]) else: in_order.append(keys) return in_order def reset(self): if (self.current_generator is not None): self.current_generator.close() self.current_generator = None self.num_provided_items = 0
def FloatSingle(ctx=None): ctx = _get_ctx(ctx) return FPSortRef(Z3_mk_fpa_sort_single(ctx.ref()), ctx)
def use_cuda(enabled, device_id=0): if enabled: assert torch.cuda.is_available(), 'CUDA is not available' torch.cuda.set_device(device_id)
_experiment def ppo_garage_pytorch(ctxt, env_id, seed): deterministic.set_seed(seed) runner = LocalRunner(ctxt) env = GarageEnv(normalize(gym.make(env_id))) policy = PyTorch_GMP(env.spec, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, output_nonlinearity=None) value_function = GaussianMLPValueFunction(env_spec=env.spec, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, output_nonlinearity=None) policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=0.00025)), policy, max_optimization_epochs=10, minibatch_size=64) vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=0.00025)), value_function, max_optimization_epochs=10, minibatch_size=64) algo = PyTorch_PPO(env_spec=env.spec, policy=policy, value_function=value_function, policy_optimizer=policy_optimizer, vf_optimizer=vf_optimizer, max_path_length=hyper_parameters['max_path_length'], discount=0.99, gae_lambda=0.95, center_adv=True, lr_clip_range=0.2) runner.setup(algo, env) runner.train(n_epochs=hyper_parameters['n_epochs'], batch_size=hyper_parameters['batch_size'])
def test_parameter_file_load_save_using_global(): module_creator = ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]) proto_variable_inputs = module_creator.get_proto_variable_inputs() outputs = module_creator.module(*proto_variable_inputs) g = nn.graph_def.get_default_graph_by_variable(outputs) with create_temp_with_dir(nnp_file) as tmp_file: g.save(tmp_file) another = TSTNetNormal() variable_inputs = module_creator.get_variable_inputs() outputs = g(*variable_inputs) ref_outputs = another(*variable_inputs) with pytest.raises(AssertionError) as excinfo: forward_variable_and_check_equal(outputs, ref_outputs) nn.load_parameters(tmp_file) params = nn.get_parameters() another.set_parameters(params) ref_outputs = another(*variable_inputs) forward_variable_and_check_equal(outputs, ref_outputs)
def load_images_from_directory(names, rootdir, sources=None, standardize=False): images = {} if (sources is not None): for (source, name) in zip(sources, names): path = (os.path.join(rootdir, source, name) + '.*') path = glob.glob(path)[0] im = load_image(path, standardize=standardize) images.setdefault(source, {})[name] = im else: for name in names: path = (os.path.join(rootdir, name) + '.*') path = glob.glob(path)[0] im = load_image(path, standardize=standardize) images[name] = im return images
def test_RecordArray_NumpyArray_lazy(): v2a = ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], ['x', 'y']) resultv2 = v2a._carry(ak.index.Index(np.array([1, 2], np.int64)), True) assert (to_list(resultv2) == [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}]) assert (v2a.to_typetracer()._carry(ak.index.Index(np.array([1, 2], np.int64)), True).form == resultv2.form) v2b = ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], None) resultv2 = v2b._carry(ak.index.Index(np.array([0, 1, 2, 3, 4], np.int64)), True) assert (to_list(resultv2) == [(0, 0.0), (1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4)]) assert (v2b.to_typetracer()._carry(ak.index.Index(np.array([0, 1, 2, 3, 4], np.int64)), True).form == resultv2.form) v2c = ak.contents.recordarray.RecordArray([], [], 10) resultv2 = v2c[np.array([0], np.int64)] assert (to_list(resultv2) == [{}]) assert (v2c.to_typetracer()[np.array([0], np.int64)].form == resultv2.form) v2d = ak.contents.recordarray.RecordArray([], None, 10) resultv2 = v2d[np.array([0], np.int64)] assert (to_list(resultv2) == [()]) assert (v2d.to_typetracer()[np.array([0], np.int64)].form == resultv2.form)
def test_dimension_optiontype():
    """ak.prod along the innermost axis of RegularArray-of-lists, with
    missing sublists (IndexedOptionArray) and without (IndexedArray)."""
    # Case 1: IndexedOptionArray content — index -1 entries become None sublists.
    content = ak.contents.NumpyArray(np.array(primes[:((2 * 3) * 5)], dtype=np.int64))
    offsets1 = ak.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], dtype=np.int64))
    listoffsetarray = ak.contents.ListOffsetArray(offsets1, content)
    index = ak.index.Index64(np.array([5, (- 1), 3, 2, (- 1), 0], dtype=np.int64))
    indexedarray = ak.contents.IndexedOptionArray(index, listoffsetarray)
    depth2 = ak.contents.RegularArray(indexedarray, 3)
    assert (to_list(depth2) == [[[101, 103, 107, 109, 113], None, [53, 59, 61, 67, 71]], [[31, 37, 41, 43, 47], None, [2, 3, 5, 7, 11]]])
    # Products of each innermost list; None sublists stay None.
    assert (to_list(ak.prod(depth2, axis=(- 1), keepdims=False, highlevel=False)) == [[((((101 * 103) * 107) * 109) * 113), None, ((((53 * 59) * 61) * 67) * 71)], [((((31 * 37) * 41) * 43) * 47), None, ((((2 * 3) * 5) * 7) * 11)]])
    # keepdims=True wraps each scalar (and leaves None in place).
    assert (to_list(ak.prod(depth2, axis=(- 1), keepdims=True, highlevel=False)) == [[[((((101 * 103) * 107) * 109) * 113)], None, [((((53 * 59) * 61) * 67) * 71)]], [[((((31 * 37) * 41) * 43) * 47)], None, [((((2 * 3) * 5) * 7) * 11)]]])
    # Case 2: plain IndexedArray (reversed order, no missing sublists).
    content = ak.contents.NumpyArray(np.array(primes[:((2 * 3) * 5)], dtype=np.int64))
    offsets1 = ak.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], dtype=np.int64))
    listoffsetarray = ak.contents.ListOffsetArray(offsets1, content)
    index = ak.index.Index64(np.array([5, 4, 3, 2, 1, 0], dtype=np.int64))
    indexedarray = ak.contents.IndexedArray(index, listoffsetarray)
    depth2 = ak.contents.RegularArray(indexedarray, 3)
    assert (to_list(depth2) == [[[101, 103, 107, 109, 113], [73, 79, 83, 89, 97], [53, 59, 61, 67, 71]], [[31, 37, 41, 43, 47], [13, 17, 19, 23, 29], [2, 3, 5, 7, 11]]])
    assert (to_list(ak.prod(depth2, axis=(- 1), highlevel=False)) == [[((((101 * 103) * 107) * 109) * 113), ((((73 * 79) * 83) * 89) * 97), ((((53 * 59) * 61) * 67) * 71)], [((((31 * 37) * 41) * 43) * 47), ((((13 * 17) * 19) * 23) * 29), ((((2 * 3) * 5) * 7) * 11)]])
    assert (to_list(ak.prod(depth2, axis=(- 1), keepdims=True, highlevel=False)) == [[[((((101 * 103) * 107) * 109) * 113)], [((((73 * 79) * 83) * 89) * 97)], [((((53 * 59) * 61) * 67) * 71)]], [[((((31 * 37) * 41) * 43) * 47)], [((((13 * 17) * 19) * 23) * 29)], [((((2 * 3) * 5) * 7) * 11)]]])
def _rec_unstack(source: Tensor, *, axis: Dim, declare_rec_time: bool=NotSpecified, name: Optional[Union[(str, rfl.Layer)]]=None) -> Tensor: if (not isinstance(source, Tensor)): raise TypeError(f'rec_unstack: unexpected type for source {source!r}, need tensor') args = {'axis': axis, 'declare_rec_time': declare_rec_time} args = {key: value for (key, value) in args.items() if (value is not NotSpecified)} return rfl.make_layer({'class': 'rec_unstack', 'from': source, **args}, name=(name or 'rec_unstack'))