code
stringlengths
101
5.91M
def mse_loss_with_gmof(pred, target, sigma):
    """Element-wise MSE between *pred* and *target*, robustified with the
    Geman-McClure penalty ``gmof`` at scale *sigma*.

    Returns the unreduced (element-wise) loss tensor.
    NOTE(review): the stray ``_loss`` token preceding this def in the original
    was the tail of a truncated earlier definition and has been dropped.
    """
    raw = F.mse_loss(pred, target, reduction='none')
    return gmof(raw, sigma)
def bench_group(model_list, bench_name, bench_group, bench_args):
    """Run one named benchmark group and return its results."""
    print_stderr('Benchmarking {}s...'.format(bench_name))
    runners = get_nn_runners(*model_list)
    nn_results = bench(runners, bench_group, **bench_args)
    # Trailing blank line separates groups in the log output.
    print_stderr('')
    return nn_results
def define_env(env):
    """Define documentation helper functions for the macro environment.

    NOTE(review): the inner helpers are defined but never attached to *env*
    in the visible code — registration (e.g. ``env.macro``) may have been
    lost in formatting; confirm against the original module.
    """
    def models_by_organization():
        # Join schema model fields with the concrete model objects by name.
        schema = read_schema(SCHEMA_CLASSIC_YAML_FILENAME)
        by_org = defaultdict(list)
        objects_by_name = {obj.name: obj for obj in ALL_MODELS}
        for model_field in schema.models:
            obj = objects_by_name.get(model_field.name, None)
            info: ModelInfo = ModelInfo.from_model_field_and_model_object(model_field, obj)
            by_org[info.creator_organization].append(info)
        # Hide the synthetic "Simple" organization from the listing.
        if 'Simple' in by_org:
            del by_org['Simple']
        return by_org

    def run_expanders():
        return RUN_EXPANDERS

    def render_model_tags(model):
        # Render each tag as inline code, comma-separated.
        return ', '.join(f'`{tag}`' for tag in model.tags)
class MLPEBM_cat(nn.Module):
    """Energy-based model head for categorical inputs: projects each of the
    ``nin`` categorical positions to ``n_proj`` dims, then scores the
    flattened projection with an MLP."""

    def __init__(self, nin, n_proj, n_cat=256, nint=256, nout=1):
        super().__init__()
        self.proj = nn.Linear(n_cat, n_proj)
        self.n_proj = n_proj
        # The MLP consumes all nin positions' projections concatenated.
        self.net = mlp_ebm(nin * n_proj, nint, nout=nout)

    def forward(self, x):
        # x: (batch, positions, n_cat) — presumably one-hot; TODO confirm.
        batch, positions, feat = x.size(0), x.size(1), x.size(2)
        projected = self.proj(x.view(batch * positions, feat))
        flat = projected.view(batch, positions * self.n_proj)
        return self.net(flat)
def validate(opt, val_loader, model, epoch, mode, split):
    """Evaluate image-to-text retrieval on *val_loader*.

    Returns ``(currscore, (r1, r5, meanr))`` where ``currscore`` is R@1.
    In 'inference' mode additionally dumps wrongly-ranked candidate
    sentences to ``wrong_ans.txt`` under ``opt.output_dir``.
    """
    print('Validate {} {}...'.format(mode, split))
    # Encode all images and captions with the current model.
    (img_embs, cap_embs) = encode_finetune_data(model, val_loader, opt.no_context, opt.no_image, opt.log_step, logging.info)
    ((r1, r5, r10, medr, meanr), (rank, top1)) = i2t_finetune(img_embs, cap_embs, measure=opt.measure, return_ranks=True)
    logging.info('Image to text: %.2f, %.2f, %.2f, %.2f, %.2f' % (r1, r5, r10, medr, meanr))
    if mode == 'inference':
        # FIX: close the dataset file deterministically (was json.load(open(...))).
        with open(os.path.join(opt.data_path, 'test.json'), encoding='utf-8') as dataset_file:
            dataset = json.load(dataset_file)
        candidates = []
        for d in dataset:
            dialog = d['dialog']
            replaced_idx = d['replaced_idx']
            candidates.append(dialog[replaced_idx].strip())
        print(len(candidates))
        with open(os.path.join(opt.output_dir, 'wrong_ans.txt'), 'w') as f:
            for (i, idx) in enumerate(top1):
                # Record every query whose top-1 candidate is not itself.
                if i != int(idx):
                    f.write('{} {}\n'.format(i, candidates[int(idx)]))
    currscore = r1
    return (currscore, (r1, r5, meanr))
def build(session_file):
    """Count query frequencies in a tab-separated session file and pickle
    the resulting Counter to ``<session_file>_FREQ.mdl``.

    Each line of *session_file* is one session: queries separated by tabs.
    """
    logger.info('Gathering frequency statistics ...')
    freq_dict = collections.Counter()
    num = 0  # FIX: defined up-front so the final log works on an empty file
    # FIX: context managers guarantee the files close even on error.
    with open(session_file, 'r') as train_file:
        for (num, line) in enumerate(train_file):
            if (num % 1000) == 0:
                logger.info('{} sessions / {} queries'.format(num, len(freq_dict)))
            # Counter supports += directly; the get()-based update was redundant.
            for query in line.strip().split('\t'):
                freq_dict[query] += 1
    logger.info('{} sessions / {} queries'.format(num + 1, len(freq_dict)))
    # FIX: pickle streams are binary — open with 'wb', not 'w'.
    with open(session_file + '_FREQ.mdl', 'wb') as model_file:
        cPickle.dump(freq_dict, model_file)
class HiddenConf(object):
    """A CI job that exists only as a downstream of its parent's build phase."""

    def __init__(self, name, parent_build=None, filters=None):
        self.name = name
        self.parent_build = parent_build
        self.filters = filters

    def gen_workflow_job(self, phase):
        """Return the workflow-job mapping, wired after the parent's build."""
        requires = [self.parent_build.gen_build_name('build')]
        return {
            self.gen_build_name(phase): {
                'requires': requires,
                'filters': self.filters,
            }
        }

    def gen_build_name(self, _):
        # Hidden confs have a single fixed name regardless of phase.
        return self.name
def run_prequential_supervised(stream, learner, max_samples, n_wait, y_expected=None):
    """Prequential (test-then-train) evaluation loop.

    Every ``n_wait`` samples the learner predicts before being fit on the
    sample; collected predictions are asserted against *y_expected* when given.
    """
    stream.restart()
    # FIX: np.int was removed in NumPy 1.20+; the builtin int is equivalent.
    y_pred = np.zeros(max_samples // n_wait, dtype=int)
    y_true = np.zeros(max_samples // n_wait, dtype=int)
    j = 0
    for i in range(max_samples):
        (X, y) = stream.next_sample()
        if (i % n_wait) == 0:
            # Test-then-train: record the prediction before fitting.
            y_pred[j] = int(learner.predict(X)[0])
            y_true[j] = int(y[0])
            j += 1
        learner.partial_fit(X, y)
        assert type(learner.predict(X)) == np.ndarray
    if y_expected is not None:
        # FIX: np.alltrue was removed in NumPy 2.0; np.all is the replacement.
        assert np.all(y_pred == y_expected)
def rand_like(g, self, dtype, layout=None, device=None, pin_memory=False, memory_format=None):
    """ONNX symbolic for ``torch.rand_like``: emits a RandomUniformLike node.

    Falls back to scalar type 6 (float32) when no dtype constant is supplied.
    The layout/device/pin_memory/memory_format arguments are accepted for
    signature compatibility and ignored.
    """
    scalar_type = sym_help._get_const(dtype, 'i', 'dtype')
    if scalar_type is None:
        scalar_type = 6  # default: float32
    return g.op('RandomUniformLike', self, dtype_i=sym_help.scalar_type_to_onnx[scalar_type])
def download_and_extract(root: Path, info: DownloadInfo) -> None:
    """Download the archive described by *info* into *root*, verify its hash,
    extract it into *root*, and delete the archive afterwards.

    The download is skipped when the archive already exists on disk.
    Raises ValueError for unrecognized archive formats.
    """
    root.mkdir(parents=True, exist_ok=True)
    # The archive is named after the last URL path segment.
    downloaded_file_path = (root / info.url.split('/')[(- 1)])
    if downloaded_file_path.exists():
        logger.info('Existing dataset archive found. Skipping download stage.')
    else:
        logger.info('Downloading the %s dataset.', info.name)
        with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=info.name) as progress_bar:
            urlretrieve(url=f'{info.url}', filename=downloaded_file_path, reporthook=progress_bar.update_to)
        # Guard against corrupted or tampered downloads before extracting.
        logger.info('Checking the hash of the downloaded file.')
        hash_check(downloaded_file_path, info.hash)
    logger.info('Extracting dataset into root folder.')
    if (downloaded_file_path.suffix == '.zip'):
        with ZipFile(downloaded_file_path, 'r') as zip_file:
            zip_file.extractall(root)
    elif (downloaded_file_path.suffix in ('.tar', '.gz', '.xz')):
        # safe_extract presumably guards against path-traversal members -- confirm.
        with tarfile.open(downloaded_file_path) as tar_file:
            safe_extract(tar_file, root)
    else:
        raise ValueError(f'Unrecognized file format: {downloaded_file_path}')
    logger.info('Cleaning up files.')
    downloaded_file_path.unlink()
def interpret_args():
    """Parse command-line arguments for training/evaluation/interaction.

    Side effects: creates ``--logdir`` if missing and seeds the random,
    NumPy and torch RNGs from ``--seed``.

    Raises:
        ValueError: when no job mode (train/evaluate/interactive/attention)
            is selected, or when testing is enabled without evaluation.

    Returns:
        argparse.Namespace with all options.
    """

    def _str2bool(value):
        # FIX: argparse's ``type=bool`` returns True for ANY non-empty string
        # (bool('False') is True), so e.g. ``--evaluate False`` was silently
        # parsed as True. Parse the text explicitly instead; the CLI shape
        # (``--flag True/False``) and all defaults are unchanged.
        if isinstance(value, bool):
            return value
        lowered = str(value).lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    # --- Raw and processed data files ---
    parser.add_argument('--raw_train_filename', type=str, default='../atis_data/data/resplit/processed/train_with_tables.pkl')
    parser.add_argument('--raw_dev_filename', type=str, default='../atis_data/data/resplit/processed/dev_with_tables.pkl')
    parser.add_argument('--raw_validation_filename', type=str, default='../atis_data/data/resplit/processed/valid_with_tables.pkl')
    parser.add_argument('--raw_test_filename', type=str, default='../atis_data/data/resplit/processed/test_with_tables.pkl')
    parser.add_argument('--data_directory', type=str, default='processed_data')
    parser.add_argument('--processed_train_filename', type=str, default='train.pkl')
    parser.add_argument('--processed_dev_filename', type=str, default='dev.pkl')
    parser.add_argument('--processed_validation_filename', type=str, default='validation.pkl')
    parser.add_argument('--processed_test_filename', type=str, default='test.pkl')
    parser.add_argument('--database_schema_filename', type=str, default=None)
    parser.add_argument('--embedding_filename', type=str, default=None)
    parser.add_argument('--input_vocabulary_filename', type=str, default='input_vocabulary.pkl')
    parser.add_argument('--output_vocabulary_filename', type=str, default='output_vocabulary.pkl')
    parser.add_argument('--input_key', type=str, default='nl_with_dates')
    # --- Model/feature toggles ---
    parser.add_argument('--anonymize', type=_str2bool, default=False)
    parser.add_argument('--anonymization_scoring', type=_str2bool, default=False)
    parser.add_argument('--use_snippets', type=_str2bool, default=False)
    parser.add_argument('--use_previous_query', type=_str2bool, default=False)
    parser.add_argument('--maximum_queries', type=int, default=1)
    parser.add_argument('--use_copy_switch', type=_str2bool, default=False)
    parser.add_argument('--use_query_attention', type=_str2bool, default=False)
    parser.add_argument('--use_utterance_attention', type=_str2bool, default=False)
    parser.add_argument('--freeze', type=_str2bool, default=False)
    parser.add_argument('--scheduler', type=_str2bool, default=False)
    # --- BERT options ---
    parser.add_argument('--use_bert', type=_str2bool, default=False)
    parser.add_argument('--bert_type_abb', type=str, help='Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS')
    parser.add_argument('--bert_input_version', type=str, default='v1')
    parser.add_argument('--fine_tune_bert', type=_str2bool, default=False)
    parser.add_argument('--lr_bert', default=1e-05, type=float, help='BERT model learning rate.')
    # --- Logging / output ---
    parser.add_argument('--logdir', type=str, default='logs')
    parser.add_argument('--deterministic', type=_str2bool, default=False)
    parser.add_argument('--num_train', type=int, default=(- 1))
    parser.add_argument('--logfile', type=str, default='log.txt')
    parser.add_argument('--results_file', type=str, default='results.txt')
    # --- Architecture sizes ---
    parser.add_argument('--input_embedding_size', type=int, default=300)
    parser.add_argument('--output_embedding_size', type=int, default=300)
    parser.add_argument('--encoder_state_size', type=int, default=300)
    parser.add_argument('--decoder_state_size', type=int, default=300)
    parser.add_argument('--encoder_num_layers', type=int, default=1)
    parser.add_argument('--decoder_num_layers', type=int, default=2)
    parser.add_argument('--snippet_num_layers', type=int, default=1)
    parser.add_argument('--maximum_utterances', type=int, default=5)
    parser.add_argument('--state_positional_embeddings', type=_str2bool, default=False)
    parser.add_argument('--positional_embedding_size', type=int, default=50)
    parser.add_argument('--snippet_age_embedding', type=_str2bool, default=False)
    parser.add_argument('--snippet_age_embedding_size', type=int, default=64)
    parser.add_argument('--max_snippet_age_embedding', type=int, default=4)
    parser.add_argument('--previous_decoder_snippet_encoding', type=_str2bool, default=False)
    parser.add_argument('--discourse_level_lstm', type=_str2bool, default=False)
    parser.add_argument('--use_schema_attention', type=_str2bool, default=False)
    parser.add_argument('--use_encoder_attention', type=_str2bool, default=False)
    parser.add_argument('--use_schema_encoder', type=_str2bool, default=False)
    parser.add_argument('--use_schema_self_attention', type=_str2bool, default=False)
    parser.add_argument('--use_schema_encoder_2', type=_str2bool, default=False)
    # --- Training hyperparameters ---
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--train_maximum_sql_length', type=int, default=200)
    parser.add_argument('--train_evaluation_size', type=int, default=100)
    parser.add_argument('--dropout_amount', type=float, default=0.5)
    parser.add_argument('--initial_patience', type=float, default=10.0)
    parser.add_argument('--patience_ratio', type=float, default=1.01)
    parser.add_argument('--initial_learning_rate', type=float, default=0.001)
    parser.add_argument('--learning_rate_ratio', type=float, default=0.8)
    parser.add_argument('--interaction_level', type=_str2bool, default=True)
    parser.add_argument('--reweight_batch', type=_str2bool, default=False)
    parser.add_argument('--train', type=int, choices=[0, 1], default=0)
    parser.add_argument('--debug', type=_str2bool, default=False)
    # --- Evaluation options ---
    parser.add_argument('--evaluate', type=_str2bool, default=False)
    parser.add_argument('--attention', type=_str2bool, default=False)
    parser.add_argument('--enable_testing', type=_str2bool, default=False)
    parser.add_argument('--use_predicted_queries', type=_str2bool, default=False)
    parser.add_argument('--evaluate_split', type=str, default='dev')
    parser.add_argument('--evaluate_with_gold_forcing', type=_str2bool, default=False)
    parser.add_argument('--eval_maximum_sql_length', type=int, default=1000)
    parser.add_argument('--results_note', type=str, default='')
    parser.add_argument('--compute_metrics', type=_str2bool, default=False)
    parser.add_argument('--reference_results', type=str, default='')
    parser.add_argument('--interactive', type=_str2bool, default=False)
    parser.add_argument('--database_username', type=str, default='aviarmy')
    parser.add_argument('--database_password', type=str, default='aviarmy')
    parser.add_argument('--database_timeout', type=int, default=2)
    # --- Interaction / online-learning options ---
    parser.add_argument('--job', default='test_w_interaction', choices=['test_w_interaction', 'online_learning'], help='Set the job. For parser pretraining, see other scripts.')
    parser.add_argument('--seed', type=int, default=0, help='Random seed.')
    parser.add_argument('--raw_data_directory', type=str, help='The data directory of the raw spider data.')
    parser.add_argument('--num_options', type=str, default='3', help='[INTERACTION] Number of options.')
    parser.add_argument('--user', type=str, default='sim', choices=['sim', 'gold_sim', 'real'], help='[INTERACTION] User type.')
    parser.add_argument('--err_detector', type=str, default='any', help='[INTERACTION] The error detector: (1) prob=x for using policy probability threshold;(2) stddev=x for using Bayesian dropout threshold (need to set --dropout and --passes);(3) any for querying about every policy action;(4) perfect for using a simulated perfect detector.')
    parser.add_argument('--dropout', type=float, default=0.0, help='[INTERACTION] Dropout rate for Bayesian dropout-based uncertainty analysis. This does NOT change the dropout rate in training.')
    parser.add_argument('--passes', type=int, default=1, help='[INTERACTION] Number of decoding passes for Bayesian dropout-based uncertainty analysis.')
    parser.add_argument('--friendly_agent', type=int, default=0, choices=[0, 1], help='[INTERACTION] If 1, the agent will not trigger further interactions if any wrong decision is not resolved during parsing.')
    parser.add_argument('--ask_structure', type=int, default=0, choices=[0, 1], help='[INTERACTION] Set to True to allow questions about query structure (WHERE/GROUP_COL, ORDER/HAV_AGG_v2) in NL.')
    parser.add_argument('--output_path', type=str, default='temp', help='[INTERACTION] Where to save outputs.')
    parser.add_argument('--setting', type=str, default='', choices=['online_pretrain_10p', 'full_train'], help='Model setting; checkpoints will be loaded accordingly.')
    parser.add_argument('--supervision', type=str, default='full_expert', choices=['full_expert', 'misp_neil', 'misp_neil_perfect', 'misp_neil_pos', 'bin_feedback', 'bin_feedback_expert', 'self_train', 'self_train_0.5'], help='[LEARNING] Online learning supervision based on different algorithms.')
    parser.add_argument('--data_seed', type=int, choices=[0, 10, 100], help='[LEARNING] Seed for online learning data.')
    parser.add_argument('--start_iter', type=int, default=0, help='[LEARNING] Starting iteration in online learing.')
    parser.add_argument('--end_iter', type=int, default=(- 1), help='[LEARNING] Ending iteration in online learing.')
    parser.add_argument('--update_iter', type=int, default=1000, help='[LEARNING] Number of iterations per parser update.')
    args = parser.parse_args()
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    if not (args.train or args.evaluate or args.interactive or args.attention):
        raise ValueError('You need to be training or evaluating')
    if args.enable_testing and (not args.evaluate):
        raise ValueError('You should evaluate the model if enabling testing')
    print('## seed: %d' % args.seed)
    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    return args
def predict_sorted_dict():
    """Group ``recs_data`` triples into ``{user: [(item, score), ...]}`` with
    each user's items sorted by score, best first.

    NOTE(review): the stray ``(scope='module')`` preceding this def in the
    original is the tail of a stripped decorator — presumably
    ``@pytest.fixture(scope='module')``; restore it when confirmed.
    """
    by_user = {}
    for (user, item, score) in recs_data:
        by_user.setdefault(user, []).append((item, score))
    return {
        user: sorted(pairs, key=lambda pair: pair[1], reverse=True)
        for user, pairs in by_user.items()
    }
class GCN(torch.nn.Module):
    """Multi-layer GCN: (conv -> BatchNorm -> ReLU -> dropout) per hidden
    layer, followed by an output conv and log-softmax."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
        super(GCN, self).__init__()
        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()
        # Input layer plus (num_layers - 2) hidden layers, each normed.
        dims = [in_channels] + [hidden_channels] * (num_layers - 1)
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            self.convs.append(GCNConv(d_in, d_out, cached=True))
            self.bns.append(torch.nn.BatchNorm1d(d_out))
        # Output layer has no norm/activation.
        self.convs.append(GCNConv(hidden_channels, out_channels, cached=True))
        self.dropout = dropout

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x, adj_t):
        for bn, conv in zip(self.bns, self.convs[:-1]):
            x = conv(x, adj_t)
            x = F.dropout(F.relu(bn(x)), p=self.dropout, training=self.training)
        x = self.convs[-1](x, adj_t)
        return x.log_softmax(dim=-1)
def set_seed(seed):
    """Make runs reproducible: seed NumPy/PyTorch RNGs and force cuDNN into
    deterministic (non-benchmarking) mode."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    # Deterministic conv algorithms; disable the autotuner.
    cudnn.deterministic = True
    cudnn.benchmark = False
class AutoModelForMultipleChoice():
    """Placeholder emitted when PyTorch is unavailable; any use raises via
    ``requires_pytorch``."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # FIX: from_pretrained is called on the class (AutoModelForMultipleChoice
    # .from_pretrained(...)) in the real API, so it must be a classmethod;
    # instance-style calls keep working.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_pytorch(cls)
class BackboneTrial(PyTorchTrial):
    """Determined PyTorchTrial that trains a backbone network on one of
    several benchmark tasks (vision, sEMG, sequence, audio)."""

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        # Also populates self.train_data / self.val_data (and self.test_data
        # when hparams.train is set) as a side effect.
        self.download_directory = self.download_data_from_s3()
        # task -> (num_classes, input_channels)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3), 'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3), 'audio': (200, 1)}
        (n_classes, in_channels) = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        # The "backbone" hparam encodes "depth,width".
        depth = list(map(int, self.hparams.backbone.split(',')))[0]
        width = list(map(int, self.hparams.backbone.split(',')))[1]
        if (self.hparams.task == 'audio'):
            # Audio is multi-label, hence BCE over logits.
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.backbone = Backbone_Audio(depth, n_classes, width, dropRate=self.hparams.droprate, in_channels=in_channels)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.backbone = Backbone_Pt(depth, n_classes, width, dropRate=self.hparams.droprate, in_channels=in_channels)
        total_params = (sum((p.numel() for p in self.backbone.parameters() if p.requires_grad)) / 1000000.0)
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        self.last_eval = 0
        # Nesterov momentum is only valid with momentum > 0.
        nesterov = (self.hparams.nesterov if self.hparams.momentum else False)
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(self.model.parameters(), lr=self.hparams.learning_rate, momentum=self.hparams.momentum, weight_decay=self.hparams.weight_decay, nesterov=nesterov))
        self.lr_scheduler = self.context.wrap_lr_scheduler(lr_scheduler=torch.optim.lr_scheduler.LambdaLR(self.opt, lr_lambda=self.weight_sched, last_epoch=(self.hparams.start_epoch - 1)), step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH)

    def weight_sched(self, epoch) -> Any:
        # Step-decay LR multiplier: x0.2 at roughly 30%/60%/80% of training,
        # or at epochs 60/120/160 for the canonical 200-epoch schedule.
        if (self.hparams.epochs != 200):
            return (((0.2 ** (epoch >= int((0.3 * self.hparams.epochs)))) * (0.2 ** (epoch > int((0.6 * self.hparams.epochs))))) * (0.2 ** (epoch > int((0.8 * self.hparams.epochs)))))
        return (((0.2 ** (epoch >= 60)) * (0.2 ** (epoch >= 120))) * (0.2 ** (epoch >= 160)))

    def download_data_from_s3(self):
        """Fetch the task's dataset from S3 and load the data splits."""
        s3_bucket = self.context.get_data_config()['bucket']
        download_directory = os.getcwd()
        s3 = boto3.client('s3')
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        if self.hparams.train:
            (self.train_data, self.val_data, self.test_data) = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
            self.build_test_data_loader(download_directory)
        else:
            # Evaluation-only mode: the held-out split is used as val_data.
            (self.train_data, _, self.val_data) = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        trainset = self.train_data
        print(len(trainset))
        train_loader = DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, sampler=None, collate_fn=_collate_fn, pin_memory=False, drop_last=True)
        print(len(train_loader))
        return train_loader

    def build_validation_data_loader(self) -> DataLoader:
        valset = self.val_data
        print(len(valset))
        # batch_size=1 because evaluate_full_dataset averages the per-sample
        # (multi-crop/clip) logits itself.
        return DataLoader(valset, sampler=None, num_workers=4, collate_fn=_collate_fn_eval, shuffle=False, batch_size=1, pin_memory=False)

    def build_test_data_loader(self, download_directory):
        testset = self.test_data
        print(len(testset))
        # NOTE(review): this returned string looks like a misplaced class-level
        # section banner rather than a meaningful value -- confirm upstream.
        return '\n Train and Evaluate Methods\n '

    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int) -> Dict[(str, torch.Tensor)]:
        """One optimization step on a (x, _, y) training batch."""
        (x_train, _, y_train) = batch
        self.model.train()
        output = self.model(x_train)
        loss = self.criterion(output, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        return {'loss': loss}

    def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[(str, Any)]:
        """Compute loss and macro mAP over the validation loader."""
        # Evaluation-only audio runs score the held-out set directly.
        if ((not self.hparams.train) and (self.hparams.task == 'audio')):
            return self.evaluate_audio_testset(self.val_data)
        loss_avg = utils_pt.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                (input, target) = batch
                n = input.size(0)
                logits = self.model(input)
                # Average logits over the crops/clips of the single sample.
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average='macro')
        results = {'loss': loss_avg.avg, 'val_mAP': map_value}
        # Every 20th evaluation, also score the audio test set.
        if ((self.hparams.task == 'audio') and ((self.last_eval % 20) == 0)):
            results.update(self.evaluate_audio_testset(self.test_data))
        self.last_eval += 1
        return results

    def evaluate_audio_testset(self, testset) -> Dict[(str, torch.Tensor)]:
        """Compute mAP/mAUC over an audio test set, one sample at a time."""
        cnt = 0
        test_predictions = []
        test_gts = []
        for ix in range(testset.len):
            with torch.no_grad():
                batch = testset[ix]
                (x, y) = batch
                x = x.cuda()
                y_pred = self.model(x)
                # Average over clips, as in evaluate_full_dataset.
                y_pred = y_pred.mean(0).unsqueeze(0)
                sigmoid_preds = torch.sigmoid(y_pred)
                test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
                test_gts.append(y.detach().cpu().numpy()[0])
        test_predictions = np.asarray(test_predictions).astype('float32')
        test_gts = np.asarray(test_gts).astype('int32')
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {'test_mAUC': mAUC, 'test_mAP': mAP}
        return results
class TestParent3(UniqueRepresentation, Parent):
    """Minimal Parent in the Sets category with wrapper elements (fixture)."""

    def __init__(self):
        # Imported lazily to avoid a circular import at module load time.
        from sage.categories.sets_cat import Sets
        sets_category = Sets()
        Parent.__init__(self, category=sets_category)

    class Element(ElementWrapper):
        pass
def hits_at_k(examples, scores, all_answers, verbose=False):
    """Compute filtered Hits@{1,3,5,10} for link-prediction *examples*.

    Known correct answers (plus the dummy/no-op entities) are zeroed out of
    *scores* before ranking, except the target entity itself.
    NOTE: mutates *scores* in place.
    """
    assert len(examples) == scores.shape[0]
    dummy_mask = [DUMMY_ENTITY_ID, NO_OP_ENTITY_ID]
    for i, example in enumerate(examples):
        (e1, e2, r) = example
        e2_multi = list(all_answers[e1][r]) + dummy_mask
        # Zero out all known answers, then restore the target's own score.
        target_score = scores[(i, e2)]
        scores[i][e2_multi] = 0
        scores[i][dummy_mask] = 0
        scores[i][e2] = target_score
    (top_k_scores, top_k_targets) = torch.topk(scores, min(scores.size(1), args.beam_size))
    top_k_targets = top_k_targets.cpu().numpy()
    hits_at_1 = 0
    hits_at_3 = 0
    hits_at_5 = 0
    hits_at_10 = 0
    for i, example in enumerate(examples):
        (e1, e2, r) = example
        pos = np.where(top_k_targets[i] == e2)[0]
        # FIX: the original `if pos:` tested the truthiness of a NumPy array;
        # array([0]) — i.e. the target ranked FIRST — is falsy, so rank-1
        # hits were silently dropped. Test the array's size instead.
        if pos.size > 0:
            pos = pos[0]
            if pos < 10:
                hits_at_10 += 1
            if pos < 5:
                hits_at_5 += 1
            if pos < 3:
                hits_at_3 += 1
            if pos < 1:
                hits_at_1 += 1
    hits_at_1 = float(hits_at_1) / len(examples)
    hits_at_3 = float(hits_at_3) / len(examples)
    hits_at_5 = float(hits_at_5) / len(examples)
    hits_at_10 = float(hits_at_10) / len(examples)
    if verbose:
        print(' = {:.3f}'.format(hits_at_1))
        print(' = {:.3f}'.format(hits_at_3))
        print(' = {:.3f}'.format(hits_at_5))
        print(' = {:.3f}'.format(hits_at_10))
    return (hits_at_1, hits_at_3, hits_at_5, hits_at_10)
def shape_list(x):
    """Return the shape of *x* as a list, preferring static dimensions and
    falling back to dynamic ``tf.shape`` entries where a dim is unknown."""
    x = tf.convert_to_tensor(x)
    # Fully-unknown rank: nothing static to merge; return the dynamic shape.
    if x.get_shape().dims is None:
        return tf.shape(x)
    static = x.get_shape().as_list()
    dynamic = tf.shape(x)
    return [dim if dim is not None else dynamic[i] for i, dim in enumerate(static)]
def main(config):
    """Score the perceptual quality of the video at ``config.dist`` and write
    ``<video_name>,<score>`` to ``config.output``.

    Uses a SlowFast backbone for motion features and a ResNet-50-based
    spatial quality model loaded from ``ckpts/UGC_BVQA_model.pth``.
    """
    device = torch.device('cuda' if config.is_gpu else 'cpu')
    print('using ' + str(device))
    # Motion (temporal) features come from a SlowFast backbone.
    model_motion = slowfast()
    model_motion = model_motion.to(device)
    # Spatial quality model.
    model = UGC_BVQA_model.resnet50(pretrained=False)
    model = torch.nn.DataParallel(model)
    model = model.to(device=device)
    model.load_state_dict(torch.load('ckpts/UGC_BVQA_model.pth'))
    if config.method_name == 'single-scale':
        (video_dist_spatial, video_name) = video_processing_spatial(config.dist)
        (video_dist_motion, video_name) = video_processing_motion(config.dist)
        with torch.no_grad():
            model.eval()
            video_dist_spatial = video_dist_spatial.to(device)
            video_dist_spatial = video_dist_spatial.unsqueeze(dim=0)
            n_clip = len(video_dist_motion)
            # 2048 slow-pathway + 256 fast-pathway channels per clip.
            feature_motion = torch.zeros([n_clip, (2048 + 256)])
            for (idx, ele) in enumerate(video_dist_motion):
                ele = ele.unsqueeze(dim=0)
                ele = ele.permute(0, 2, 1, 3, 4)
                ele = pack_pathway_output(ele, device)
                (ele_slow_feature, ele_fast_feature) = model_motion(ele)
                ele_slow_feature = ele_slow_feature.squeeze()
                ele_fast_feature = ele_fast_feature.squeeze()
                ele_feature_motion = torch.cat([ele_slow_feature, ele_fast_feature])
                ele_feature_motion = ele_feature_motion.unsqueeze(dim=0)
                feature_motion[idx] = ele_feature_motion
            feature_motion = feature_motion.unsqueeze(dim=0)
            outputs = model(video_dist_spatial, feature_motion)
            y_val = outputs.item()
        print('The video name: ' + video_name)
        # FIX: typo "socre" -> "score" in the user-facing message.
        print('The quality score: {:.4f}'.format(y_val))
        # FIX: open(..., 'w') creates the file itself, so the shell-out to
        # `touch` was redundant; a context manager guarantees the handle closes.
        with open(config.output, 'w') as f:
            f.write(video_name)
            f.write(',')
            f.write(str(y_val))
            f.write('\n')
class PSAMask(nn.Module):
    """Point-wise spatial attention mask (PSANet) in 'collect' or
    'distribute' mode, delegating to the ``psa_mask`` op."""

    # Enum values expected by the underlying psa_mask op.
    _TYPE_ENUM = {'collect': 0, 'distribute': 1}

    def __init__(self, psa_type, mask_size=None):
        super(PSAMask, self).__init__()
        assert psa_type in ['collect', 'distribute']
        self.psa_type = psa_type
        self.psa_type_enum = self._TYPE_ENUM[psa_type]
        self.mask_size = mask_size

    def forward(self, input):
        return psa_mask(input, self.psa_type_enum, self.mask_size)

    def __repr__(self):
        return f'{self.__class__.__name__}(psa_type={self.psa_type}, mask_size={self.mask_size})'
class Modulator(nn.Module):
    """Modulation network: each layer consumes the previous hidden state
    concatenated with the latent *z*, and every hidden state is returned."""

    def __init__(self, dim_in, dim_hidden, num_layers):
        super().__init__()
        self.layers = nn.ModuleList()
        for idx in range(num_layers):
            # First layer sees only the latent; later layers see hidden + latent.
            in_features = dim_in if idx == 0 else dim_hidden + dim_in
            self.layers.append(nn.Sequential(nn.Linear(in_features, dim_hidden), nn.LeakyReLU()))
        self.weight_init = init_weights_normal
        self.layers.apply(self.weight_init)

    def forward(self, z):
        hiddens = []
        x = z
        for layer in self.layers:
            x = layer(x)
            hiddens.append(x)
            # Skip-connect the latent into the next layer's input.
            x = torch.cat((x, z), dim=1)
        return tuple(hiddens)
class OutputTransition(nn.Module):
    """Final V-Net stage mapping ``inChans`` feature maps to 2 output channels."""

    def __init__(self, inChans, elu, nll):
        # NOTE(review): `nll` is accepted but unused in the visible code.
        super(OutputTransition, self).__init__()
        self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2)
        self.bn1 = nn.InstanceNorm3d(2)
        self.conv2 = nn.Conv3d(2, 2, kernel_size=1)
        self.relu1 = ELUCons(elu, 2)

    def forward(self, x):
        # conv -> norm -> activation, then a 1x1x1 conv to produce logits.
        hidden = self.relu1(self.bn1(self.conv1(x)))
        return self.conv2(hidden)
def test_nested_globals():
    """A nested function mutating an array through global ``cfg`` indices
    must match the same update performed inline."""
    def assign_from_cfg(arr):
        arr[cfg.q] = cfg.cloned.p

    data = np.random.rand(10)
    expected = np.copy(data)
    expected[cfg.q] = cfg.cloned.p
    assign_from_cfg(data)
    assert np.allclose(data, expected)
def efficientnet_b8(pretrained=False, **kwargs):
    """EfficientNet-B8 (channel multiplier 2.2, depth multiplier 3.6)."""
    return _gen_efficientnet(
        'efficientnet_b8',
        channel_multiplier=2.2,
        depth_multiplier=3.6,
        pretrained=pretrained,
        **kwargs,
    )
def make_dataset(dir):
    """Split DeepFashion images listed in train.lst/test.lst into
    ./fashion_data/train and ./fashion_data/test, cropping each image to the
    central 176x256 region and flattening its relative path into the name.

    NOTE(review): ``new_path`` used at the bottom is never defined, so the
    save branches raise NameError as written — it should presumably be
    derived from ``path`` to match the .lst entries. Confirm upstream.
    """
    images = []  # never populated in the visible code
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    new_root = './fashion_data'
    if (not os.path.exists(new_root)):
        os.mkdir(new_root)
    train_root = './fashion_data/train'
    if (not os.path.exists(train_root)):
        os.mkdir(train_root)
    test_root = './fashion_data/test'
    if (not os.path.exists(test_root)):
        os.mkdir(test_root)
    # File names (one per line, *.jpg) belonging to the training split.
    train_images = []
    train_f = open('./fashion_data/train.lst', 'r')
    for lines in train_f:
        lines = lines.strip()
        if lines.endswith('.jpg'):
            train_images.append(lines)
    # File names belonging to the test split.
    test_images = []
    test_f = open('./fashion_data/test.lst', 'r')
    for lines in test_f:
        lines = lines.strip()
        if lines.endswith('.jpg'):
            test_images.append(lines)
    print(train_images, test_images)
    for (root, _, fnames) in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                # Flatten the directory structure into one file name;
                # assumes '/'-separated paths with >= 5 components -- TODO confirm.
                path_names = path.split('/')
                path_names[3] = path_names[3].replace('_', '')
                path_names[4] = ((path_names[4].split('_')[0] + '_') + ''.join(path_names[4].split('_')[1:]))
                path_names = ''.join(path_names)
                img = Image.open(path)
                # Crop the 256-wide source to the central 176x256 region.
                imgcrop = img.crop((40, 0, 216, 256))
                if (new_path in train_images):
                    imgcrop.save(os.path.join(train_root, path_names))
                elif (new_path in test_images):
                    imgcrop.save(os.path.join(test_root, path_names))
def quadratic_residues(n):
    """Sorted list of the quadratic residues modulo ``abs(n)``.

    Only a in [0, n//2] need be squared, since (n - a)^2 == a^2 (mod n).
    """
    n = abs(int(n))
    squares = {ZZ((a * a) % n) for a in range((n // 2) + 1)}
    return sorted(squares)
def hipify(project_directory, show_detailed=False, extensions=('.cu', '.cuh', '.c', '.cc', '.cpp', '.h', '.in', '.hpp'), output_directory='', includes=(), extra_files=(), out_of_place_only=False, ignores=(), show_progress=True, hip_clang_launch=False, is_pytorch_extension=False, clean_ctx=None):
    """Translate CUDA sources under *project_directory* to HIP.

    When *output_directory* is empty, the tree is copied to
    ``<project_directory>_amd`` and translated there.
    Exits the process when the project folder does not exist.
    """
    if project_directory == '':
        project_directory = os.getcwd()
    if not os.path.exists(project_directory):
        print('The project folder specified does not exist.')
        sys.exit(1)
    if not output_directory:
        # FIX: str.rstrip returns a new string; the original discarded the
        # result, so a trailing '/' yielded an output dir like 'proj/_amd'.
        project_directory = project_directory.rstrip('/')
        output_directory = project_directory + '_amd'
    if not os.path.exists(output_directory):
        shutil.copytree(project_directory, output_directory)
    all_files = list(matched_files_iter(output_directory, includes=includes, ignores=ignores, extensions=extensions, out_of_place_only=out_of_place_only, is_pytorch_extension=is_pytorch_extension))
    # Append any explicitly-requested extra files not already matched.
    all_files_set = set(all_files)
    all_files += [f for f in extra_files if f not in all_files_set]
    preprocess(output_directory, all_files, show_detailed=show_detailed, show_progress=show_progress, hip_clang_launch=hip_clang_launch, is_pytorch_extension=is_pytorch_extension, clean_ctx=clean_ctx)
def snip_download(outfolder='data/downloaded', start=0, end=2313, dl_dedup_set=True):
    """Download metadata parquet shards (indices start..end) plus a dedup
    mask, then filter duplicate rows out of each shard in place.

    NOTE(review): both ``url = '`` assignments below are truncated — the
    actual URLs were stripped (likely by a sanitizer), so this code is not
    syntactically valid as written. Restore the URLs from the original
    source before use.
    NOTE(review): ``abs_ind`` is advanced by the FILTERED shard length
    (``md`` has already been reassigned), which looks like it desynchronizes
    the dedup-mask offset — confirm against upstream.
    """
    metadata_dir = os.path.join(outfolder, 'metadata')
    dedup_set_path = os.path.join(outfolder, 'is_dup_mlp_1024_128_gelu_snn_2layer_notext.npy')
    os.makedirs(metadata_dir, exist_ok=True)
    if dl_dedup_set:
        print('downloading dedup set...')
        url = ' response = requests.get(url) open(dedup_set_path, 'wb').write(response.content)
    # Flat boolean mask marking duplicate rows across all shards.
    is_dup_all = np.load(dedup_set_path).ravel()
    abs_ind = 0
    for n in range(start, end):
        print(f'downloading metadata file {n}/{end}')
        url = f' response = requests.get(url) parquet_path = os.path.join(metadata_dir, f'metadata_{n:04d}.parquet') open(parquet_path, 'wb').write(response.content)
        md = pd.read_parquet(parquet_path)
        # Slice this shard's portion of the global duplicate mask.
        non_dup_chunk = is_dup_all[abs_ind:(abs_ind + len(md.index))]
        non_dup_chunk = np.logical_not(non_dup_chunk)
        # Always keep the first row of each shard.
        non_dup_chunk[0] = True
        md = md[non_dup_chunk]
        md.to_parquet(parquet_path)
        abs_ind += len(md.index)
# NOTE(review): this call expression is the remnant of a stripped decorator
# line (presumably "@test_utils.test(require=..., exclude=...)"); restore it.
_utils.test(require=ti.extension.sparse, exclude=ti.metal)
def test_chain_compare():
    """Exercise Python chained comparisons evaluated inside Taichi code."""
    a = ti.field(ti.i32)
    ti.root.dynamic(ti.i, 256).place(a)
    b = ti.field(ti.i32, shape=())
    c = ti.field(ti.i32, shape=())
    d = ti.field(ti.i32, shape=())

    # NOTE(review): likely missing an @ti.kernel decorator (stripped) -- the
    # chained-compare lowering under test only applies inside a kernel.
    def func():
        b[None] = 2
        c[None] = 3
        d[None] = 3
        # 3 == 3 != 2 < 3 > 2 >= 2 <= 3 -- every link holds, so a[0] is true.
        a[0] = (c[None] == d[None] != b[None] < d[None] > b[None] >= b[None] <= c[None])
        # 2 <= 3 != 3 fails at the middle link, so a[1] is false.
        a[1] = (b[None] <= c[None] != d[None] > b[None] == b[None])

    func()
    assert a[0]
    assert (not a[1])
class TrackingAnything():
    """Bundle of SAM (click segmentation), Cutie (mask tracking) and
    ProPainter (video inpainting) controllers."""

    def __init__(self, sam_checkpoint, cutie_checkpoint, propainter_checkpoint, raft_checkpoint, flow_completion_checkpoint, args):
        self.args = args
        self.samcontroler = SamControler(sam_checkpoint, args.sam_model_type, args.device)
        self.cutie = BaseTracker(cutie_checkpoint, device=args.device)
        self.baseinpainter = ProInpainter(propainter_checkpoint, raft_checkpoint, flow_completion_checkpoint, args.device)

    def first_frame_click(self, image: np.ndarray, points: np.ndarray, labels: np.ndarray, multimask=True):
        """Segment the first frame from user clicks via SAM.

        Returns (mask, logit, painted_image).
        """
        return self.samcontroler.first_frame_click(image, points, labels, multimask)

    def generator(self, images: list, template_mask: np.ndarray):
        """Propagate *template_mask* through *images* with the tracker.

        Returns (masks, logits, painted_images), one entry per frame.
        """
        masks = []
        logits = []
        painted = []
        for frame_idx in tqdm(range(len(images)), desc='Tracking image'):
            # The template mask is only supplied on the first frame; later
            # frames continue from the tracker's internal state.
            if frame_idx == 0:
                mask, logit, painted_image = self.cutie.track(images[frame_idx], template_mask)
            else:
                mask, logit, painted_image = self.cutie.track(images[frame_idx])
            masks.append(mask)
            logits.append(logit)
            painted.append(painted_image)
        return (masks, logits, painted)
class Clipart(general_dataset):
    """Clipart split of the meta-dataset, with the backbone's train/val
    preprocessing pipeline selected according to *mode*.

    NOTE(review): the *transform* argument is always overwritten below —
    kept for signature compatibility.
    """

    def __init__(self, root='data/meta-dataset/clipart', mode='test', backbone_name='resnet12', transform=None):
        assert mode in ['train', 'val', 'test']
        self.mode = mode
        (_, train_process, val_process) = load(backbone_name, jit=False)
        # Validation preprocessing is shared by the val and test splits.
        transform = train_process if mode == 'train' else val_process
        super().__init__(root, transform)
        self.label = self.targets
def get_model_parallel_group():
    """Return Megatron's model-parallel process group, or None when disabled."""
    global _USE_MEGATRON
    if not _USE_MEGATRON:
        return None
    # Imported lazily so fairseq works without the megatron extension installed.
    from fairseq.model_parallel.megatron import mpu
    return mpu.get_model_parallel_group()
def main():
    """Read a bipartite matrix from stdin and print its general adjacency matrix.

    Fix: replaces the Python-2-only `xrange` and `dict.has_key` (both removed
    in Python 3) with `range` and the `in` operator, which behave identically
    on Python 2 as well.
    """
    if len(sys.argv) != 1:
        usage(os.path.basename(sys.argv[0]))
    (m, bipartite_graph) = read_bipartite_matrix(sys.stdin)
    general_graph = bipartite_to_adjmatrix(m, bipartite_graph)
    n = len(general_graph)
    for i in range(n):
        for j in range(n):
            # `j in general_graph[i]` replaces the removed dict.has_key().
            sys.stdout.write('1' if j in general_graph[i] else '0')
            if j < n - 1:
                sys.stdout.write(' ')
        sys.stdout.write('\n')
def get_default_tokenizer():
    """Build a SmilesTokenizer from the vocab file shipped inside the rxnfp package."""
    vocab_path = pkg_resources.resource_filename(
        'rxnfp', 'models/transformers/bert_ft_10k_25s/vocab.txt')
    return SmilesTokenizer(vocab_path, do_lower_case=False)
class GeneratorMLP(nn.Module):
    """MLP generator mapping a 256-d latent to (n_timesteps, feature_dim) samples.

    NOTE(review): output sizing comes from the module-level `args`; the
    `output_dim` constructor argument is accepted but never used (kept for
    interface compatibility with the original).
    """

    def __init__(self, output_dim):
        super(GeneratorMLP, self).__init__()
        self.hidden_dim = 256
        layers = [
            nn.Linear(256, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, args.n_timesteps * args.feature_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        n_samples = z.shape[0]
        cur_device = z.get_device()  # queried but unused, as in the original
        flat = self.model(z)
        return flat.view((n_samples, args.n_timesteps, args.feature_dim))
def test_initializing_example_background_knowledge_2():
    """A Background built from the toy-cancer modes should render every parameter and mode."""
    train, _ = load_toy_cancer()
    bk = Background(modes=train.modes, line_search=True, recursion=True,
                    number_of_clauses=8, number_of_cycles=10)
    assert bk.modes == train.modes
    rendered = str(bk)
    # Every setParam line and every mode declaration must appear verbatim.
    expected_lines = [
        'setParam: nodeSize=2.',
        'setParam: maxTreeDepth=3.',
        'setParam: numOfCycles=10.',
        'setParam: numOfClauses=8.',
        'setParam: lineSearch=true.',
        'setParam: recursion=true.',
        'friends(+Person,-Person).',
        'friends(-Person,+Person).',
        'smokes(+Person).',
        'cancer(+Person).',
    ]
    for line in expected_lines:
        assert line in rendered
# NOTE(review): decorator line appears truncated in this chunk (likely `@dataclass(repr=False)`).
(repr=False)
class UndefinedContentType(FailureContext):
    """Failure reported when a response Content-Type is absent from the API schema."""

    content_type: str  # the Content-Type the response actually carried
    defined_content_types: list[str]  # content types declared by the schema
    message: str  # human-readable description of the failure
    title: str = 'Undocumented Content-Type'
    type: str = 'undefined_content_type'
def _auxiliary_random_forest_word(n, k):
    """Return a random word of n ones and 3n+2k-3 zeros, rotated past its minimum.

    Each 1 contributes +3 and each 0 contributes -1 to a running prefix sum;
    the word is rotated to start just after the position where the strictly
    smallest prefix sum occurs (cycle-lemma style construction).
    """
    from sage.misc.prandom import shuffle
    word = [0] * (3 * n + 2 * k - 3) + [1] * n
    shuffle(word)
    running = 0
    lowest = 0    # smallest prefix sum seen so far
    lowest_at = 0  # index where that minimum occurred
    for pos, letter in enumerate(word):
        running += 3 if letter else -1
        if running < lowest:
            lowest = running
            lowest_at = pos
    return word[lowest_at + 1:] + word[:lowest_at]
def run_task(v):
    """Train TRPO on normalized CartPole using the step size from variant dict `v`."""
    env = normalize(CartpoleEnv())
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    trpo = TRPO(
        env=env,
        policy=policy,
        baseline=baseline,
        batch_size=4000,
        max_path_length=100,
        n_itr=40,
        discount=0.99,
        step_size=v['step_size'],
    )
    trpo.train()
def _test_quantitatively(sdfg):
    """Verify MultiExpansion + SubgraphFusion preserve the SDFG's numerical results.

    Runs the SDFG once before and once after applying the transformations,
    then compares the two pairs of outputs element-wise.
    """
    graph = sdfg.nodes()[0]
    # Random inputs; C and D are duplicated so the pre- and post-transformation
    # runs write into independent output buffers.
    A = np.random.rand(N.get()).astype(np.float64)
    B = np.random.rand(N.get()).astype(np.float64)
    C1 = np.random.rand(N.get()).astype(np.float64)
    C2 = np.random.rand(N.get()).astype(np.float64)
    D1 = np.random.rand(N.get()).astype(np.float64)
    D2 = np.random.rand(N.get()).astype(np.float64)
    # Baseline run on the untransformed SDFG.
    csdfg = sdfg.compile()
    csdfg(A=A, B=B, C=C1, D=D1, N=N)
    del csdfg
    subgraph = SubgraphView(graph, [node for node in graph.nodes()])
    # Maps must be expanded first; fusion operates on the expanded form.
    me = MultiExpansion()
    me.setup_match(subgraph)
    assert (me.can_be_applied(sdfg, subgraph) == True)
    me.apply(sdfg)
    sf = SubgraphFusion()
    sf.setup_match(subgraph)
    assert (sf.can_be_applied(sdfg, subgraph) == True)
    sf.apply(sdfg)
    # Run the transformed SDFG and compare against the baseline outputs.
    csdfg = sdfg.compile()
    csdfg(A=A, B=B, C=C2, D=D2, N=N)
    assert np.allclose(C1, C2)
    assert np.allclose(D1, D2)
def get_subject_label(subject_list, label_name):
    """Map each subject id in `subject_list` to its `label_name` phenotypic value.

    Reads the ABIDE phenotypic CSV located under the module-level `save_path`.
    """
    csv_path = os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')
    wanted = set(subject_list)  # O(1) membership tests per row
    with open(csv_path) as csvfile:
        return {
            row['subject']: row[label_name]
            for row in csv.DictReader(csvfile)
            if row['subject'] in wanted
        }
def random_word(tokens, tokenizer):
    """BERT-style masking: corrupt ~15% of `tokens` in place and return labels.

    Special tokens ([CLS], [SEP], SEP_TOKEN) are never selected.  A selected
    token becomes [MASK] 80% of the time, a random vocab token 10%, and stays
    unchanged 10%; its original vocab id becomes its label.  Unselected
    positions are labelled -1.
    """
    labels = []
    special = ('[CLS]', '[SEP]', SEP_TOKEN)
    for pos, tok in enumerate(tokens):
        roll = random.random()
        if tok in special or roll >= 0.15:
            labels.append(-1)
            continue
        # Rescale the roll to [0, 1) within the selected 15% band.
        roll /= 0.15
        if roll < 0.8:
            tokens[pos] = '[MASK]'
        elif roll < 0.9:
            tokens[pos] = random.choice(list(tokenizer.vocab.items()))[0]
        # else: 10% chance the token is left unchanged.
        try:
            labels.append(tokenizer.vocab[tok])
        except KeyError:
            labels.append(tokenizer.vocab['[UNK]'])
            logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(tok))
    return (tokens, labels)
def log_add(x, y):
    """Return log(exp(x) + exp(y)) computed stably in log space."""
    if x == NEG_INF:
        return y
    if y == NEG_INF:
        return x
    # Anchor at the larger operand and add the log1p correction term.
    hi, lo = (x, y) if y <= x else (y, x)
    return hi + log1pexp(lo - hi)
# NOTE(review): decorator lines appear truncated in this chunk
# (likely `@pytest.mark.parametrize(...)`).
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('inshape, kernel, out_channels, pad, stride, dilation, group, deformable_group, with_bias', [((2, 4, 6, 6), (3, 2), 4, (0, 0), (1, 1), (1, 1), 2, 2, True), ((2, 2, 5, 7), (3, 3), 2, (1, 1), (1, 2), (2, 1), 1, 1, True), ((2, 2, 5, 7), (3, 3), 2, (1, 1), (1, 2), (2, 1), 1, 2, False), ((2, 2, 5, 7), (3, 3), 2, (1, 1), (1, 2), (2, 1), 2, 1, False)])
.parametrize('with_mask', [True, False])
.parametrize('channel_last', [True, False])
.parametrize('base_axis', [1, (- 3)])
def test_forward_backward_2d(inshape, kernel, out_channels, pad, stride, dilation, group, deformable_group, with_mask, channel_last, with_bias, base_axis, seed, ctx, func_name):
    """Forward/backward check of 2-D deformable convolution against the reference impl."""
    if channel_last:
        pytest.skip('channel_last=True is not supported in any backends so far.')
    import platform
    if platform.machine().startswith('arm'):
        pytest.skip('Skip the arm platform temporarily.')
    rng = np.random.RandomState(seed)
    func_args = [base_axis, pad, stride, dilation, group, deformable_group, channel_last]
    in_channels = inshape[base_axis]
    # Weight shape: (out_channels, in_channels // group, *kernel).
    kshape = ((out_channels, (in_channels // group)) + kernel)
    # Offset tensor: 2 displacement values per deformable group per kernel tap.
    offset_channels = (((2 * deformable_group) * kernel[0]) * kernel[1])
    offset_shape = ((inshape[0:base_axis] + (offset_channels,)) + inshape[(base_axis + 1):])
    mask_shape = ((inshape[0:base_axis] + (((deformable_group * kernel[0]) * kernel[1]),)) + inshape[(base_axis + 1):])
    if channel_last:
        # Unreachable today (skipped above); kept for when channel_last support lands.
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple((inshape[i] for i in t.inv_axes))
        t = refs.ChannelLastToFirstTranspose(len(offset_shape), len(kernel))
        offset_shape = tuple((offset_shape[i] for i in t.inv_axes))
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple((kshape[i] for i in t.inv_axes))
    x = rng.randn(*inshape).astype(np.float32)
    w = rng.randn(*kshape).astype(np.float32)
    b = (rng.randn(out_channels).astype(np.float32) if with_bias else None)
    offsets = ((3.8 * rng.rand(*offset_shape).astype(np.float32)) - 1.9)
    # Nudge offsets away from integer grid points, where the bilinear
    # interpolation used by deformable conv has a non-smooth gradient.
    offsets += (np.logical_or((np.abs((offsets - np.floor(offsets))) < 0.1), (np.abs((offsets - np.ceil(offsets))) < 0.1)).astype(int) * 0.5)
    mask = (rng.rand(*mask_shape).astype(np.float32) if with_mask else None)
    inputs = [x, w, offsets, mask, b]
    # Half precision accumulates more error with many input channels.
    atol_half = (1.0 if (in_channels > 64) else 0.15)
    function_tester(rng, F.deformable_convolution, ref_deformable_convolution_2d, inputs, func_args, atol_f=0.0001, atol_b=0.01, atol_accum=1e-05, dstep=0.01, ctx=ctx, func_name=func_name, atol_half=atol_half)
def get_embeddings(input_dim, instance, feature_extractor):
    """Extract L2-normalized feature vectors for one sample or a batch.

    A 2-D `instance` yields a list of unit-norm feature rows (one per input
    row); a 1-D instance yields a single unit-norm feature vector.
    """
    def _embed(sample):
        # Run the extractor on a (1, input_dim) float tensor without gradients.
        with no_grad():
            var = Variable(from_numpy(sample))
            return feature_extractor(var.view((- 1), input_dim).float())

    if len(instance.shape) > 1:
        features = []
        for row in instance:
            out = _embed(row).data.numpy()
            out = out / np.linalg.norm(out)
            features.append(out[0])
    elif len(instance.shape) == 1:
        out = _embed(instance)[0].data.numpy()
        features = out / np.linalg.norm(out)
    return features
def test_conv2d(default_implementation, sdfg_name, use_cpp_dispatcher):
    """Compare a two-layer conv+ReLU torch model against its DaCe-compiled version."""

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv1 = nn.Conv2d(1, 4, 3)
            self.conv2 = nn.Conv2d(4, 4, 3)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

    ptmodel = Model()
    x = torch.rand(1, 1, 8, 8)

    # NOTE(review): decorator line appears truncated in this chunk
    # (likely `@dace_module(...)`); kept verbatim.
    _module(sdfg_name=sdfg_name)
    class TestDecorator(Model):
        pass

    # Wrap the model explicitly and run the DaCe-compiled forward pass.
    dace_model = DaceModule(ptmodel, sdfg_name=(sdfg_name + '_wrapped'), compile_torch_extension=use_cpp_dispatcher)
    dace_output = dace_model(x)
    # Also exercise the decorator-based wrapping path (result unchecked).
    dace_model_decorated = TestDecorator()
    dace_model_decorated(x)
    torch_output = ptmodel(x)
    assert np.allclose(torch_output.detach().numpy(), dace_output, atol=1e-06)
class MultiscaleData(Data):
    """PyG Data subclass carrying per-scale graph attributes.

    Overrides the batching hooks so that scale-specific index tensors
    (cluster maps, edge indices, sample indices) receive the correct offsets
    when examples are collated into a batch.
    """

    def __init__(self, x=None, edge_index=None, edge_attr=None, y=None, pos=None, normal=None, face=None, **kwargs):
        super(MultiscaleData, self).__init__(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y, pos=pos, normal=normal, face=face, **kwargs)

    def __inc__(self, key, value, *args, **kwargs):
        # Offset added to `value` for this attribute when batching.
        if ('batch' in key):
            return (int(value.max()) + 1)
        elif ((key == 'edge_index') or (key == 'face') or (key == 'inlet_index')):
            return self.num_nodes
        elif (('scale' in key) and (('cluster_map' in key) or ('edge_index' in key))):
            # Keys look like 'scaleN_...'; key[:6] recovers the 'scaleN' prefix.
            return (self[(key[:6] + '_cluster_map')].max() + 1)
        elif (('scale' in key) and ('sample_index' in key)):
            # key[5] is the scale digit N: scale 0 indexes the full node set,
            # deeper scales index the previous scale's sample set.
            if (int(key[5]) == 0):
                return self.num_nodes
            else:
                return self[(('scale' + str((int(key[5]) - 1))) + '_sample_index')].size(dim=0)
        else:
            return 0

    def __cat_dim__(self, key, value, *args, **kwargs):
        # Dimension along which this attribute is concatenated when batching.
        if (isinstance(value, SparseTensor) and ('adj' in key)):
            return (0, 1)
        elif (('edge_index' in key) or ('face' in key)):
            return (- 1)
        else:
            return 0
def is_bio_scheme(all_tags):
    """Return True when every non-empty tag follows the BIO scheme."""
    for tag in all_tags:
        if tag in EMPTY_OR_O_TAG:
            continue
        # Valid BIO tags look like 'B-XXX', 'I-XXX', 'B_XXX' or 'I_XXX'.
        if len(tag) <= 2 or tag[:2] not in ('B-', 'I-', 'B_', 'I_'):
            return False
    return True
class EarlyStopping():
    """Stop training when validation loss fails to improve for `patience` calls.

    Call the instance with each new validation loss; it returns True once
    `patience` consecutive non-improving calls have been observed.
    """

    def __init__(self, patience=7):
        self.reset(patience)

    def __call__(self, val_loss):
        """Record a validation loss; return whether training should stop."""
        score = -val_loss  # higher score == lower loss == better
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score:
            # No improvement: spend one unit of the patience budget.
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
        return self.early_stop

    def reset(self, patience=7):
        """Reinitialize all tracking state."""
        self.patience = patience
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # Fix: np.Inf was removed in NumPy 2.0; np.inf is the portable spelling.
        self.val_loss_min = np.inf
class LongT5Config(PretrainedConfig):
    """Configuration for LongT5 models (T5 with local / transient-global attention)."""

    model_type = 'longt5'
    keys_to_ignore_at_inference = ['past_key_values']
    # Map generic HF attribute names onto T5-style field names.
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, local_radius=127, global_block_size=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, encoder_attention_type='local', use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder defaults to the same depth as the encoder.
        self.num_decoder_layers = (num_decoder_layers if (num_decoder_layers is not None) else self.num_layers)
        self.num_heads = num_heads
        self.local_radius = local_radius
        self.global_block_size = global_block_size
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.encoder_attention_type = encoder_attention_type
        self.use_cache = use_cache
        # feed_forward_proj is either 'ACT' or 'gated-ACT'.
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[(- 1)]
        self.is_gated_act = (act_info[0] == 'gated')
        if (((len(act_info) > 1) and (act_info[0] != 'gated')) or (len(act_info) > 2)):
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                f"Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. "
                f"'gated-gelu' or 'relu'")
        # Backwards compatibility: historical 'gated-gelu' meant gelu_new.
        if (feed_forward_proj == 'gated-gelu'):
            self.dense_act_fn = 'gelu_new'
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class AutoModelForAudioXVector(metaclass=DummyObject):
    """Import placeholder that raises unless the torch backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_char():
    """Arrays tagged with the 'char' parameter should convert to a Python string."""
    codes = np.array([ord(ch) for ch in 'hey there'], dtype=np.uint8)
    arr = ak.highlevel.Array(codes, check_valid=True)
    arr = ak.with_parameter(arr, '__array__', 'char')
    # str() still shows the raw code points, but to_list yields the text.
    assert str(arr) == str([ord(ch) for ch in 'hey there'])
    assert ak.to_list(arr) == 'hey there'
def _join_measurements(join, left_measurements, right_measurements):
    """Join the measurement lists of two workspaces according to `join`.

    Raises InvalidWorkspaceOperation when the join semantics would merge
    measurements ambiguously: same-named measurements under 'none', or
    same-named measurements with differing POIs under 'outer'.
    """
    joined_measurements = _join_items(join, left_measurements, right_measurements)
    if (join == 'none'):
        # 'none' forbids any overlap in measurement names between the sides.
        common_measurements = {meas['name'] for meas in left_measurements}.intersection((meas['name'] for meas in right_measurements))
        if common_measurements:
            raise exceptions.InvalidWorkspaceOperation(f'Workspaces cannot have any measurements in common with the same name: {common_measurements}. You can also try a different join operation: {Workspace.valid_joins}.')
    elif (join == 'outer'):
        # Group same-named measurements; they may merge only when POIs agree.
        _measurement_mapping = {}
        for measurement in joined_measurements:
            _measurement_mapping.setdefault(measurement['name'], []).append(measurement)
        incompatible_poi = [measurement_name for (measurement_name, measurements) in _measurement_mapping.items() if (len({measurement['config']['poi'] for measurement in measurements}) > 1)]
        if incompatible_poi:
            raise exceptions.InvalidWorkspaceOperation(f'Workspaces cannot have the same measurements with incompatible POI: {incompatible_poi}.')
        joined_measurements = []
        for (measurement_name, measurements) in _measurement_mapping.items():
            if (len(measurements) != 1):
                # Merge duplicates: shared POI plus the joined parameter configs.
                new_measurement = {'name': measurement_name, 'config': {'poi': measurements[0]['config']['poi'], 'parameters': _join_parameter_configs(measurement_name, *(measurement['config']['parameters'] for measurement in measurements))}}
            else:
                new_measurement = measurements[0]
            joined_measurements.append(new_measurement)
    return joined_measurements
# NOTE(review): decorator line appears truncated in this chunk
# (likely `@pytest.mark.parametrize`).
.parametrize('seed', [313])
def test_function_context(seed):
    """A function's parent records the context it was created under, and that
    recorded context can be re-entered via context_scope."""
    rng = np.random.RandomState(313)
    xd = rng.randn(2, 3)
    x = nn.Variable.from_numpy_array(xd)
    ctx1 = nn.Context(backend=['cpu:float'], array_class='CpuCachedArray', device_id='1')
    with nn.context_scope(ctx1):
        y = F.relu(x)
    ctx0 = nn.Context(backend=['cpu:float'], array_class='CpuCachedArray', device_id='0')
    # Different device_id must yield a different context string.
    assert (str(ctx0) != str(ctx1))
    assert (str(ctx1) == str(y.parent.context))
    # Re-entering the recorded context reproduces it exactly.
    with nn.context_scope(y.parent.context):
        z = F.relu(x)
    assert (str(y.parent.context) == str(z.parent.context))
def Manifold(dim: int, name: Optional[str], latex_name: Optional[str]=None, field: str='real', structure: Optional[str]=None, start_index: int=0, **extra_kwds) -> Union[(TopologicalManifold, DifferentiableManifold)]:
    """Factory returning a manifold of the requested structure.

    Dispatches on `structure` ('topological', 'differentiable'/'smooth',
    'pseudo-Riemannian'/'Riemannian'/'Lorentzian'/'degenerate_metric'),
    inferring a structure from `extra_kwds` when none is given.  If `ambient`
    is present in `extra_kwds`, the corresponding submanifold type is built.
    """
    # Heavy sage modules are imported lazily inside the factory.
    from sage.rings.infinity import infinity
    from sage.manifolds.differentiable.manifold import DifferentiableManifold
    from sage.manifolds.differentiable.pseudo_riemannian import PseudoRiemannianManifold
    from sage.manifolds.differentiable.degenerate import DegenerateManifold
    from sage.manifolds.topological_submanifold import TopologicalSubmanifold
    from sage.manifolds.differentiable.differentiable_submanifold import DifferentiableSubmanifold
    from sage.manifolds.differentiable.pseudo_riemannian_submanifold import PseudoRiemannianSubmanifold
    from sage.manifolds.differentiable.degenerate_submanifold import DegenerateSubmanifold
    global _manifold_id
    _manifold_id += 1
    # Each construction gets a fresh tag so UniqueRepresentation caching
    # never returns a previously built manifold.
    unique_tag = (lambda : (getrandbits(128) * _manifold_id))
    if (structure is None):
        # Metric-related kwargs imply a pseudo-Riemannian structure.
        if any(((extra_kwds.get(x, None) is not None) for x in ('metric_name', 'metric_latex_name', 'signature'))):
            structure = 'pseudo-Riemannian'
    if (structure is None):
        # Otherwise infer from the differentiability degree.
        diff_degree = extra_kwds.get('diff_degree', infinity)
        if (diff_degree == infinity):
            structure = 'smooth'
        elif (diff_degree > 0):
            structure = 'differentiable'
        else:
            structure = 'topological'
    if (structure in ['topological', 'top']):
        if ((field == 'real') or isinstance(field, sage.rings.abc.RealField)):
            structure = RealTopologicalStructure()
        else:
            structure = TopologicalStructure()
        if ('ambient' in extra_kwds):
            ambient = extra_kwds['ambient']
            return TopologicalSubmanifold(dim, name, field, structure, ambient=ambient, latex_name=latex_name, start_index=start_index, unique_tag=unique_tag())
        return TopologicalManifold(dim, name, field, structure, latex_name=latex_name, start_index=start_index, unique_tag=unique_tag())
    elif (structure in ['differentiable', 'diff', 'smooth']):
        if ('diff_degree' in extra_kwds):
            diff_degree = extra_kwds['diff_degree']
            # 'smooth' demands infinite differentiability.
            if ((structure == 'smooth') and (diff_degree != infinity)):
                raise ValueError(('diff_degree = {} is '.format(diff_degree) + 'not compatible with a smooth structure'))
        else:
            diff_degree = infinity
        if ((field == 'real') or isinstance(field, sage.rings.abc.RealField)):
            structure = RealDifferentialStructure()
        else:
            structure = DifferentialStructure()
        if ('ambient' in extra_kwds):
            ambient = extra_kwds['ambient']
            return DifferentiableSubmanifold(dim, name, field, structure, ambient=ambient, diff_degree=diff_degree, latex_name=latex_name, start_index=start_index, unique_tag=unique_tag())
        return DifferentiableManifold(dim, name, field, structure, diff_degree=diff_degree, latex_name=latex_name, start_index=start_index, unique_tag=unique_tag())
    elif (structure in ['pseudo-Riemannian', 'Riemannian', 'Lorentzian', 'degenerate_metric']):
        diff_degree = extra_kwds.get('diff_degree', infinity)
        metric_name = extra_kwds.get('metric_name', None)
        metric_latex_name = extra_kwds.get('metric_latex_name', None)
        # Determine the metric signature per structure kind.
        if (structure == 'pseudo-Riemannian'):
            signature = extra_kwds.get('signature', None)
        elif (structure == 'Riemannian'):
            signature = dim
        elif (structure == 'degenerate_metric'):
            signature = (0, (dim - 1), 1)
        elif (structure == 'Lorentzian'):
            if ('signature' in extra_kwds):
                signat = extra_kwds['signature']
                # Accept 'positive'/'negative' keywords or the numeric value.
                if ((signat == 'positive') or (signat == (dim - 2))):
                    signature = (dim - 2)
                elif ((signat == 'negative') or (signat == (2 - dim))):
                    signature = (2 - dim)
                else:
                    raise ValueError((('signature {} not '.format(signat) + 'compatible with a Lorentzian ') + 'manifold of dimension {}'.format(dim)))
            else:
                signature = (dim - 2)
        if ('ambient' in extra_kwds):
            ambient = extra_kwds['ambient']
            if (structure == 'degenerate_metric'):
                return DegenerateSubmanifold(dim, name, ambient=ambient, metric_name=metric_name, signature=signature, diff_degree=diff_degree, latex_name=latex_name, metric_latex_name=metric_latex_name, start_index=start_index, unique_tag=unique_tag())
            return PseudoRiemannianSubmanifold(dim, name, ambient=ambient, metric_name=metric_name, signature=signature, diff_degree=diff_degree, latex_name=latex_name, metric_latex_name=metric_latex_name, start_index=start_index, unique_tag=unique_tag())
        if (structure == 'degenerate_metric'):
            return DegenerateManifold(dim, name, metric_name=metric_name, signature=signature, diff_degree=diff_degree, latex_name=latex_name, metric_latex_name=metric_latex_name, start_index=start_index, unique_tag=unique_tag())
        return PseudoRiemannianManifold(dim, name, metric_name=metric_name, signature=signature, diff_degree=diff_degree, latex_name=latex_name, metric_latex_name=metric_latex_name, start_index=start_index, unique_tag=unique_tag())
    raise NotImplementedError(('manifolds of type {} are '.format(structure) + 'not implemented'))
def FFT_selection(vio_nodes, dist_matrix, k=10):
    """Pick up to k diverse nodes via farthest-first traversal.

    Starts from a random node, then repeatedly adds the candidate whose
    minimum distance to the already-chosen set is largest.  `dist_matrix`
    is indexed by (i, j) pairs of positions within `vio_nodes`.
    """
    assert k > 1, 'invalid k'
    if len(vio_nodes) <= 1:
        return vio_nodes
    start = np.random.randint(len(vio_nodes))
    chosen = [start]
    # Minimum distance of each remaining candidate to the chosen set.
    nearest = {}
    far_dist, far_idx = 0, 0
    for idx in range(len(vio_nodes)):
        if idx == start:
            continue
        dist = dist_matrix[(idx, start)]
        nearest[idx] = dist
        if dist > far_dist:
            far_dist, far_idx = dist, idx
    del nearest[far_idx]
    chosen.append(far_idx)
    for _ in range(2, k):
        if not nearest:
            break
        far_dist, far_idx = 0, 0
        for cand, cur in nearest.items():
            # Tighten the candidate's min-distance using the newest pick.
            dist = min(cur, dist_matrix[(cand, chosen[-1])])
            if dist < cur:
                nearest[cand] = dist
            if dist > far_dist:
                far_dist, far_idx = dist, cand
        del nearest[far_idx]
        chosen.append(far_idx)
    return [vio_nodes[c] for c in chosen]
def add_arguments(parser=None):
    """Register this tool's command-line options on `parser` and return it.

    When `parser` is None, a new ArgumentParser is created using the
    module-level `help` description string.
    """
    if parser is None:
        parser = argparse.ArgumentParser('Script to ' + help)

    parser.add_argument('file', help='path to input particle file')
    parser.add_argument('-o', '--output', help='path to output directory')
    parser.add_argument(
        '--format',
        dest='_from',
        choices=['auto', 'coord', 'star'],
        default='auto',
        help='file format of the INPUT file. outputs will be written in the same format. (default: detect format automatically based on file extension)')
    parser.add_argument(
        '--suffix', default='',
        help='suffix to append to file names (default: none)')
    # Default of -inf keeps every particle unless a threshold is given.
    parser.add_argument(
        '-t', '--threshold', type=float, default=-np.inf,
        help='threshold the particles by score (optional)')
    return parser
def visualize(state_list, env_name, num_steps):
    """Render a recorded brax trajectory as embedded HTML inside Streamlit."""
    env = envs.create(env_name=env_name, episode_length=num_steps)
    qp = state_list.qp
    frames = []
    for t in range(qp.ang.shape[0]):
        # Rebuild one QP snapshot per timestep from the stacked trajectory.
        frames.append(brax.QP(
            np.array(qp.pos[(t, 0)]),
            np.array(qp.rot[(t, 0)]),
            np.array(qp.vel[(t, 0)]),
            np.array(qp.ang[(t, 0)]),
        ))
    components.html(html.render(env.sys, frames), height=500)
def download_image_from_url_test(url):
    """Download `url` into the 'test' subdirectory of the module-level storage_dir."""
    target = os.path.join(storage_dir, 'test', os.path.basename(url))
    download_file(url, target)
def lisp_to_nested_expression(lisp_string: str) -> List:
    """Parse a whitespace-tokenized LISP string into nested Python lists.

    Example: "(a (b c))" -> ['a', ['b', 'c']]
    """
    stack: List = []
    current: List = []
    for token in lisp_string.split():
        # Each leading '(' opens a nested list.
        while token.startswith('('):
            child: List = []
            current.append(child)
            stack.append(current)
            current = child
            token = token[1:]
        current.append(token.replace(')', ''))
        # Each trailing ')' closes the innermost open list.
        while token.endswith(')'):
            current = stack.pop()
            token = token[:-1]
    return current[0]
def save_npy(data, save_name='op', save_dir='.'):
    """Insert the custom 'save_npy' MXNet symbol that dumps `data` under save_dir."""
    return mx.symbol.Custom(data=data, save_name=save_name, save_dir=save_dir, op_type='save_npy')
def find_ufunc(behavior: (Mapping | None), signature: tuple) -> (UfuncLike | None):
    """Find a custom ufunc implementation registered for `signature`.

    `signature` is (ufunc_or_name, *operand keys).  Returns None when any
    slot of the signature is unresolved or no registration matches.
    """
    if all(((s is not None) for s in signature)):
        behavior = overlay_behavior(behavior)
        # All-string signatures allow a direct dict lookup.
        if all((isinstance(x, str) for x in signature)):
            return behavior.get(signature)
        else:
            # Otherwise match structurally: same first slot, and each
            # registered operand equal to — or a superclass of — the actual one.
            for (key, custom) in behavior.items():
                if (isinstance(key, tuple) and (len(key) == len(signature)) and (key[0] == signature[0]) and all((((k == s) or (isinstance(k, type) and isinstance(s, type) and issubclass(s, k))) for (k, s) in zip(key[1:], signature[1:])))):
                    return custom
    return None
class MXMNet(nn.Module):
    """MXMNet: molecular GNN alternating global and local message passing.

    Distances and angles are expanded in Bessel/spherical bases, projected by
    small MLPs, and fed to Global_MP / Local_MP layers; per-node outputs are
    sum-pooled into one scalar per graph.
    """

    def __init__(self, config: Config, num_spherical=7, num_radial=6, envelope_exponent=5):
        super(MXMNet, self).__init__()
        self.dim = config.dim
        self.n_layer = config.n_layer
        self.cutoff = config.cutoff
        # One learnable embedding row per node type (5 rows of size dim).
        self.embeddings = nn.Parameter(torch.ones((5, self.dim)))
        # Radial bases: local edges use a fixed cutoff of 5, global edges
        # use the configured cutoff.
        self.rbf_l = BesselBasisLayer(16, 5, envelope_exponent)
        self.rbf_g = BesselBasisLayer(16, self.cutoff, envelope_exponent)
        self.sbf = SphericalBasisLayer(num_spherical, num_radial, 5, envelope_exponent)
        # Projections from basis size to the model dimension.
        self.rbf_g_mlp = MLP([16, self.dim])
        self.rbf_l_mlp = MLP([16, self.dim])
        self.sbf_1_mlp = MLP([(num_spherical * num_radial), self.dim])
        self.sbf_2_mlp = MLP([(num_spherical * num_radial), self.dim])
        self.global_layers = torch.nn.ModuleList()
        for layer in range(config.n_layer):
            self.global_layers.append(Global_MP(config))
        self.local_layers = torch.nn.ModuleList()
        for layer in range(config.n_layer):
            self.local_layers.append(Local_MP(config))
        self.init()

    def init(self):
        # Uniform init in [-sqrt(3), sqrt(3)] for the node embeddings.
        stdv = math.sqrt(3)
        self.embeddings.data.uniform_((- stdv), stdv)

    def indices(self, edge_index, num_nodes):
        """Build the triplet and pair index tensors used for the angle terms.

        Returns indices for k->j->i triplets (angle_1) and the j1/j2 pair
        construction (angle_2), plus the matching edge-id gathers.
        """
        (row, col) = edge_index
        # Sparse adjacency whose values are edge ids, for neighbor lookups.
        value = torch.arange(row.size(0), device=row.device)
        adj_t = SparseTensor(row=col, col=row, value=value, sparse_sizes=(num_nodes, num_nodes))
        adj_t_row = adj_t[row]
        num_triplets = adj_t_row.set_value(None).sum(dim=1).to(torch.long)
        idx_i = col.repeat_interleave(num_triplets)
        idx_j = row.repeat_interleave(num_triplets)
        idx_k = adj_t_row.storage.col()
        # Drop degenerate triplets whose endpoints coincide.
        mask = (idx_i != idx_k)
        (idx_i_1, idx_j, idx_k) = (idx_i[mask], idx_j[mask], idx_k[mask])
        idx_kj = adj_t_row.storage.value()[mask]
        idx_ji_1 = adj_t_row.storage.row()[mask]
        adj_t_col = adj_t[col]
        num_pairs = adj_t_col.set_value(None).sum(dim=1).to(torch.long)
        idx_i_2 = row.repeat_interleave(num_pairs)
        idx_j1 = col.repeat_interleave(num_pairs)
        idx_j2 = adj_t_col.storage.col()
        idx_ji_2 = adj_t_col.storage.row()
        idx_jj = adj_t_col.storage.value()
        return (idx_i_1, idx_j, idx_k, idx_kj, idx_ji_1, idx_i_2, idx_j1, idx_j2, idx_jj, idx_ji_2)

    def forward(self, data):
        x = data.x
        edge_index = data.edge_index
        pos = data.pos
        batch = data.batch
        h = torch.index_select(self.embeddings, 0, x.long())
        # Local graph: the provided edges with self-loops removed.
        (edge_index_l, _) = remove_self_loops(edge_index)
        (j_l, i_l) = edge_index_l
        dist_l = (pos[i_l] - pos[j_l]).pow(2).sum(dim=(- 1)).sqrt()
        # Global graph: radius neighbors within the configured cutoff.
        (row, col) = radius(pos, pos, self.cutoff, batch, batch, max_num_neighbors=500)
        edge_index_g = torch.stack([row, col], dim=0)
        (edge_index_g, _) = remove_self_loops(edge_index_g)
        (j_g, i_g) = edge_index_g
        dist_g = (pos[i_g] - pos[j_g]).pow(2).sum(dim=(- 1)).sqrt()
        (idx_i_1, idx_j, idx_k, idx_kj, idx_ji, idx_i_2, idx_j1, idx_j2, idx_jj, idx_ji_2) = self.indices(edge_index_l, num_nodes=h.size(0))
        # Angles computed as atan2(|cross|, dot) for numerical stability.
        (pos_ji_1, pos_kj) = ((pos[idx_j] - pos[idx_i_1]), (pos[idx_k] - pos[idx_j]))
        a = (pos_ji_1 * pos_kj).sum(dim=(- 1))
        b = torch.cross(pos_ji_1, pos_kj).norm(dim=(- 1))
        angle_1 = torch.atan2(b, a)
        (pos_ji_2, pos_jj) = ((pos[idx_j1] - pos[idx_i_2]), (pos[idx_j2] - pos[idx_j1]))
        a = (pos_ji_2 * pos_jj).sum(dim=(- 1))
        b = torch.cross(pos_ji_2, pos_jj).norm(dim=(- 1))
        angle_2 = torch.atan2(b, a)
        # Basis expansions, then projection to the model dimension.
        rbf_g = self.rbf_g(dist_g)
        rbf_l = self.rbf_l(dist_l)
        sbf_1 = self.sbf(dist_l, angle_1, idx_kj)
        sbf_2 = self.sbf(dist_l, angle_2, idx_jj)
        rbf_g = self.rbf_g_mlp(rbf_g)
        rbf_l = self.rbf_l_mlp(rbf_l)
        sbf_1 = self.sbf_1_mlp(sbf_1)
        sbf_2 = self.sbf_2_mlp(sbf_2)
        # Alternate global/local message passing, accumulating node outputs.
        node_sum = 0
        for layer in range(self.n_layer):
            h = self.global_layers[layer](h, rbf_g, edge_index_g)
            (h, t) = self.local_layers[layer](h, rbf_l, sbf_1, sbf_2, idx_kj, idx_ji, idx_jj, idx_ji_2, edge_index_l)
            node_sum += t
        # Sum-pool node contributions into one value per graph.
        output = global_add_pool(node_sum, batch)
        return output.view((- 1))
def main(args):
    """Concatenate the given arrow datasets, shuffle, and save to the output folder."""
    parts = [
        load_dataset(path, split='train', data_files='*.arrow')
        for path in args.dataset
    ]
    merged = concatenate_datasets(parts)
    merged = merged.shuffle()
    merged.save_to_disk(args.output_folder)
def weights_init_G(m):
    """Generator init: Xavier-normal weights (gain 0.1) and zero bias on Conv layers."""
    if 'Conv' in m.__class__.__name__:
        nn.init.xavier_normal_(m.weight, 0.1)
        # m.bias may be None when the layer was built with bias=False.
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0)
class TagToken(ElementSetToken):
    """Selector token matching elements by tag name (and optional classes)."""

    def __init__(self, tag, classes=None):
        super(TagToken, self).__init__(classes)
        self._tag = tag

    def _execute(self, env):
        """Return the ElementSet of env elements whose tag and classes match."""
        matches = []
        for elem in env.elements:
            if self._tag == elem.tag and self._class_match(elem):
                matches.append(elem)
        return ElementSet(matches)

    def return_type(self):
        return ElementSet

    def __str__(self):
        return 'TagToken({}, {})'.format(self._tag, self._classes)
class LegacyFairseqCriterion(FairseqCriterion):
    """Back-compat shim for criterions still configured via an argparse.Namespace."""

    def __init__(self, args, task):
        super().__init__(task=task)
        self.args = args
        utils.deprecation_warning('Criterions should take explicit arguments instead of an argparse.Namespace object, please update your criterion by extending FairseqCriterion instead of LegacyFairseqCriterion.')

    @classmethod
    def build_criterion(cls, args, task):
        """Construct the criterion from an argparse namespace.

        Fix: this factory names its first parameter `cls` and is invoked on
        the class, so it must be a @classmethod; without the decorator,
        `args` would have been bound as `cls` when called on an instance.
        """
        return cls(args, task)
def calculate_CLs(bkgonly_json, signal_patch_json):
    """Patch a background-only workspace with a signal and run the qtilde hypotest.

    Returns (CLs_observed as list, expected CLs band) for mu = 1.
    """
    workspace = pyhf.workspace.Workspace(bkgonly_json)
    model = workspace.model(
        measurement_name=None,
        patches=[signal_patch_json],
        modifier_settings={
            'normsys': {'interpcode': 'code4'},
            'histosys': {'interpcode': 'code4p'},
        },
    )
    result = pyhf.infer.hypotest(
        1.0, workspace.data(model), model,
        test_stat='qtilde', return_expected_set=True,
    )
    observed, expected_set = result[0], result[-1]
    return (observed.tolist(), expected_set)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Initial MobileNet conv block: ZeroPad -> Conv2D -> BatchNorm -> ReLU6."""
    # BatchNorm axis depends on the backend's image data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    # Apply the width multiplier to the filter count.
    width = int(filters * alpha)
    x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
    x = Conv2D(width, kernel, padding='valid', use_bias=False,
               strides=strides, name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='conv1_bn')(x)
    return Activation(relu6, name='conv1_relu')(x)
def test_success_remove_option_type():
    """Indexing a doubly nested list array with a nested integer index should succeed."""
    data_inner = ak.contents.ListOffsetArray(
        ak.index.Index64(np.array([0, 2, 2, 3], dtype=np.int64)),
        ak.contents.NumpyArray(np.array([0, 1, 2], dtype=np.int64)))
    array = ak.Array(
        ak.contents.ListOffsetArray(
            ak.index.Index64(np.array([1, 3], dtype=np.int64)), data_inner),
        check_valid=True)
    idx_inner = ak.contents.ListOffsetArray(
        ak.index.Index64(np.array([0, 0, 1], dtype=np.int64)),
        ak.contents.NumpyArray(np.array([0], dtype=np.int64)))
    index = ak.Array(
        ak.contents.ListOffsetArray(
            ak.index.Index64(np.array([0, 2], dtype=np.int64)), idx_inner),
        check_valid=True)
    assert to_list(array[index]) == [[[], [2]]]
# NOTE(review): route decorator appears truncated in this chunk
# (likely `@app.route('/index.html')`).
('/index.html')
def read_index():
    """Render the automatically generated HTML index with the current debug flag."""
    return render_template(AUTOMATIC_HTML_FILE, server_debug_mode=server_debug_mode)
class TestNeighSampler(unittest.TestCase):
    """Tests for the uniform neighbor sampler."""

    def setUp(self) -> None:
        self.adjacency = test_graph()
        self.n = self.adjacency.shape[0]

    def test_uni_node_sampler(self):
        """Sampling preserves the matrix shape and caps each degree at sample_size."""
        sampler = UniformNeighborSampler(sample_size=2)
        sampled = sampler(self.adjacency)
        self.assertTrue(sampled.shape == self.adjacency.shape)
        self.assertTrue(all(get_degrees(sampled) <= 2))
def Convolutional_Block(inputs, shortcut, num_filters, name, is_training):
    """VDCNN conv block: two 1-D conv + ReLU layers with an optional residual shortcut.

    NOTE(review): indentation reconstructed from a flattened source; the
    shortcut addition is placed after both conv layers, matching the common
    VDCNN reference implementation — confirm against the original file.
    """
    print(('-' * 20))
    print('Convolutional Block', str(num_filters), name)
    print(('-' * 20))
    with tf.variable_scope(((('conv_block_' + str(num_filters)) + '_') + name)):
        for i in range(2):
            with tf.variable_scope(('conv1d_%s' % str(i))):
                # Kernel width 3 over the current channel depth.
                filter_shape = [3, inputs.get_shape()[2], num_filters]
                W = tf.get_variable(name='W', shape=filter_shape, initializer=he_normal, regularizer=regularizer)
                inputs = tf.nn.conv1d(inputs, W, stride=1, padding='SAME')
                inputs = tf.nn.relu(inputs)
                print('Conv1D:', inputs.get_shape())
        print(('-' * 20))
        if (shortcut is not None):
            print(('-' * 5))
            print('Optional Shortcut:', shortcut.get_shape())
            print(('-' * 5))
            # Residual connection: add the incoming shortcut tensor.
            return (inputs + shortcut)
        return inputs
class KeywordsOper():
    """Database helpers for the search-keyword table."""

    # NOTE(review): decorator lines appear truncated in this chunk; the `cls`
    # first parameters suggest classmethod-style decorators were present.
    def get_search_keywords(cls):
        """Return (keyword, id) pairs for all enabled keywords."""
        return db_session.query(KeyWords.keyword, KeyWords.id).filter(text('enable=1')).all()

    _commit_decorator
    def set_useless_keyword(cls, keyword):
        """Disable `keyword` so it is excluded from future searches."""
        search_word = db_session.query(KeyWords).filter((KeyWords.keyword == keyword)).first()
        search_word.enable = 0
        db_session.commit()
class clear_and_catch_warnings(warnings.catch_warnings):
    """catch_warnings variant that also clears modules' __warningregistry__.

    Python suppresses repeated warnings via each module's __warningregistry__;
    clearing it on entry makes warnings fire reliably inside the block, and
    the saved copies are restored on exit so outer state is unaffected.
    """

    # Modules subclasses always want cleared, merged with the `modules` arg.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        self._warnreg_copies = {}
        super(clear_and_catch_warnings, self).__init__(record=record)

    def __enter__(self):
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                mod_reg = mod.__warningregistry__
                # Save a copy so __exit__ can restore the pre-entry state.
                self._warnreg_copies[mod] = mod_reg.copy()
                mod_reg.clear()
        return super(clear_and_catch_warnings, self).__enter__()

    def __exit__(self, *exc_info):
        super(clear_and_catch_warnings, self).__exit__(*exc_info)
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                # Discard anything registered inside the block, then restore.
                mod.__warningregistry__.clear()
                if (mod in self._warnreg_copies):
                    mod.__warningregistry__.update(self._warnreg_copies[mod])
def random_simplicial_complex(level=1, p=0.5):
    """Return a RandomComplex with 2..4*level vertices and a random dimension."""
    num_vertices = randint(2, 4 * level)
    dimension = randint(1, num_vertices)
    return RandomComplex(num_vertices, dimension, p)
def register_Ns3CallbackImpl__Void_Unsigned_long_Unsigned_short_Unsigned_short_Ns3UeManagerState_Ns3UeManagerState_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for this ns-3 CallbackImpl specialization.

    Auto-generated pybindgen registration: constructors, type-id accessors,
    and operator() (exposed as __call__) for a void callback taking
    (ulong, ushort, ushort, UeManager::State, UeManager::State).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned long, unsigned short, unsigned short, ns3::UeManager::State, ns3::UeManager::State, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('operator()', 'void', [param('long unsigned int', 'arg0'), param('short unsigned int', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::UeManager::State', 'arg3'), param('ns3::UeManager::State', 'arg4')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def test_legal_action():
    """Check _legal_action_mask against hand-computed masks for a fixed board.

    Action encoding: index = 6 * source + die, with 6*26 total actions.
    The '+ 2' in the expected indices shifts board points into the action
    source space — assumed to account for bar/off slots; TODO confirm.
    """
    board = make_test_boad()
    # Two distinct playable dice (values 3 and 2; -1 = unused slot).
    playable_dice = jnp.array([3, 2, (- 1), (- 1)], dtype=jnp.int32)
    expected_legal_action_mask: jnp.ndarray = jnp.zeros((6 * 26), dtype=jnp.bool_)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * (19 + 2)) + 3)].set(True)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * (20 + 2)) + 2)].set(True)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * (20 + 2)) + 3)].set(True)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * (21 + 2)) + 2)].set(True)
    legal_action_mask = _legal_action_mask(board, playable_dice)
    print(jnp.where((legal_action_mask != 0))[0])
    print(jnp.where((expected_legal_action_mask != 0))[0])
    assert (expected_legal_action_mask == legal_action_mask).all()
    # Doubles: four copies of die 5 — only one source move is legal here.
    playable_dice = jnp.array([5, 5, 5, 5], dtype=jnp.int32)
    expected_legal_action_mask = jnp.zeros((6 * 26), dtype=jnp.bool_)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * (19 + 2)) + 5)].set(True)
    legal_action_mask = _legal_action_mask(board, playable_dice)
    assert (expected_legal_action_mask == legal_action_mask).all()
    # Same checks from the opponent's perspective after flipping the board.
    board = _flip_board(board)
    playable_dice = jnp.array([4, 1, (- 1), (- 1)], dtype=jnp.int32)
    expected_legal_action_mask: jnp.ndarray = jnp.zeros((6 * 26), dtype=jnp.bool_)
    expected_legal_action_mask = expected_legal_action_mask.at[((6 * 1) + 1)].set(True)
    legal_action_mask = _legal_action_mask(board, playable_dice)
    assert (expected_legal_action_mask == legal_action_mask).all()
    # Doubles of 4: the first six actions (source 0, any die face) are legal.
    playable_dice = jnp.array([4, 4, 4, 4], dtype=jnp.int32)
    expected_legal_action_mask = jnp.zeros((6 * 26), dtype=jnp.bool_)
    expected_legal_action_mask = expected_legal_action_mask.at[0:6].set(True)
    legal_action_mask = _legal_action_mask(board, playable_dice)
    assert (expected_legal_action_mask == legal_action_mask).all()
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the XLM BPE tokenizer."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal BPE vocabulary: single characters plus a few merged pieces
        # ('</w>' marks end-of-word).
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        # Merge rules with ranks; trailing empty string terminates the file.
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        # Round-trip pair used by the mixin's generic tests.
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        # NOTE(review): the original docstring was truncated by extraction;
        # reconstructed from the standard subword-nmt attribution.
        'Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt'
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + ['<unk>'])
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_sequence_builders(self):
        # Special-token layout: [0]=<s>, [1]=</s> around each sequence.
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (([0] + text) + [1]))
        assert (encoded_pair == (((([0] + text) + [1]) + text_2) + [1]))
def arguments(func: FunctionSchema, *, method: bool=False) -> Sequence[CppArgument]:
    """Translate a FunctionSchema into its C++ argument list.

    Groups the schema's arguments (dropping ``self`` handling when ``method``
    is set) and converts each group with ``argument``.
    """
    grouped = group_arguments(func, method=method)
    return [argument(grp) for grp in grouped]
def FormsSpace(analytic_type, group=3, base_ring=ZZ, k=QQ(0), ep=None):
    """Factory returning the modular forms space matching ``analytic_type``.

    Walks the analytic-type lattice from most to least restrictive
    (mero > weak > holo > cusp > zero), separately for the quasi branch,
    and returns the most specific space class that contains the type.
    Imports are local to avoid circular imports with .space.
    """
    from .space import canonical_parameters
    (group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep)
    from .analytic_type import AnalyticType
    AT = AnalyticType()
    analytic_type = AT(analytic_type)
    # Non-quasi branch: narrow down along mero ⊇ weak ⊇ holo ⊇ cusp ⊇ {}.
    if (analytic_type <= AT('mero')):
        if (analytic_type <= AT('weak')):
            if (analytic_type <= AT('holo')):
                if (analytic_type <= AT('cusp')):
                    if (analytic_type <= AT([])):
                        from .space import ZeroForm
                        return ZeroForm(group=group, base_ring=base_ring, k=k, ep=ep)
                    else:
                        from .space import CuspForms
                        return CuspForms(group=group, base_ring=base_ring, k=k, ep=ep)
                else:
                    from .space import ModularForms
                    return ModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
            else:
                from .space import WeakModularForms
                return WeakModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
        else:
            from .space import MeromorphicModularForms
            return MeromorphicModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
    # Quasi branch: same narrowing with the 'quasi' component added.
    elif (analytic_type <= AT(['mero', 'quasi'])):
        if (analytic_type <= AT(['weak', 'quasi'])):
            if (analytic_type <= AT(['holo', 'quasi'])):
                if (analytic_type <= AT(['cusp', 'quasi'])):
                    if (analytic_type <= AT(['quasi'])):
                        # A purely 'quasi' type would collapse to ZeroForm, which
                        # by design only exists non-quasi.
                        raise ValueError('There should be only non-quasi ZeroForms. That could be changed but then this exception should be removed.')
                        from .space import ZeroForm
                        return ZeroForm(group=group, base_ring=base_ring, k=k, ep=ep)
                    else:
                        from .space import QuasiCuspForms
                        return QuasiCuspForms(group=group, base_ring=base_ring, k=k, ep=ep)
                else:
                    from .space import QuasiModularForms
                    return QuasiModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
            else:
                from .space import QuasiWeakModularForms
                return QuasiWeakModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
        else:
            from .space import QuasiMeromorphicModularForms
            return QuasiMeromorphicModularForms(group=group, base_ring=base_ring, k=k, ep=ep)
    else:
        raise NotImplementedError('Analytic type not implemented.')
def create_extra_val_loader(args, dataset, val_input_transform, target_transform, val_sampler):
    """Build a validation DataLoader for the named dataset.

    NOTE(review): the ``val_sampler`` parameter is never read — it is
    unconditionally reassigned below; presumably kept for signature
    compatibility with callers. Confirm before removing.
    """
    if (dataset == 'cityscapes'):
        val_set = cityscapes.CityScapes('fine', 'val', 0, transform=val_input_transform, target_transform=target_transform, cv_split=args.cv, image_in=args.image_in)
    elif (dataset == 'bdd100k'):
        val_set = bdd100k.BDD100K('val', 0, transform=val_input_transform, target_transform=target_transform, cv_split=args.cv, image_in=args.image_in)
    elif (dataset == 'gtav'):
        val_set = gtav.GTAV('val', 0, transform=val_input_transform, target_transform=target_transform, cv_split=args.cv, image_in=args.image_in)
    elif (dataset == 'synthia'):
        val_set = synthia.Synthia('val', 0, transform=val_input_transform, target_transform=target_transform, cv_split=args.cv, image_in=args.image_in)
    elif (dataset == 'mapillary'):
        # Mapillary has no fixed size: resize to a fixed height, then center pad/crop.
        eval_size = 1536
        val_joint_transform_list = [joint_transforms.ResizeHeight(eval_size), joint_transforms.CenterCropPad(eval_size)]
        val_set = mapillary.Mapillary('semantic', 'val', joint_transform_list=val_joint_transform_list, transform=val_input_transform, target_transform=target_transform, test=False)
    elif (dataset == 'null_loader'):
        val_set = nullloader.nullloader(args.crop_size)
    else:
        raise Exception('Dataset {} is not supported'.format(dataset))
    # With synchronized BN, validation must also be sharded across ranks.
    if args.syncbn:
        from datasets.sampler import DistributedSampler
        val_sampler = DistributedSampler(val_set, pad=False, permutation=False, consecutive_sample=False)
    else:
        val_sampler = None
    val_loader = DataLoader(val_set, batch_size=args.val_batch_size, num_workers=(args.num_workers // 2), shuffle=False, drop_last=False, sampler=val_sampler)
    return val_loader
def VDCNN9KMaxPool(num_classes=5, shortcut=False, bias=False):
    """9-layer VDCNN variant built from k-max-pooling blocks."""
    block_counts = [1, 1, 1, 1]
    channel_widths = [64, 128, 256, 512]
    return VDCNN(
        KMaxPoolBlock,
        blocks=block_counts,
        filters=channel_widths,
        num_classes=num_classes,
        shortcut=shortcut,
        bias=bias,
    )
def nullspace_RR(n=300, min=0, max=10, system='sage'):
    """Benchmark: kernel of a random (n+1) x n real matrix in Sage or Magma.

    Returns CPU seconds. ``min``/``max`` bound the random integer entries
    (the names shadow builtins — kept for interface compatibility).
    """
    if (system == 'sage'):
        from sage.rings.real_mpfr import RR
        A = random_matrix(ZZ, (n + 1), n, x=min, y=(max + 1)).change_ring(RR)
        t = cputime()
        v = A.kernel()
        return cputime(t)
    elif (system == 'magma'):
        # Equivalent Magma program over a 16-digit real field, timed with Cputime.
        code = ('\nn := %s;\nA := RMatrixSpace(RealField(16), n+1,n)![Random(%s,%s) : i in [1..n*(n+1)]];\nt := Cputime();\nK := Kernel(A);\ns := Cputime(t);\n' % (n, min, max))
        if verbose:  # `verbose` is a module-level flag not visible in this chunk
            print(code)
        magma.eval(code)
        return float(magma.eval('s'))
    else:
        raise ValueError(('unknown system "%s"' % system))
def gp():
    """Return the module's lazily-created PARI/GP interface.

    On first call, spawns a Gp session using the 'buzzard' script
    subdirectory and preloads the three GP scripts it needs; subsequent
    calls reuse the cached ``_gp`` instance.
    """
    global _gp
    if (_gp is None):
        _gp = Gp(script_subdirectory='buzzard')
        _gp.read('DimensionSk.g')
        _gp.read('genusn.g')
        _gp.read('Tpprog.g')
    return _gp
def setup_logger(args):
    """Configure root logging and return a module-level logger.

    Sends INFO+ to stderr and DEBUG+ to ``<logdir>/<name>/train.log``
    (appending), creating the run directory if needed. The returned logger
    gets a ``flush()`` helper and logs ``args`` once for reproducibility.
    """
    run_dir = os.path.join(args.logdir, args.name)
    os.makedirs(run_dir, exist_ok=True)
    log_format = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {'format': log_format},
        },
        'handlers': {
            'stderr': {
                'level': 'INFO',
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stderr',
            },
            'logfile': {
                'level': 'DEBUG',
                'formatter': 'standard',
                'class': 'logging.FileHandler',
                'filename': os.path.join(run_dir, 'train.log'),
                'mode': 'a',
            },
        },
        'loggers': {
            '': {'handlers': ['stderr', 'logfile'], 'level': 'DEBUG', 'propagate': True},
        },
    })
    logger = logging.getLogger(__name__)
    # Convenience: flush all attached handlers at once.
    logger.flush = (lambda: [handler.flush() for handler in logger.handlers])
    logger.info(args)
    return logger
# NOTE(review): the decorator's '@' (and possibly a name prefix, cf. dace's
# compare-to-numpy-output test decorator) appears lost in extraction — confirm.
@_numpy_output(non_zero=True, check_dtype=True)
def test_ufunc_remainder_ss(A: dace.int32[10], B: dace.int32[10]):
    """Elementwise np.remainder over two int32 vectors (dace ufunc test);
    non_zero=True keeps B free of zeros to avoid division-by-zero."""
    return np.remainder(A, B)
class CalvinEnv(PlayTableSimEnv):
    """CALVIN play-table environment wrapper with task-completion rewards.

    Tracks a set of target tasks; reward is the (normalized) count of tasks
    newly completed each step, and the episode ends when all are done or
    ``max_episode_steps`` is reached.
    """

    def __init__(self, tasks: dict={}, **kwargs):
        # NOTE(review): mutable default for `tasks` — kept for interface
        # compatibility; hydra.utils.instantiate does not mutate it here.
        self.max_episode_steps = kwargs.pop('max_episode_steps')
        self.reward_norm = kwargs.pop('reward_norm')
        # Strip wrapper-level options the base PlayTableSimEnv does not accept.
        [kwargs.pop(key) for key in ['id', 'screen_size', 'action_repeat', 'frame_stack', 'absorbing_state', 'pixel_ob', 'state_ob', 'num_sequences', 'data_path', 'save_dir', 'record']]
        super().__init__(**kwargs)
        self.action_space = spaces.Box(low=(- 1), high=1, shape=(7,))
        self.observation_space = spaces.Box(low=(- 1), high=1, shape=(21,))
        self.tasks = hydra.utils.instantiate(tasks)
        self.target_tasks = list(self.tasks.tasks.keys())
        self.tasks_to_complete = copy.deepcopy(self.target_tasks)
        self.completed_tasks = []
        # Per-task 0/1 flags, sticky once solved within an episode.
        self.solved_subtasks = defaultdict((lambda : 0))
        self._t = 0
        # When True, tasks must be completed in target_tasks order.
        self.sequential = False

    def reset(self):
        """Reset the sim and all episode-local bookkeeping."""
        obs = super().reset()
        self.start_info = self.get_info()
        self._t = 0
        self.tasks_to_complete = copy.deepcopy(self.target_tasks)
        self.completed_tasks = []
        self.solved_subtasks = defaultdict((lambda : 0))
        return obs

    def reset_to_state(self, state):
        # First 15 dims are the robot state, the remainder the scene state.
        return super().reset(robot_obs=state[:15], scene_obs=state[15:])

    def get_obs(self):
        """Flat 21-dim state: robot obs concatenated with scene obs, truncated."""
        obs = self.get_state_obs()
        return np.concatenate([obs['robot_obs'], obs['scene_obs']])[:21]

    def _reward(self):
        """Reward = reward_norm * number of target tasks newly completed."""
        current_info = self.get_info()
        completed_tasks = self.tasks.get_task_info_for_set(self.start_info, current_info, self.target_tasks)
        next_task = self.tasks_to_complete[0]
        reward = 0
        for task in list(completed_tasks):
            if self.sequential:
                # Only the next task in order counts.
                if (task == next_task):
                    reward += 1
                    self.tasks_to_complete.pop(0)
                    self.completed_tasks.append(task)
            elif (task in self.tasks_to_complete):
                reward += 1
                self.tasks_to_complete.remove(task)
                self.completed_tasks.append(task)
        reward *= self.reward_norm
        r_info = {'reward': reward}
        return (reward, r_info)

    def _termination(self):
        """Success (and termination) once every target task is complete."""
        done = (len(self.tasks_to_complete) == 0)
        d_info = {'success': done}
        return (done, d_info)

    def _postprocess_info(self, info):
        # Latch solved flags so a task stays marked solved for the episode.
        for task in self.target_tasks:
            self.solved_subtasks[task] = (1 if ((task in self.completed_tasks) or self.solved_subtasks[task]) else 0)
        return info

    def step(self, action):
        """Apply one 7-dim action; last dim is binarized to a gripper open/close flag."""
        env_action = action.copy()
        env_action[(- 1)] = ((int((action[(- 1)] >= 0)) * 2) - 1)
        self.robot.apply_action(env_action)
        # `self.action_repeat` is assumed to be set by the base class — the
        # wrapper-level 'action_repeat' kwarg was popped in __init__; confirm.
        for _ in range(self.action_repeat):
            self.p.stepSimulation(physicsClientId=self.cid)
        self.scene.step()
        obs = self.get_obs()
        info = self.get_info()
        (reward, r_info) = self._reward()
        (done, d_info) = self._termination()
        info.update(r_info)
        info.update(d_info)
        self._t += 1
        # Time-limit truncation.
        if (self._t >= self.max_episode_steps):
            done = True
        return (obs, reward, done, self._postprocess_info(info))

    # NOTE(review): '@contextlib.contextmanager' appears lost in extraction —
    # val_mode reads as a no-op validation context; confirm against repo.
    def val_mode(self):
        pass
        (yield)
        pass

    def get_episode_info(self):
        """Summary dict for logging: completed tasks, remaining tasks, solved flags."""
        completed_tasks = (self.completed_tasks if (len(self.completed_tasks) > 0) else [None])
        info = dict(solved_subtask=completed_tasks, tasks_to_complete=self.tasks_to_complete)
        info.update(self.solved_subtasks)
        return info
# NOTE(review): decorator prefixes appear lost in extraction ('@test',
# '@ti.kernel', '@ti.ad'); restored to the standard taichi autodiff API —
# confirm against the repository.
@_utils.test(require=ti.extension.assertion, debug=True)
def test_skip_grad_replaced():
    """Tape validation should flag a kernel that mutates state outside the
    taped gradient (kernel_1) but accept one whose gradient is user-replaced
    with a no-op (kernel_2)."""
    N = 16
    x = ti.field(dtype=ti.f32, shape=N, needs_grad=True)
    loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
    b = ti.field(dtype=ti.f32, shape=(), needs_grad=True)

    @ti.kernel
    def kernel_1():
        loss[None] = (x[1] * b[None])
        b[None] += 100

    @ti.ad.grad_replaced
    @ti.kernel
    def kernel_2():
        loss[None] = (x[1] * b[None])
        b[None] += 100

    @ti.ad.grad_for(kernel_2)
    def kernel_2_grad():
        pass

    for i in range(N):
        x[i] = i
    b[None] = 10
    loss.grad[None] = 1
    # kernel_1 violates tape assumptions -> assertion error under validation.
    with pytest.raises(ti.TaichiAssertionError):
        with ti.ad.Tape(loss=loss, validation=True):
            kernel_1()
    # kernel_2's gradient is explicitly replaced, so validation passes.
    with ti.ad.Tape(loss=loss, validation=True):
        kernel_2()
def pad_list(list_: List, pad_element: Any, pad_to_length: int) -> List:
    """Return a new list: ``list_`` extended with ``pad_element`` to the target length.

    If ``list_`` already has ``pad_to_length`` or more elements, a plain copy
    is returned (nothing is truncated).
    """
    padding_needed = pad_to_length - len(list_)
    # A negative multiplier yields an empty list, matching the no-padding case.
    return list_ + [pad_element] * padding_needed
def display(d):
    """Render a sample for visualization.

    Draws the detected keypoints in green on the first image channel (scaled
    to 0-255) and overlays the invalid region of the mask; returns the image.
    """
    base_image = d['image'][(..., 0)] * 255
    keypoint_coords = np.where(d['keypoint_map'])
    rendered = draw_keypoints(base_image, keypoint_coords, (0, 255, 0))
    draw_overlay(rendered, np.logical_not(d['valid_mask']))
    return rendered
def export_digraph(booster, tree_index=0, out_file=None):
    """Export one of a boosted model's trees as Graphviz DOT source.

    Writes the DOT text to ``out_file`` when given (returning None),
    otherwise returns the DOT string.

    Raises:
        TypeError: if ``booster`` is not a BaseBoostedRelationalModel.
        IndexError: if ``tree_index`` does not address a stored tree.
    """
    if not isinstance(booster, BaseBoostedRelationalModel):
        raise TypeError('booster must inherit from BaseBoostedRelationalModel.')
    dotfiles = booster._dotfiles
    if not (0 <= tree_index < len(dotfiles)):
        raise IndexError('tree_index is out of range.')
    dot_source = dotfiles[tree_index]
    if not out_file:
        return dot_source
    with open(out_file, 'w') as handle:
        handle.write(dot_source)
class EnsemblePredictor(object):
    """Ensembles per-task predictions from multiple checkpoints listed in a JSON config.

    The config maps each task to a list of model checkpoints and names an
    aggregation method ('max' or 'mean') applied across their probabilities.
    """

    def __init__(self, config_path, model_args, data_args, gpu_ids, device, logger=None):
        (task2models, aggregation_fn) = self.get_config(config_path)
        self.task2models = task2models
        self.aggregation_fn = aggregation_fn
        self.model_args = model_args
        self.data_args = data_args
        self.gpu_ids = gpu_ids
        self.device = device
        self.logger = logger

    def get_config(self, config_path):
        """Load the JSON config; return (task->model list, numpy aggregation fn)."""
        with open(config_path, 'r') as json_fh:
            self.config_dict = json.load(json_fh)
        task2models = self.config_dict[CFG_TASK2MODELS]
        agg_method = self.config_dict[CFG_AGG_METHOD]
        if (agg_method == 'max'):
            aggregation_fn = np.max
        elif (agg_method == 'mean'):
            aggregation_fn = np.mean
        else:
            raise ValueError(((f'Invalid configuration: ' + f'{CFG_AGG_METHOD} = {agg_method} ') + '(expected "max" or "mean")'))
        return (task2models, aggregation_fn)

    def save_config(self):
        """Persist the loaded config next to the logger's results for reproducibility."""
        config_save_path = (self.logger.results_dir / 'config.json')
        self.logger.log(f'Saving config to {config_save_path}.')
        with open(config_save_path, 'w') as f:
            json.dump(self.config_dict, f, indent=4)

    def predict(self, cache=False, return_paths=False, all_gt_tasks=False):
        """Run every configured model, aggregate per task, and return DataFrames.

        Returns (probs_df, gt_df) or, with ``return_paths``, a third element of
        input paths. With ``cache`` both per-model and ensemble CSVs are
        reused/written (requires a logger).
        """
        is_cached = False
        if (cache and (self.logger is not None)):
            results_dir = self.logger.results_dir
            self.predictions_path = (results_dir / 'ensemble_predictions.csv')
            self.groundtruth_path = (results_dir / 'groundtruth.csv')
            # Short-circuit if a previous ensemble run was already written out.
            if (self.predictions_path.exists() and self.groundtruth_path.exists()):
                self.logger.log((f'Predictions at {self.predictions_path} ' + 'already exist. Loading from this file.'))
                ensemble_probs_df = pd.read_csv(self.predictions_path)
                ensemble_gt_df = pd.read_csv(self.groundtruth_path)
                is_cached = True
        elif cache:
            raise ValueError(('Must instantiate Predictor with logger' + 'if caching.'))
        ensemble_paths = None
        if (not is_cached):
            # Per-checkpoint caches so a model shared by several tasks runs once.
            model2probs = {}
            model2gt = {}
            task2ensemble_probs = {}
            task2gt = {}
            self.save_config()
            for (task, model_dicts) in self.task2models.items():
                print('[]')
                print(task)
                for model_dict in model_dicts:
                    ckpt_path = Path(model_dict[CFG_CKPT_PATH])
                    is_3class = model_dict[CFG_IS_3CLASS]
                    if (ckpt_path in model2probs):
                        continue
                    # Per-checkpoint results live under <ckpt dir>/results/<phase>/.
                    ckpt_save_dir = Path(ckpt_path).parent
                    results_parent_dir = (ckpt_save_dir / 'results')
                    results_dir = (results_parent_dir / self.data_args.phase)
                    results_dir.mkdir(parents=True, exist_ok=True)
                    ckpt_iter = ckpt_path.stem.split('.')[0]
                    predictions_name = f'{ckpt_iter}-predictions.csv'
                    groundtruth_name = f'{ckpt_iter}-groundtruth.csv'
                    predictions_path = (results_dir / predictions_name)
                    groundtruth_path = (results_dir / groundtruth_name)
                    if (cache and (predictions_path.exists() and groundtruth_path.exists())):
                        self.logger.log(((f'Predictions at {predictions_path}' + ' already exist. Loading from this ') + 'file.'))
                        probs_df = pd.read_csv(predictions_path, dtype=np.float32)
                        gt_df = pd.read_csv(groundtruth_path, dtype=np.float32)
                    else:
                        # Reconstruct the model's own args/transforms, load it, and predict.
                        dataset = self.data_args.dataset
                        (model_args, transform_args) = ModelSaver.get_args(cl_model_args=self.model_args, dataset=dataset, ckpt_save_dir=ckpt_save_dir, model_uncertainty=is_3class)
                        model_args.moco = self.model_args.moco
                        (model, ckpt_info) = ModelSaver.load_model(ckpt_path=ckpt_path, gpu_ids=self.gpu_ids, model_args=model_args, is_training=False)
                        predictor = Predictor(model=model, device=self.device)
                        loader = get_loader(phase=self.data_args.phase, data_args=self.data_args, transform_args=transform_args, is_training=False, return_info_dict=return_paths, logger=self.logger)
                        if loader.dataset.return_info_dict:
                            (probs_df, gt_df, paths) = predictor.predict(loader)
                            # Paths are identical across models; keep the first set.
                            if (ensemble_paths is None):
                                ensemble_paths = paths
                        else:
                            (probs_df, gt_df) = predictor.predict(loader)
                        if cache:
                            self.logger.log(('Writing predictions to ' + f'{predictions_path}.'))
                            probs_df.to_csv(predictions_path, index=False)
                            self.logger.log(('Writing groundtruth to ' + f'{groundtruth_path}.'))
                            gt_df.to_csv(groundtruth_path, index=False)
                    model2probs[ckpt_path] = probs_df
                    model2gt[ckpt_path] = gt_df
                # Aggregate this task's column across all of its checkpoints.
                task_ckpt_probs = [model2probs[Path(model_dict[CFG_CKPT_PATH])][task] for model_dict in model_dicts]
                task2ensemble_probs[task] = self.aggregation_fn(task_ckpt_probs, axis=0)
                if (len(model_dicts) > 0):
                    # Ground truth is shared: take it from the first checkpoint.
                    first_gt = model2gt[Path(model_dicts[0][CFG_CKPT_PATH])][task]
                    task2gt[task] = model2gt[Path(model_dicts[0][CFG_CKPT_PATH])][task]
            ensemble_probs_df = pd.DataFrame({task: task2ensemble_probs[task] for task in self.task2models if (task in task2gt)})
            if all_gt_tasks:
                # NOTE(review): relies on `model_dicts` from the last loop
                # iteration — assumes all models share a full ground-truth frame.
                ensemble_gt_df = model2gt[Path(model_dicts[0][CFG_CKPT_PATH])]
            else:
                ensemble_gt_df = pd.DataFrame({task: task2gt[task] for task in self.task2models})
            if cache:
                self.logger.log(f'Writing predictions to {self.predictions_path}.')
                ensemble_probs_df.to_csv(self.predictions_path, index=False)
                self.logger.log(f'Writing groundtruth to {self.groundtruth_path}.')
                ensemble_gt_df.to_csv(self.groundtruth_path, index=False)
        if return_paths:
            return (ensemble_probs_df, ensemble_gt_df, ensemble_paths)
        return (ensemble_probs_df, ensemble_gt_df)
def test_download_not_repeated():
    """stanza should not re-download a model that already exists on disk.

    Downloads the English tokenizer into a temp dir, records the file's
    mtime, builds a Pipeline pointed at the same dir, and asserts the
    mtime is unchanged (i.e. no second download occurred).
    """
    with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as test_dir:
        stanza.download('en', model_dir=test_dir, processors='tokenize', package='combined')
        assert (sorted(os.listdir(test_dir)) == ['en', 'resources.json'])
        en_dir = os.path.join(test_dir, 'en')
        en_dir_listing = sorted(os.listdir(en_dir))
        # mwt comes along with the English tokenize package.
        assert (en_dir_listing == ['mwt', 'tokenize'])
        tokenize_path = os.path.join(en_dir, 'tokenize', 'combined.pt')
        mod_time = os.path.getmtime(tokenize_path)
        pipe = stanza.Pipeline('en', model_dir=test_dir, processors='tokenize', package={'tokenize': 'combined'})
        assert (os.path.getmtime(tokenize_path) == mod_time)
def main(args, config):
    """Entry point for (distributed) BLIP pretraining.

    Builds the pretraining dataset/loader, the BLIP model and AdamW optimizer,
    optionally resumes from a checkpoint, wraps in DDP when distributed, and
    runs the epoch loop with per-epoch checkpointing on the main process.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.gpu)
    # Seed every RNG, offset by rank so workers draw different samples.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    print('Creating dataset')
    datasets = [create_dataset('pretrain', config, min_scale=0.2)]
    print(('number of training samples: %d' % len(datasets[0])))
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    data_loader = create_loader(datasets, samplers, batch_size=[config['batch_size']], num_workers=[args.num_workers], is_trains=[True], collate_fns=[None])[0]
    print(('=' * 50))
    print('time now is: ')
    print(time.strftime('%Y/%m/%d %H:%M:%S'))
    print(('=' * 50))
    print('Creating model')
    model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
    model = model.cuda()
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
    start_epoch = 0
    # Resume model/optimizer state and continue from the following epoch.
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = (checkpoint['epoch'] + 1)
        print(('resume checkpoint from %s' % args.checkpoint))
    # Keep an unwrapped handle so checkpoints store plain (non-DDP) weights.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    print('Start training')
    start_time = time.time()
    for epoch in range(start_epoch, config['max_epoch']):
        # Step-decay LR schedule applied once per epoch.
        step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
        train_stats = train(model, data_loader, optimizer, epoch, device, config)
        # Only rank 0 writes checkpoints and the log file.
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
            save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(save_obj, os.path.join(args.output_dir, ('checkpoint_%02d.pth' % epoch)))
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def get_paraphrase_prompt(gpt3, prompt, ent_tuple):
    """Paraphrase an entity-templated prompt via GPT-3, re-abstracting entities.

    Fills ``prompt`` (containing <ENT0>, <ENT1>, ... slots) with ``ent_tuple``,
    asks GPT-3 for a paraphrase (up to 5 attempts), then tries to put the
    <ENTi> placeholders back by matching each entity — trying the configured
    sentence/entity transformations — requiring exactly one occurrence each.
    Returns the re-templated prompt, or None if no attempt succeeds.
    """
    assert (get_n_ents(prompt) == len(ent_tuple))
    ent_tuple = [ent.lower() for ent in ent_tuple]
    sent = get_sent(prompt=prompt, ent_tuple=ent_tuple)
    for _ in range(5):
        raw_response = gpt3.call(prompt=f'''paraphrase:
{sent}
''')
        # Keep only the first sentence of the completion, normalized.
        para_sent = raw_response['choices'][0]['text']
        para_sent = sent_tokenize(para_sent)[0]
        para_sent = para_sent.strip().strip('.').lower()
        print('para_sent:', para_sent)
        prompt = para_sent
        valid = True
        for (idx, ent) in enumerate(ent_tuple):
            # Try transformation pairs until the entity appears exactly once,
            # then substitute its placeholder back in.
            for trans_sent in TRANSFORMATIONS_SENT:
                for trans_ent in TRANSFORMATIONS_ENT:
                    if (prompt.count(f'<ENT{idx}>') == 0):
                        transed_prompt = prompt.replace(*trans_sent)
                        transed_ent = ent.replace(*trans_ent)
                        if (transed_prompt.count(transed_ent) == 1):
                            prompt = transed_prompt.replace(transed_ent, f'<ENT{idx}>')
            if (prompt.count(f'<ENT{idx}>') != 1):
                # Entity lost or duplicated in the paraphrase — retry.
                valid = False
                break
        if valid:
            return prompt
    return None
def test_coefficient_tracker_can_shift_weighted_sampled_based_on_configured_transition_period():
    """CoefficientDrifter with 'weighted_sampled' transitions should mix the
    outgoing and incoming coefficients during the 2-round transition window
    of each 4-round drift interval.

    The sampler is mocked to return 1.0, 2.0, 3.0, ... deterministically, so
    the expected per-round coefficients can be written out exactly.
    """
    with mock.patch('obp.simulator.coefficient_drifter.sample_random_uniform_coefficients', MockCoefSample().fake_sample):
        drifter = CoefficientDrifter(drift_interval=4, transition_period=2, transition_type='weighted_sampled', effective_dim_context=2, effective_dim_action_context=2)
        (actual_context_coef, _, _) = drifter.get_coefficients(n_rounds=8)
        # Rounds 1-3 use coef 1, rounds 4-6 coef 2, rounds 7-8 coef 3 — the
        # sampled transition swaps in the next coefficient within each window.
        expected_context_coef = np.asarray([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.0, 2.0], [2.0, 2.0], [3.0, 3.0], [3.0, 3.0]])
    assert np.allclose(actual_context_coef, expected_context_coef)
class LinearCameraCal(object):
    """Pinhole camera calibration (fx, fy, cx, cy) — symforce-generated storage class.

    Storage layout: [focal_length_x, focal_length_y, principal_point_x,
    principal_point_y]. Geometry/Lie-group operations delegate to ``ops``.

    NOTE(review): '@staticmethod'/'@classmethod' decorators on storage_dim,
    from_storage, tangent_dim and from_tangent appear lost in extraction and
    are restored below — confirm against the generated source.
    """
    __slots__ = ['data']

    if T.TYPE_CHECKING:
        data = []

    def __init__(self, focal_length, principal_point):
        """Build from two length-2 vectors/sequences; flattens (2,1)/(1,2) arrays."""
        self.data = []
        if isinstance(focal_length, numpy.ndarray):
            if (focal_length.shape in [(2, 1), (1, 2)]):
                focal_length = focal_length.flatten()
            elif (focal_length.shape != (2,)):
                raise IndexError('Expected focal_length to be a vector of length 2; instead had shape {}'.format(focal_length.shape))
        elif (len(focal_length) != 2):
            raise IndexError('Expected focal_length to be a sequence of length 2, was instead length {}.'.format(len(focal_length)))
        if isinstance(principal_point, numpy.ndarray):
            if (principal_point.shape in [(2, 1), (1, 2)]):
                principal_point = principal_point.flatten()
            elif (principal_point.shape != (2,)):
                raise IndexError('Expected principal_point to be a vector of length 2; instead had shape {}'.format(principal_point.shape))
        elif (len(principal_point) != 2):
            raise IndexError('Expected principal_point to be a sequence of length 2, was instead length {}.'.format(len(principal_point)))
        self.data.extend(focal_length)
        self.data.extend(principal_point)

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.data)

    def focal_length(self):
        return ops.CameraOps.focal_length(self)

    def principal_point(self):
        return ops.CameraOps.principal_point(self)

    def pixel_from_camera_point(self, point, epsilon):
        return ops.CameraOps.pixel_from_camera_point(self, point, epsilon)

    def pixel_from_camera_point_with_jacobians(self, point, epsilon):
        return ops.CameraOps.pixel_from_camera_point_with_jacobians(self, point, epsilon)

    def camera_ray_from_pixel(self, pixel, epsilon):
        return ops.CameraOps.camera_ray_from_pixel(self, pixel, epsilon)

    def camera_ray_from_pixel_with_jacobians(self, pixel, epsilon):
        return ops.CameraOps.camera_ray_from_pixel_with_jacobians(self, pixel, epsilon)

    @staticmethod
    def storage_dim():
        # 4 scalars: fx, fy, cx, cy.
        return 4

    def to_storage(self):
        return list(self.data)

    @classmethod
    def from_storage(cls, vec):
        # Bypass __init__'s shape validation; trust the storage vector length.
        instance = cls.__new__(cls)
        if isinstance(vec, list):
            instance.data = vec
        else:
            instance.data = list(vec)
        if (len(vec) != cls.storage_dim()):
            raise ValueError('{} has storage dim {}, got {}.'.format(cls.__name__, cls.storage_dim(), len(vec)))
        return instance

    @staticmethod
    def tangent_dim():
        return 4

    @classmethod
    def from_tangent(cls, vec, epsilon=1e-08):
        if (len(vec) != cls.tangent_dim()):
            raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), cls.tangent_dim()))
        return ops.LieGroupOps.from_tangent(vec, epsilon)

    def to_tangent(self, epsilon=1e-08):
        return ops.LieGroupOps.to_tangent(self, epsilon)

    def retract(self, vec, epsilon=1e-08):
        if (len(vec) != self.tangent_dim()):
            raise ValueError('Vector dimension ({}) not equal to tangent space dimension ({}).'.format(len(vec), self.tangent_dim()))
        return ops.LieGroupOps.retract(self, vec, epsilon)

    def local_coordinates(self, b, epsilon=1e-08):
        return ops.LieGroupOps.local_coordinates(self, b, epsilon)

    def interpolate(self, b, alpha, epsilon=1e-08):
        return ops.LieGroupOps.interpolate(self, b, alpha, epsilon)

    def __eq__(self, other):
        # Value equality on the storage vector; other types compare unequal.
        if isinstance(other, LinearCameraCal):
            return (self.data == other.data)
        else:
            return False
def label_smoothing_log_loss(pred, labels, smoothing=0.0):
    """Negative log-likelihood over log-probabilities with label smoothing.

    Args:
        pred: log-probabilities of shape (..., n_class).
        labels: integer class indices with shape matching ``pred`` minus the
            last (class) dimension.
        smoothing: probability mass spread uniformly over non-target classes.

    Returns:
        Scalar tensor: mean over the batch of -sum(target_dist * pred).
    """
    n_class = pred.shape[-1]
    one_hot = torch.zeros_like(pred)
    # BUG FIX: the previous `one_hot[labels] = 1.0` indexed whole *rows* by the
    # label values (wrong for batched input) instead of setting one entry per
    # sample; scatter along the class dimension builds the true one-hot.
    one_hot.scatter_(-1, labels.unsqueeze(-1), 1.0)
    # Target class keeps (1 - smoothing); the rest split `smoothing` evenly.
    one_hot = (one_hot * (1 - smoothing)) + (((1 - one_hot) * smoothing) / (n_class - 1))
    loss = -(one_hot * pred).sum(dim=-1).mean()
    return loss