code
stringlengths
101
5.91M
class TFAutoModelForAudioClassification(_BaseAutoModelClass):
    # Auto-model factory for TensorFlow audio-classification heads; the
    # generic _BaseAutoModelClass machinery dispatches on this mapping.
    _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def generate_declaration(input_ids: List[str], partition_fields: Dict[(Node, str)], input_args: Dict[(Node, str)], move_tensors=False) -> str:
    """Generate the source text of a partition's ``forward`` declaration.

    Emits a ``def forward(self, *args):`` header, one comment line mapping each
    graph node scope to its field, then code that unflattens ``args`` into the
    named inputs (optionally moving them to the partition's device).

    NOTE(review): relies on module-level ``tab``/``dtab`` indentation strings,
    and the generated code calls a runtime ``move_tensors`` helper distinct
    from the boolean parameter of the same name — confirm against the module.
    """
    lines = [(tab + f'''def forward(self, *args): ''')]
    # One scope <=> field comment per partition field / input argument.
    for (node, field) in chain(partition_fields.items(), input_args.items()):
        lines.append(f'''{dtab}# {node.scope} <=> {field} ''')
    if (len(input_ids) == 0):
        return ''.join(lines)
    if move_tensors:
        lines.extend([f''' {dtab}# moving inputs to current device no op if already on the correct device ''', f"{dtab}{', '.join(input_ids)} = move_tensors(unflatten(args, self.input_structure), self.device)"])
    else:
        lines.extend([f"{dtab}{', '.join(input_ids)} = unflatten(args, self.input_structure)"])
    # A single target would otherwise unpack a 1-tuple; index it instead.
    if (len(input_ids) == 1):
        lines[(- 1)] += '[0]'
    return ''.join(lines)
def test_calculate_recall_float():
    """Recall over id sets: |pred ∩ true| / |true| = 4/5 for these tensors."""
    pred = torch.tensor([1, 2, 3, 5, 7, 6], dtype=torch.long)
    true = torch.tensor([1, 2, 3, 4, 5], dtype=torch.long)
    # Wrap each tensor as a batch-of-one variable-shape list.
    pred = VariableShapeList.from_tensors([pred])
    true = VariableShapeList.from_tensors([true])
    recall = vsl_recall(pred, true)
    # {1, 2, 3, 5} of the 5 true ids appear in pred -> recall 0.8.
    assert torch.isclose(recall[0], torch.tensor((4 / 5)))
@torch.no_grad()  # NOTE(review): source was garbled ('_grad()'); assuming torch.no_grad — confirm.
def align_meshes_c2f(mesh_w: Meshes, mesh_c: Meshes,
                     az_coarse=np.linspace(0, 360, num=4, endpoint=False, dtype=np.float64),
                     el_coarse=np.linspace((- 90), 90, num=3, endpoint=True, dtype=np.float64),
                     cr_coarse=np.linspace(0, 360, num=4, endpoint=False, dtype=np.float64),
                     s_coarse=(0.7, 1, 1.3), t_coarse=((- 0.3), 0, 0.3),
                     max_icp_iterations=500) -> Tuple[(Transform3d, bool)]:
    """Coarse-to-fine alignment of a single 'world' mesh onto a 'camera' mesh.

    Both meshes are centred and scale-normalised, then a coarse grid over
    Euler angles / scale / translation seeds ICP; the pose with the lowest
    chamfer distance wins.  Returns ``(world->camera Transform3d, found)``
    where ``found`` is False if no coarse candidate improved on infinity.

    FIX: ``dtype=np.float`` was removed in NumPy 1.24 — replaced with
    ``np.float64`` (the alias's historical meaning).  The mutable list
    defaults for ``s_coarse``/``t_coarse`` are now tuples (never mutated).
    """
    assert (len(mesh_w) == 1)
    assert (len(mesh_c) == 1)
    points_w = sample_points_from_meshes(mesh_w)[0]
    points_c = sample_points_from_meshes(mesh_c)[0]
    transforms_w = []
    transforms_c = []
    device = points_w.device

    def get_centre(points):
        return points.mean(dim=0, keepdim=True)

    # 1) translate each cloud to its centroid.
    centre_w = get_centre(points_w)
    centre_c = get_centre(points_c)
    transforms_w.append(Translate((- centre_w), device=device))
    transforms_c.append(Translate((- centre_c), device=device))
    points_w = transforms_w[(- 1)].transform_points(points_w)
    points_c = transforms_c[(- 1)].transform_points(points_c)

    def get_scale(points):
        return points.abs().max()

    # 2) normalise each cloud's scale (epsilon guards a degenerate cloud).
    scale_w = get_scale(points_w)
    scale_c = get_scale(points_c)
    transforms_w.append(Scale((1 / (scale_w + 1e-06)), device=device))
    transforms_c.append(Scale((1 / (scale_c + 1e-06)), device=device))
    points_w = transforms_w[(- 1)].transform_points(points_w)
    points_c = transforms_c[(- 1)].transform_points(points_c)

    # 3) enumerate the coarse pose grid; camera roll is skipped at the poles
    #    where it is redundant with azimuth.
    azelcr_list = []
    for az in az_coarse:
        for el in el_coarse:
            for cr in ([0] if ((el == 90) or (el == (- 90))) else cr_coarse):
                azelcr_list.append((az, el, cr))
    best_chamfer = float('inf')
    best_coarse_transform = None
    best_fine_RTs = None
    iteration = 0
    for azelcr_np in azelcr_list:
        for s in s_coarse:
            for T in itertools.product(t_coarse, t_coarse, t_coarse):
                azelcr = ((torch.as_tensor(azelcr_np, device=device, dtype=torch.float)[None] * np.pi) / 180)
                R = euler_angles_to_matrix(azelcr, 'YXZ')
                T = torch.as_tensor(T, device=device, dtype=torch.float)[None]
                coarse_transforms_list = [Rotate(R, device=device), Scale(s, device=device), Translate(T, device=device)]
                coarse_transforms = Transform3d(device=device).compose(*coarse_transforms_list)
                points_w_init = coarse_transforms.transform_points(points_w.clone())
                # Refine this coarse seed with (similarity) ICP.
                icpsol = iterative_closest_point(points_w_init[None], points_c[None], estimate_scale=True, max_iterations=max_icp_iterations)
                if (not icpsol.converged):
                    pp = (lambda x: list(x.detach().cpu().numpy()))
                    logger.warn(f'iter {iteration} (azelcr{pp(azelcr[0])}, s{s}, T{pp(T[0])}) align_meshes_c2f ICP did not converge. rmse {float(icpsol.rmse.item())}')
                else:
                    logger.debug(f'align_meshes_c2f ICP converged. rmse {float(icpsol.rmse.item())}')
                chamfer_l2 = chamfer_pcl(icpsol.Xt, points_c[None])
                iteration += 1
                if (chamfer_l2.item() < best_chamfer):
                    pp = (lambda x: list(x.detach().cpu().numpy()))
                    logger.warn(f'iter {iteration} (azelcr{pp(azelcr[0])}, s{s}, T{pp(T[0])}) found better chamfer {float(chamfer_l2.item())}')
                    best_chamfer = chamfer_l2.item()
                    best_coarse_transform = coarse_transforms
                    best_fine_RTs = icpsol.RTs
    if (best_coarse_transform is not None):
        assert (best_fine_RTs is not None)
        transforms_w.append(best_coarse_transform)
        transforms_w.append(RTs_to_transform(best_fine_RTs))
    # Compose: world chain, then invert the camera normalisation chain.
    transforms_c = Transform3d(device=device).compose(*transforms_c)
    transforms_w = Transform3d(device=device).compose(*transforms_w)
    transforms_w2c = transforms_w.compose(transforms_c.inverse())
    return (transforms_w2c, (best_coarse_transform is not None))
def save_all_csv(all_headers, all_columns, result_path):
    """Write a header row plus data rows to ``result_path`` as CSV.

    Creates the parent directory if needed.  ``all_headers`` is a sequence of
    column names; ``all_columns`` is an iterable of row sequences.
    """
    result_dir = os.path.dirname(result_path)
    # FIX: dirname is '' for a bare filename (makedirs('') raises), and the
    # exists()/makedirs() pair was race-prone — use exist_ok instead.
    if result_dir:
        os.makedirs(result_dir, exist_ok=True)
    # FIX: newline='' is required by the csv module; without it each row is
    # followed by a blank line on Windows.  'w' truncates like 'w+' did.
    with open(result_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(all_headers)
        writer.writerows(all_columns)
class TestCategoricalPolicies(TfGraphTestCase):
    """Smoke test: each categorical policy trains one TRPO epoch on CartPole."""

    # NOTE(review): the decorator below appears truncated in this source —
    # presumably '@pytest.mark.parametrize' — confirm against the original file.
    .parametrize('policy_cls', [*policies])
    def test_categorical_policies(self, policy_cls):
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('CartPole-v0')))
            policy = policy_cls(name='policy', env_spec=env.spec)
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            # Finite-difference Hessian-vector products keep the CG optimizer
            # policy-agnostic.
            algo = TRPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, max_kl_step=0.01, optimizer=ConjugateGradientOptimizer, optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(base_eps=1e-05)))
            runner.setup(algo, env)
            runner.train(n_epochs=1, batch_size=4000)
            env.close()
class TeladocRequestPrescription(VirtualFunctionTool):
    """Declarative tool spec: request a new prescription or a refill.

    Consumed by the virtual-tool framework; the class only carries schema
    metadata (name, summary, parameters, returns, exceptions).
    """
    name = 'TeladocRequestPrescription'
    summary = "Request a new prescription or a refill of an existing prescription by providing the medication name, dosage, and the doctor's unique identifier."
    # All three inputs are required strings.
    parameters: List[ArgParameter] = [{'name': 'medication_name', 'type': 'string', 'description': 'The name of the medication.', 'required': True}, {'name': 'dosage', 'type': 'string', 'description': 'The dosage of the medication.', 'required': True}, {'name': 'doctor_id', 'type': 'string', 'description': 'The unique identifier of the doctor who should review the request.', 'required': True}]
    returns: List[ArgReturn] = [{'name': 'prescription_request_id', 'type': 'string', 'description': 'A unique identifier for the prescription request if successfully submitted, otherwise null.'}, {'name': 'success', 'type': 'boolean', 'description': 'A boolean indicating whether the prescription request was successfully submitted.'}]
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'doctor_id' parameter does not exist."}, {'name': 'InvalidRequestException', 'description': "The 'medication_name' or 'dosage' parameter is not valid."}]
class AttnUpBlock2D(nn.Module):
    """UNet up-block: ``num_layers`` ResNet blocks, each followed by a
    self-attention block, consuming skip connections from the down path and
    optionally upsampling at the end."""

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_type='default', attn_num_head_channels=1, output_scale_factor=1.0, add_upsample=True):
        super().__init__()
        resnets = []
        attentions = []
        self.attention_type = attention_type
        for i in range(num_layers):
            # Channel bookkeeping: each resnet consumes the running hidden
            # state concatenated with one skip tensor, so its input width is
            # the sum of both contributions.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            attentions.append(AttentionBlock(out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None):
        for (resnet, attn) in zip(self.resnets, self.attentions):
            # Pop the most recent skip tensor and fuse it by channel concat.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            hidden_states = resnet(hidden_states, temb)
            hidden_states = attn(hidden_states)
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)
        return hidden_states
def main():
    """CLI entry point: train the MLP baseline, then the residual S2CNN model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # BUG FIX: the learning-rate options were declared with type=int, which
    # would truncate any value supplied on the command line (e.g. 0.5 -> 0).
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()
    torch.cuda.set_device(args.device_id)
    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')
    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)
    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)
    # The S2CNN learns the residual on top of the frozen baseline predictions.
    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
def test_simple_graph(tmpdir):
    """Build a tiny conv net and check SimpleGraph can render and save it."""
    try:
        from graphviz import Digraph
    except ImportError:
        # FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; only a missing package should trigger the skip.
        pytest.skip('Skip because graphviz is not installed.')
    from nnabla.experimental.viewers import SimpleGraph
    sg = SimpleGraph()
    nn.clear_parameters()
    x = nn.Variable((2, 3, 4, 4))
    with nn.parameter_scope('c1'):
        h = PF.convolution(x, 8, (3, 3), pad=(1, 1))
        h = F.relu(PF.batch_normalization(h))
    with nn.parameter_scope('f1'):
        y = PF.affine(h, 10)
    g = sg.create_graphviz_digraph(y)
    assert isinstance(g, Digraph)
    tmpdir.ensure(dir=True)
    fpath = 'tmp-draw'
    sg.save(y, fpath, format='png')
class SharedMLP(nn.ModuleList):
    """A stack of kernel-size-1 convolutions (a pointwise "shared MLP").

    Each entry of ``mlp_channels`` becomes one Conv1d/Conv2d layer (depending
    on ``ndim``) with ReLU and optional batch-norm, applied in sequence.
    """

    def __init__(self, in_channels, mlp_channels, ndim=1, bn=True, bn_momentum=0.1):
        super(SharedMLP, self).__init__()
        self.in_channels = in_channels
        # Pick the conv flavour matching the spatial rank of the input.
        if (ndim == 1):
            mlp_module = Conv1d
        elif (ndim == 2):
            mlp_module = Conv2d
        else:
            raise ValueError()
        prev_channels = in_channels
        for width in mlp_channels:
            self.append(mlp_module(prev_channels, width, 1, relu=True, bn=bn, bn_momentum=bn_momentum))
            prev_channels = width
        # Equals in_channels when mlp_channels is empty.
        self.out_channels = prev_channels

    def forward(self, x):
        out = x
        for layer in self:
            out = layer(out)
        return out
def generate():
    """Spawn tail particles around the emitter body ``x[0]``.

    The spawn count scales inversely with the squared distance to ``sun``;
    each particle is written into a ring buffer of slots [1, N-1] (slot 0 is
    the emitter itself) with a random unit direction, inherited velocity, and
    randomised inverse mass.
    """
    r = (x[0] - sun)
    # norm(0.001) clamps the distance, avoiding a blow-up near the sun.
    n_tail_paticles = int((tail_paticle_scale / (r.norm(0.001) ** 2)))
    for _ in range(n_tail_paticles):
        r = x[0]
        # dim is compile-time constant, so ti.static picks one branch.
        if ti.static((dim == 3)):
            r = rand_unit_3d()
        else:
            r = rand_unit_2d()
        # Atomic counter -> next ring-buffer slot in [1, N-1].
        xi = ((ti.atomic_add(count[None], 1) % (N - 1)) + 1)
        x[xi] = x[0]
        v[xi] = ((r * vel_init) + v[0])
        inv_m[xi] = (0.5 + ti.random())
        color[xi] = color_init
class CAnalysedBaseTypeNode(Node):
    # A base-type node whose type object was computed up front; analysis
    # simply returns the cached type without consulting the environment.
    child_attrs = []

    def analyse(self, env, could_be_name=False):
        return self.type
class AbstractCLIPAlgorithm(Algorithm):
    """Base class for CLIP-backed domain-generalisation algorithms.

    Holds a frozen CLIP model plus subclass-provided ``transform`` /
    ``bottleneck`` modules, and utilities to extract features and fit the
    final classification head (sklearn, PyTorch-Lightning, or zero-shot).
    """

    def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
        super(AbstractCLIPAlgorithm, self).__init__(feature_dim, num_classes, num_domains, hparams)
        self.clip_model = pretrained
        self.num_classes = num_classes
        self.idx2class = idx2class
        # CLIP weights stay frozen; only transform/bottleneck/classifier train.
        if (self.clip_model is not None):
            for param in self.clip_model.parameters():
                param.requires_grad = False
        self.transform = None
        self.bottleneck = None
        self.classifier = None
        self.is_debug = hparams['debug']

    def get_clip_label_text_features(self, normalize=True, multiple_prompts=False):
        """Encode the class names into CLIP text features.

        With ``multiple_prompts`` the prompt-template ensemble is averaged per
        class; otherwise a single grammar-corrected 'a picture of a <class>'
        prompt is used.  Features are optionally L2-normalised and returned
        as float32.
        """
        device = next(self.clip_model.parameters()).device
        class_names = [self.idx2class[idx] for idx in range(len(self.idx2class.items()))]
        if (not multiple_prompts):
            tool = language_tool_python.LanguageTool('en-US')
            text_inputs = torch.cat([clip.tokenize(tool.correct(f'a picture of a {c}')) for c in class_names]).to(device)
            with torch.no_grad():
                text_features = self.clip_model.encode_text(text_inputs)
                if normalize:
                    text_features /= text_features.norm(dim=(- 1), keepdim=True)
        else:
            # Prompt ensembling: average embeddings over all templates per
            # class, restoring the model's train/eval mode afterwards.
            is_training = self.clip_model.training
            self.clip_model.eval()
            with torch.no_grad():
                text_features = []
                for classname in tqdm(class_names):
                    texts = [template.format(classname) for template in clip_prompt_templates]
                    texts = clip.tokenize(texts).to(device)
                    class_embeddings = self.clip_model.encode_text(texts)
                    if normalize:
                        class_embeddings /= class_embeddings.norm(dim=(- 1), keepdim=True)
                    class_embedding = class_embeddings.mean(dim=0)
                    if normalize:
                        class_embedding /= class_embedding.norm()
                    text_features.append(class_embedding)
                text_features = torch.stack(text_features, dim=0).to(device)
            if is_training:
                self.clip_model.train()
        text_features = text_features.float()
        return text_features

    def get_device(self):
        # Device is wherever the CLIP weights (or, failing that, the
        # transform's parameters) live.
        if (self.clip_model is not None):
            device = next(self.clip_model.parameters()).device
        else:
            device = next(self.transform.parameters()).device
        return device

    def get_transformed_feature(self, all_x):
        all_z = self.transform(all_x)
        return all_z

    def preprocess_features(self, loader, return_tensor=False, use_tqdm=False):
        """Run the frozen feature pipeline over ``loader``; return (Z, Y)."""
        assert (not self.training), 'Should be in the evaluation mode!!!'
        device = self.get_device()
        if use_tqdm:
            loader = tqdm(loader)
        with torch.no_grad():
            (Z, Y) = ([], [])
            for (x, y) in loader:
                Z += [self.bottleneck(self.get_transformed_feature(x.to(device))).cpu().numpy()]
                Y += [y.cpu().numpy()]
        if return_tensor:
            return (torch.tensor(np.concatenate(Z)), torch.tensor(np.concatenate(Y)))
        else:
            return (np.concatenate(Z), np.concatenate(Y))

    def loss(self, all_x, all_y, all_d):
        raise NotImplementedError

    def update(self, minibatches, unlabeled=None):
        raise NotImplementedError

    def fit_classifier(self, clf_train_data, clf_valid_data, prompt_engineer=False, train_clf_hparams=None):
        """Fit the final classification head on pre-extracted features.

        Heads: sklearn SVM/Logistic (C grid-searched on a predefined
        train/val split), a PyTorch-Lightning logistic regression, or a
        zero-shot linear head initialised from CLIP text features.
        """
        device = self.get_device()
        clf_type = self.hparams['clf_type']
        assert (clf_type in ['SVM', 'Logistic', 'LogisticPT', 'ZeroShot'])
        use_sklearn = (clf_type in ['SVM', 'Logistic'])
        print('Fitting classifier: {}...'.format(clf_type))
        (clf_train_features, clf_train_labels) = clf_train_data
        (clf_val_features, clf_val_labels) = clf_valid_data
        if use_sklearn:
            # PredefinedSplit: -1 marks training rows, 0 marks the val fold.
            clf_all_features = np.concatenate([clf_train_features, clf_val_features])
            clf_all_labels = np.concatenate([clf_train_labels, clf_val_labels])
            cv_fold = np.concatenate([np.full(clf_train_features.shape[0], (- 1), dtype=np.int8), np.zeros(clf_val_features.shape[0], dtype=np.int8)])
            cv = PredefinedSplit(cv_fold)
            if (clf_type == 'SVM'):
                base_params = {'penalty': 'l2', 'max_iter': 1000, 'verbose': 0}
                base_estimator_class = LinearSVC
            elif (clf_type == 'Logistic'):
                base_params = {'penalty': 'l2', 'max_iter': 1000, 'multi_class': 'multinomial', 'solver': 'lbfgs', 'verbose': 0, 'n_jobs': (- 1), 'warm_start': False}
                base_estimator_class = LogisticRegression
            else:
                raise NotImplementedError
            base_estimator = base_estimator_class(**base_params)
            if self.is_debug:
                # Debug mode skips the (slow) grid search.
                best_param = {'C': 1.0}
            else:
                c_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
                param_grid = {'C': c_range}
                clf_cv = GridSearchCV(base_estimator, param_grid, cv=cv, refit=False, scoring='accuracy', n_jobs=(- 1), error_score='raise', verbose=0)
                clf_cv.fit(clf_all_features, clf_all_labels)
                best_param = clf_cv.best_params_
                if (best_param['C'] in [c_range[0], c_range[(- 1)]]):
                    print(f'The best param {best_param} hits the boundary! Please use a larger range!')
            clf = base_estimator_class(**best_param, **base_params)
            clf.fit(clf_train_features, clf_train_labels)
            # Wrap the fitted sklearn model as a tensor-in / tensor-out callable.
            if (clf_type == 'Logistic'):
                self.classifier = (lambda z: torch.Tensor(clf.predict_proba(z.cpu().numpy())).to(device))
            else:
                self.classifier = (lambda z: torch.Tensor(clf.decision_function(z.cpu().numpy())).to(device))
        elif (clf_type == 'LogisticPT'):
            # Defaults, optionally overridden entry-by-entry from
            # train_clf_hparams.
            precision = 32
            lr = 0.0005
            batch_size = 512
            max_epochs = 500
            l2_reg = 0.0
            if (train_clf_hparams is not None):
                assert isinstance(train_clf_hparams, dict)
                if ('precision' in train_clf_hparams):
                    precision = train_clf_hparams['precision']
                if ('lr' in train_clf_hparams):
                    lr = train_clf_hparams['lr']
                if ('batch_size' in train_clf_hparams):
                    batch_size = train_clf_hparams['batch_size']
                if ('max_epochs' in train_clf_hparams):
                    max_epochs = train_clf_hparams['max_epochs']
                if ('l2_reg' in train_clf_hparams):
                    l2_reg = train_clf_hparams['l2_reg']
            print('Training PyTorch logistic regression hyperparamters:\n\tprecision: {}\n\tlearning rate: {}\n\tl2 regularization: {}\n\tbatch size: {}\n\tmax epochs: {}\n'.format(precision, lr, l2_reg, batch_size, max_epochs))
            dm = SklearnDataModule(clf_train_features, clf_train_labels, x_val=clf_val_features, y_val=clf_val_labels, x_test=None, y_test=None, val_split=0, test_split=0, num_workers=4, shuffle=True, batch_size=batch_size, pin_memory=True, drop_last=False)
            self.classifier = PLLogisticRegression(input_dim=clf_train_features.shape[(- 1)], num_classes=self.num_classes, learning_rate=lr, l2_strength=l2_reg)
            early_stop_callback = EarlyStopping(monitor='val_acc', min_delta=0.0005, patience=3, verbose=True, mode='max')
            trainer = pl.Trainer(gpus=1, precision=precision, auto_lr_find=False, max_epochs=max_epochs, logger=False, checkpoint_callback=False, flush_logs_every_n_steps=50, progress_bar_refresh_rate=50, callbacks=[early_stop_callback])
            trainer.fit(self.classifier, train_dataloader=dm.train_dataloader(), val_dataloaders=dm.val_dataloader())
            trainer.validate(self.classifier, val_dataloaders=dm.val_dataloader())
            self.classifier.to(device)
        elif (clf_type == 'ZeroShot'):
            # Zero-shot: a linear head whose weights are the CLIP text
            # embeddings and whose bias is zero.
            if (self.classifier is None):
                self.classifier = PLLogisticRegression(input_dim=clf_train_features.shape[(- 1)], num_classes=self.num_classes)
                self.classifier.to(device)
            text_features = self.get_clip_label_text_features(multiple_prompts=prompt_engineer)
            self.classifier.linear.weight.data.copy_(text_features)
            self.classifier.linear.bias.data.copy_(torch.zeros_like(self.classifier.linear.bias))
        else:
            raise NotImplementedError

    def forward(self, x):
        return self.predict(x)

    def predict(self, x):
        assert (self.classifier is not None), 'Please fit the classifier by calling `fit_classifier` first!'
        z = self.bottleneck(self.get_transformed_feature(x))
        if (self.hparams['clf_type'] == 'ZeroShot'):
            # Zero-shot scoring is cosine similarity: normalise the features.
            z /= z.norm(dim=(- 1), keepdim=True)
        return self.classifier(z)

    def trainable(self):
        return True

    def adjust_lr(self, step, max_steps, steps_per_epoch):
        """Step-wise LR schedule: linear warm-up, then cosine or step decay."""
        learning_rate = self.hparams['lr']
        warmup_from = (self.hparams['lr'] / 5)
        warm_epochs = 10
        lr_decay_rate = 0.1
        lr_decay_epochs = [25, 40]
        eta_min = (self.hparams['lr'] * (lr_decay_rate ** 3))
        if (self.hparams['warmup'] and (step <= (warm_epochs * steps_per_epoch))):
            # Warm up towards the LR the cosine curve would have at this point
            # (or the base LR when not annealing).
            if self.hparams['cosine_anneal']:
                warmup_to = (eta_min + (((learning_rate - eta_min) * (1 + math.cos((((math.pi * warm_epochs) * steps_per_epoch) / max_steps)))) / 2))
            else:
                warmup_to = learning_rate
            p = (step / (warm_epochs * steps_per_epoch))
            lr = (warmup_from + (p * (warmup_to - warmup_from)))
        elif self.hparams['cosine_anneal']:
            p = (step / max_steps)
            lr = (eta_min + (((learning_rate - eta_min) * (1 + math.cos((math.pi * p)))) / 2))
        else:
            # Step decay: multiply by lr_decay_rate per passed milestone.
            decay_steps = np.sum((step > (np.asarray(lr_decay_epochs) * steps_per_epoch)))
            if (decay_steps > 0):
                lr = (learning_rate * (lr_decay_rate ** decay_steps))
            else:
                lr = learning_rate
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
class CaffeResolver(object):
    """Locate a protobuf implementation for reading Caffe models.

    Prefers a real pycaffe installation; when it cannot be imported, falls
    back to the vendored ``caffepb`` module and emits a warning.
    """

    def __init__(self):
        self.import_caffe()

    def import_caffe(self):
        self.caffe = None
        try:
            import caffe
        except ImportError:
            # No pycaffe available: use the bundled protobuf definitions.
            from . import caffepb
            self.caffepb = caffepb
            show_fallback_warning()
        else:
            self.caffe = caffe
            self.caffepb = self.caffe.proto.caffe_pb2
        self.NetParameter = self.caffepb.NetParameter

    def has_pycaffe(self):
        return self.caffe is not None
class DSModifier_blur(DSModifier_dir):
    """Dataset modifier that Gaussian-blurs images (std = ``params['sigma']``)."""

    def __init__(self, ds_modifier: Optional[DSModifier]=None, params: Optional[Dict[(str, Any)]]=None):
        # BUG FIX: the previous signature used a shared mutable default dict
        # ({'sigma': 1}) which this __init__ then mutated via params.update(),
        # leaking the 'modifier' key into every later default-constructed
        # instance.  Build a fresh dict per call instead.
        if params is None:
            params = {'sigma': 1}
        self.name = f"blur{params['sigma']}_modifier"
        self.params: Dict[(str, Any)] = params
        self.ds_modifier = ds_modifier
        self.params.update({'modifier': '{}'.format(self._get_name())})

    def _mod_img(self, img: np.array) -> np.array:
        """Blur one image array with a 7x7 Gaussian kernel and return it as uint8."""
        par = self.params['sigma']
        image_tensor = transforms.functional.to_tensor(img)
        tGAUSSIAN = transforms.Compose([transforms.GaussianBlur(kernel_size=(7, 7), sigma=par)])
        proc_img = tGAUSSIAN(image_tensor)
        rec_img = np.asarray(transforms.functional.to_pil_image(proc_img))
        return rec_img
def register_all_coco(root):
    """Register all predefined COCO instance and panoptic splits under ``root``."""
    for (dataset_name, splits_per_dataset) in _PREDEFINED_SPLITS_COCO.items():
        for (key, (image_root, json_file)) in splits_per_dataset.items():
            # json_file may already be a remote URI; only join local paths.
            register_coco_instances(key, _get_builtin_metadata(dataset_name), (os.path.join(root, json_file) if ('://' not in json_file) else json_file), os.path.join(root, image_root))
    for (prefix, (panoptic_root, panoptic_json, semantic_root)) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        # Each panoptic split reuses the image root and instance json of the
        # matching instance dataset registered above (prefix minus '_panoptic').
        prefix_instances = prefix[:(- len('_panoptic'))]
        instances_meta = MetadataCatalog.get(prefix_instances)
        (image_root, instances_json) = (instances_meta.image_root, instances_meta.json_file)
        register_coco_panoptic_separated(prefix, _get_builtin_metadata('coco_panoptic_separated'), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), os.path.join(root, semantic_root), instances_json)
        register_coco_panoptic(prefix, _get_builtin_metadata('coco_panoptic_standard'), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), instances_json)
def graph_pretty_str(g, shorten=True) -> str:
    """Render an FX graph as an aligned table of (name, op, target, args, kwargs).

    With ``shorten`` enabled, long recurring substrings are abbreviated
    (observer module names, built-in function/method reprs, op names) and a
    legend line for the observer abbreviation is appended.
    """
    built_in_func_re = re.compile('<built-in function (.*)>')
    built_in_meth_re = re.compile('<built-in method (.*) of type.*>')
    op_dict = {'placeholder': 'plchdr', 'get_attr': 'gt_prm', 'call_function': 'cl_fun', 'call_module': 'cl_mod', 'call_method': 'cl_meth'}
    col_names = ('name', 'op', 'target', 'args', 'kwargs')
    # Track the widest cell per column, seeded with the header widths.
    max_lens = {col: len(col) for col in col_names}
    rows = []
    for node in g.nodes:
        name = str(node.name)
        op = str(node.op)
        target = str(node.target)
        args = str(node.args)
        kwargs = str(node.kwargs)
        if shorten:
            name = name.replace('activation_post_process', 'obs')
            op = op_dict.get(op, op)
            func_match = built_in_func_re.search(target)
            if func_match:
                target = f'<bi_fun {func_match.group(1)}>'
            meth_match = built_in_meth_re.search(target)
            if meth_match:
                target = f'<bi_meth {meth_match.group(1)}>'
            target = target.replace('activation_post_process', 'obs')
            args = args.replace('activation_post_process', 'obs')
        row = [name, op, target, args, kwargs]
        for col, value in zip(col_names, row):
            max_lens[col] = max(max_lens[col], len(value))
        rows.append(row)
    format_str = '{:<{name}} {:<{op}} {:<{target}} {:<{args}} {:<{kwargs}}\n'
    res_str = format_str.format(*col_names, **max_lens)
    for row in rows:
        res_str += format_str.format(*row, **max_lens)
    if shorten:
        # Legend for the observer abbreviation used above.
        res_str += '*obs_{n} = activation_post_process_{n}\n'
    return res_str
class Encoder(nn.Module):
    """Transformer encoder: N identical layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        # Deep-copy the prototype layer N times so weights are not shared.
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask=None):
        out = x
        for sublayer in self.layers:
            out = sublayer(out, mask)
        return self.norm(out)
class Anomaly_Loader(Dataset):
    """UCF-Crime dataset of precomputed RGB + optical-flow feature files.

    Train mode yields the per-video concatenation of rgb and flow features;
    test mode additionally yields the parsed ground-truth list and frame
    count from 'name|frames|[g0,g1,...]' lines.
    """

    def __init__(self, is_train=1, path='/workspace/DATA/UCF-Crime/'):
        super(Anomaly_Loader, self).__init__()
        self.is_train = is_train
        self.path = path
        # Train and test splits come from different list files.
        list_name = 'train_anomaly.txt' if (self.is_train == 1) else 'test_anomalyv2.txt'
        with open(os.path.join(path, list_name), 'r') as f:
            self.data_list = f.readlines()

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        if (self.is_train == 1):
            # Strip the trailing newline to get the file stem.
            stem = self.data_list[idx][:(- 1)]
            rgb_npy = np.load(os.path.join((self.path + 'all_rgbs'), (stem + '.npy')))
            flow_npy = np.load(os.path.join((self.path + 'all_flows'), (stem + '.npy')))
            return np.concatenate([rgb_npy, flow_npy], axis=1)
        # Test line format: 'name|num_frames|[g0,g1,...]\n'.
        fields = self.data_list[idx].split('|')
        name = fields[0]
        frames = int(fields[1])
        gts = [int(i) for i in fields[2][1:(- 2)].split(',')]
        rgb_npy = np.load(os.path.join((self.path + 'all_rgbs'), (name + '.npy')))
        flow_npy = np.load(os.path.join((self.path + 'all_flows'), (name + '.npy')))
        return (np.concatenate([rgb_npy, flow_npy], axis=1), gts, frames)
class BeplerLanguageModelingTask(AbstractLanguageModelingTask):
    """Bidirectional language modelling over the Pfam vocabulary (Bepler-style)."""

    def __init__(self):
        n_symbols = len(PFAM_VOCAB)
        super().__init__(key_metric='LMACC', deserialization_func=deserialize_pfam_sequence, n_classes=n_symbols, label_name='primary', input_name='lm_outputs', output_name='lm_logits')

    def build_output_model(self, layers: List[tf.keras.Model]) -> List[tf.keras.Model]:
        # Shift the bidirectional LM outputs so each position predicts its
        # neighbouring token, then project onto the amino-acid classes.
        layers.append(BidirectionalOutputShift(self._input_name, 'shifted_logits'))
        layers.append(AminoAcidClassPredictor(self._n_classes, 'shifted_logits', self._output_name, use_conv=False))
        return layers
def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
    """Interior-point iteration loop for the homogeneous self-dual LP form.

    Runs (optionally predictor-corrector ``pc``, with an initial-point
    heuristic ``ip``) Newton steps until the residual indicators fall below
    ``tol``, infeasibility is detected, or ``maxiter`` is reached.  Returns
    ``(x_hat, status, message, iteration)`` with ``x_hat = x / tau``.
    """
    iteration = 0
    # Standard "blind" start of the homogeneous embedding.
    (x, y, z, tau, kappa) = _get_blind_start(A.shape)
    # The initial-point heuristic only applies with predictor-corrector.
    ip = (ip if pc else False)
    (rho_p, rho_d, rho_A, rho_g, rho_mu, obj) = _indicators(A, b, c, c0, x, y, z, tau, kappa)
    go = ((rho_p > tol) or (rho_d > tol) or (rho_A > tol))
    if disp:
        _display_iter(rho_p, rho_d, rho_g, '-', rho_mu, obj, header=True)
    if (callback is not None):
        (x_o, fun, slack, con) = _postsolve((x / tau), postsolve_args)
        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, 'con': con, 'nit': iteration, 'phase': 1, 'complete': False, 'status': 0, 'message': '', 'success': False})
        callback(res)
    status = 0
    message = 'Optimization terminated successfully.'
    if sparse:
        A = sps.csc_matrix(A)
    while go:
        iteration += 1
        if ip:
            # Initial-point heuristic: take a full step with centering 1.
            gamma = 1

            def eta(g):
                return 1
        else:
            # Predictor-corrector picks gamma adaptively (0 here); otherwise
            # use a fixed fraction of the mean complementarity.
            gamma = (0 if pc else (beta * np.mean((z * x))))

            def eta(g=gamma):
                return (1 - g)
        try:
            (d_x, d_y, d_z, d_tau, d_kappa) = _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)
            if ip:
                # After the heuristic step, clip all variables away from zero
                # and disable the heuristic for subsequent iterations.
                alpha = 1.0
                (x, y, z, tau, kappa) = _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)
                x[(x < 1)] = 1
                z[(z < 1)] = 1
                tau = max(1, tau)
                kappa = max(1, kappa)
                ip = False
            else:
                alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0)
                (x, y, z, tau, kappa) = _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)
        except (LinAlgError, FloatingPointError, ValueError, ZeroDivisionError):
            # Numerical failure in the linear solve: report and bail out.
            status = 4
            message = _get_message(status)
            break
        (rho_p, rho_d, rho_A, rho_g, rho_mu, obj) = _indicators(A, b, c, c0, x, y, z, tau, kappa)
        go = ((rho_p > tol) or (rho_d > tol) or (rho_A > tol))
        if disp:
            _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
        if (callback is not None):
            (x_o, fun, slack, con) = _postsolve((x / tau), postsolve_args)
            res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, 'con': con, 'nit': iteration, 'phase': 1, 'complete': False, 'status': 0, 'message': '', 'success': False})
            callback(res)
        # Infeasibility tests on the homogeneous embedding (tau -> 0): the
        # sign of b'y distinguishes primal from dual infeasibility.
        inf1 = ((rho_p < tol) and (rho_d < tol) and (rho_g < tol) and (tau < (tol * max(1, kappa))))
        inf2 = ((rho_mu < tol) and (tau < (tol * min(1, kappa))))
        if (inf1 or inf2):
            if (b.transpose().dot(y) > tol):
                status = 2
            else:
                status = 3
            message = _get_message(status)
            break
        elif (iteration >= maxiter):
            status = 1
            message = _get_message(status)
            break
    x_hat = (x / tau)
    return (x_hat, status, message, iteration)
def get_mean_scores(scores_list):
    """Aggregate a list of nested score dicts.

    Every dict in ``scores_list`` must share the same {key: {sub_key: value}}
    layout; the result holds, per sub_key, the mean across the list plus a
    '<sub_key>_std' entry with the standard deviation.
    """
    aggregated = {}
    for key, sub_scores in scores_list[0].items():
        aggregated[key] = {}
        for sub_key in sub_scores:
            values = [entry[key][sub_key] for entry in scores_list]
            aggregated[key][sub_key] = np.mean(values)
            aggregated[key][(sub_key + '_std')] = np.std(values)
    return aggregated
def register_Ns3U8TlvValue_methods(root_module, cls):
    """Register Python bindings for ns3::U8TlvValue on ``cls``.

    Declares the constructors and the Copy/Deserialize/GetSerializedSize/
    GetValue/Serialize methods; generated-binding boilerplate, so the call
    order mirrors the C++ header.
    """
    cls.add_constructor([param('ns3::U8TlvValue const &', 'arg0')])
    cls.add_constructor([param('uint8_t', 'value')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::U8TlvValue *', [], is_const=True, is_virtual=True)
    # Two Deserialize overloads: with and without an explicit value length.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')], is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')])
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetValue', 'uint8_t', [], is_const=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    return
def sequences_for_rank(num_sequences):
    """Return this rank's shard of generated sequences, as plain lists.

    The pool of ``num_sequences`` sequences is split across all ranks with
    ``np.array_split``; this rank keeps at most its quota from its shard.
    CPU workers are divided evenly between the ranks on the node.
    """
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    quota = divide_across_ranks(num_sequences, world_size, rank)
    workers_per_rank = multiprocessing.cpu_count() // world_size
    all_sequences = get_sequences(num_sequences, num_workers=workers_per_rank)
    shard = np.array_split(all_sequences, world_size)[rank][:quota]
    return [seq.tolist() for seq in shard]
def group_nodes_by_stage_id(nodes: Iterable[Node]) -> Dict[(int, List[Node])]:
    """Bucket ``nodes`` by their ``stage_id``.

    Returns an OrderedDict whose keys are the distinct stage ids in ascending
    order; each bucket preserves the iteration order of ``nodes``.  Note that
    ``nodes`` is traversed twice, so it must be re-iterable.
    """
    stages = OrderedDict((stage_id, []) for stage_id in sorted({node.stage_id for node in nodes}))
    for node in nodes:
        stages[node.stage_id].append(node)
    return stages
def tagged_corpus_dict():
    """Load the tagged NVD graphson corpus into ``{vertex_id: tagged_text}``.

    Reads the module-level ``path`` global, then collects every vertex's
    'tagged_text' keyed by its '_id', per year section of the file.
    """
    tagged = {}
    graphson_file = (path + '/nvd_2010-2013_w_tags.graphson')
    # FIX: the handle returned by codecs.open() was never closed; use a
    # context manager so it is released deterministically.
    with codecs.open(graphson_file, 'r', encoding='utf-8') as f:
        obj = json.loads(f.read())
    for year in list(obj.keys()):
        print(year)
        for j in range(len(obj[year]['vertices'])):
            V = obj[year]['vertices'][j]
            ID = V['_id']
            tagged[ID] = V['tagged_text']
        print('done with ', year)
    return tagged
def get_dataset(dataset: Dataset, data_path: str, batch_size: int, num_workers: int=4, distributed=True, enable_auto_augmentation=False):
    """Build train/test DataLoaders (plus train sampler) for CIFAR10 or ImageNet.

    Returns ``(train_loader, test_loader, train_sampler)``; the samplers are
    DistributedSamplers when ``distributed`` is set, else None.

    Raises:
        NotImplementedError: for any dataset other than CIFAR10 / ImageNet.
    """
    (train_transform, test_transform) = _get_dataset_augmentation_normalization(dataset, distributed=distributed, enable_auto_augmentation=enable_auto_augmentation)
    if (dataset == Dataset.CIFAR10):
        trainset = torchvision.datasets.CIFAR10(root=data_path, train=True, download=True, transform=train_transform)
        testset = torchvision.datasets.CIFAR10(root=data_path, train=False, download=True, transform=test_transform)
    elif (dataset == Dataset.ImageNet):
        trainset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'train'), transform=train_transform)
        testset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'validation'), transform=test_transform)
    else:
        # BUG FIX: `raise NotImplemented` raises a TypeError (NotImplemented
        # is a comparison constant, not an exception class).
        raise NotImplementedError(f'Unsupported dataset: {dataset}')
    train_sampler = None
    val_sampler = None
    if distributed:
        print('Starting Distributed Datasets')
        train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
        val_sampler = torch.utils.data.distributed.DistributedSampler(testset)
    train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=(train_sampler is None), num_workers=num_workers, pin_memory=True, sampler=train_sampler, collate_fn=(_fast_collate if distributed else None))
    test_loader = None
    if (testset is not None):
        test_loader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, sampler=val_sampler, collate_fn=(_fast_collate if distributed else None))
    return (train_loader, test_loader, train_sampler)
class MatchPrior(object):
    """Matches ground-truth boxes to prior (anchor) boxes and encodes the
    matches as regression targets for SSD-style training."""

    def __init__(self, center_form_priors, center_variance, size_variance, iou_threshold):
        self.center_form_priors = center_form_priors
        # Cache the corner-form view once; IoU matching works in corner form.
        self.corner_form_priors = box_utils.center_form_to_corner_form(center_form_priors)
        self.center_variance = center_variance
        self.size_variance = size_variance
        self.iou_threshold = iou_threshold

    def __call__(self, gt_boxes, gt_labels):
        """Return ``(locations, labels)``: encoded offsets and per-prior labels."""
        # Exact-type check kept on purpose (ndarray subclasses pass through).
        if type(gt_boxes) is np.ndarray:
            gt_boxes = torch.from_numpy(gt_boxes)
        if type(gt_labels) is np.ndarray:
            gt_labels = torch.from_numpy(gt_labels)
        matched_boxes, labels = box_utils.assign_priors(
            gt_boxes, gt_labels, self.corner_form_priors, self.iou_threshold)
        center_form_boxes = box_utils.corner_form_to_center_form(matched_boxes)
        locations = box_utils.convert_boxes_to_locations(
            center_form_boxes, self.center_form_priors,
            self.center_variance, self.size_variance)
        return (locations, labels)
class Paste(object):
    """Pad an image/mask pair up to ``size`` by pasting at the top-left of a
    fresh canvas (black RGB for the image, white 'L' for the mask)."""

    def __init__(self, size):
        # Accept a single number (square target) or an explicit pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        """Return a new sample dict padded to the target size."""
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        # Inputs must not exceed the target canvas.
        assert w <= tw and h <= th
        if w == tw and h == th:
            return {'image': img, 'label': mask}
        canvas_img = Image.new('RGB', (tw, th), 'black')
        canvas_mask = Image.new('L', (tw, th), 'white')
        canvas_img.paste(img, (0, 0))
        canvas_mask.paste(mask, (0, 0))
        return {'image': canvas_img, 'label': canvas_mask}
def decode_woo_head(target_maps, offset_map=None):
    """Decode per-keypoint heatmaps into (x, y) argmax coordinates.

    Args:
        target_maps: tensor of shape (batch, num_keypoints, H, W).
        offset_map: unused; kept for interface compatibility.

    Returns:
        Float tensor of shape (batch, num_keypoints, 2) holding the (x, y)
        position of each heatmap maximum; keypoints whose maximum is <= 0
        are zeroed out.
    """
    batch, num_kp = target_maps.size(0), target_maps.size(1)
    width = target_maps.size(3)
    max_v, idx = torch.max(target_maps.view(batch, num_kp, -1), 2)
    preds = idx.view(batch, num_kp, 1).repeat(1, 1, 2).float()
    max_v = max_v.view(batch, num_kp, 1)
    # Mask out keypoints with non-positive peak responses.
    pred_mask = max_v.gt(0).repeat(1, 1, 2).float()
    # x = flat_index % W, y = floor(flat_index / W).
    # BUG FIX: the original divided by H (size(2)) to recover the row; the
    # flat index advances by W per row, so W (size(3)) is correct.  The two
    # only coincide for square heatmaps.
    preds[..., 0].remainder_(width)
    preds[..., 1].div_(width).floor_()
    preds.mul_(pred_mask)
    return preds
class sage__libs__pari(JoinFeature):
    """Feature describing the availability of ``sage.libs.pari``."""

    def __init__(self):
        """Declare the feature as a join over the PARI convert module,
        provided by the standard ``sagemath_pari`` package."""
        required_modules = [PythonModule('sage.libs.pari.convert_sage')]
        JoinFeature.__init__(
            self,
            'sage.libs.pari',
            required_modules,
            spkg='sagemath_pari',
            type='standard',
        )
def get_accidentals(fifths: int) -> List[Optional[int]]:
    """Return a 12-entry accidental table for a key signature.

    Index = pitch class (0 = C ... 11 = B).  Entry semantics:
      * ``None`` -- natural in this key,
      * ``+1`` / ``-1`` -- pitch class reached by sharpening / flattening,
      * ``0`` -- pitch class carrying an explicit accidental of the key.

    ``fifths > 0`` counts sharps, ``fifths < 0`` counts flats (circle of
    fifths); expected range is -7..7.
    """
    if fifths >= 0:
        # Sharp-side template: black keys marked as raised naturals.
        accidentals: List[Optional[int]] = [None, 1, None, 1, None, None, 1, None, 1, None, 1, None]
        if fifths > 0:
            sharps = [5, 0, 7, 2, 9, 4, 11]  # F C G D A E B
            for idx in sharps[:fifths]:
                # The sharpened note displaces the entry a semitone above.
                # BUG FIX: wrap with % 12 -- for idx == 11 (B#, the 7th
                # sharp) the original indexed accidentals[12] -> IndexError.
                accidentals[(idx + 1) % 12] = None
                accidentals[idx] = 0
    else:
        # Flat-side template: black keys marked as lowered naturals.
        accidentals = [None, -1, None, -1, None, None, -1, None, -1, None, -1, None]
        flats = [11, 4, 9, 2, 7, 0, 5]  # B E A D G C F
        for idx in flats[:-fifths]:
            # idx - 1 already wraps for idx == 0 (Cb -> index 11) via
            # Python's negative indexing; make the wrap explicit.
            accidentals[(idx - 1) % 12] = None
            accidentals[idx] = 0
    return accidentals
def create_json_map(test_files_to_run, json_output_file):
    """Group test files by category and dump {category: "file file ..."} JSON.

    Categories: ``models/<name>`` for files under ``tests/models``, the first
    sub-directory name for other nested files, and ``common`` for top-level
    ``tests/*.py`` files.  A ``None`` output path disables writing.
    """
    if json_output_file is None:
        return
    buckets = {}
    for test_file in test_files_to_run:
        parts = test_file.split(os.path.sep)
        if parts[1] == 'models':
            bucket = os.path.sep.join(parts[1:3])
        elif len(parts) > 2 or not test_file.endswith('.py'):
            bucket = os.path.sep.join(parts[1:2])
        else:
            bucket = 'common'
        buckets.setdefault(bucket, []).append(test_file)
    # Emit keys in sorted order with each file list sorted and space-joined.
    serialized = {name: ' '.join(sorted(files)) for name, files in sorted(buckets.items())}
    with open(json_output_file, 'w', encoding='UTF-8') as fp:
        json.dump(serialized, fp, ensure_ascii=False)
class ResBlockAudio(nn.Module): def __init__(self, dim): super().__init__() self.block = nn.Sequential(nn.Conv2d(dim, dim, 3, 1, 1), nn.BatchNorm2d(dim), nn.ReLU(True), nn.Conv2d(dim, dim, 1), nn.BatchNorm2d(dim)) def forward(self, x): return (x + self.block(x))
# NOTE(review): the decorator below is truncated in the source (it reads
# "_correctness"); the leading characters/"@" were lost -- confirm the real
# project marker name before merging.
@_correctness
def test_triple_nested_forwarding(): 
    """Backward-pass test: gradients must forward through three nested
    sqrt intermediates after state fusion."""
    sdfg = outer_sqrt_with_intermediate.to_sdfg(simplify=False)
    sdfg.apply_transformations_repeated([StateFusion])

    def torch_func(*, Y):
        # Reference computation: Z = sum(log(Y ** (1/8))), gradient w.r.t. Y.
        inter = torch.sqrt(Y)
        inter2 = torch.sqrt(inter)
        inter3 = torch.sqrt(inter2)
        W = torch.log(inter3)
        Z = torch.sum(W)
        Z.backward()
        return dict(gradient_Y=Y.grad)

    return (SDFGBackwardRunner(sdfg, '__return', simplify=False), torch_func, dict(Y=np.random.rand(3, 3).astype(np.float32)))
def test_numpytype_int32():
    """Round-trip: parsing the string form of NumpyType('int32') is stable."""
    original = NumpyType('int32')
    reparsed = parser.parse(str(original))
    assert str(reparsed) == str(original)
# NOTE(review): the checkpoint URL literal on the "ckpt" line was redacted in
# the source ("kwargs['ckpt'] = ' return ..."), leaving an unterminated
# string.  The empty placeholder below MUST be restored to the original URL.
def tera_logMelBase_T_F_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """TERA entry point: logMel base model, time/frequency alteration, AdamW,
    batch 32, 1M steps, 960hr LibriSpeech, sequence length 3k."""
    kwargs['ckpt'] = ''  # TODO: restore checkpoint URL (lost in source)
    return tera_url(*args, refresh=refresh, **kwargs)
def _get_flops_counter(only_conv_linear):
    """Select the FLOPs-counting callback.

    Returns the conv/linear-only counter when ``only_conv_linear`` is truthy,
    otherwise the generic counter.
    """
    return __conv_linear_flops_counter if only_conv_linear else __generic_flops_counter
def _int_overflow(x, exception, msg=None):
    """Cast ``x`` to the FITPACK integer type, raising ``exception`` when it
    would overflow.  A custom message may be supplied via ``msg``."""
    if x > iinfo(dfitpack_int).max:
        if msg is None:
            msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
        raise exception(msg)
    return dfitpack_int.type(x)
class OverfeatModel(model.Model):
    """Overfeat CNN benchmark model: 231px input, batch 32, lr 0.005."""

    def __init__(self):
        super(OverfeatModel, self).__init__('overfeat', 231, 32, 0.005)

    def add_inference(self, cnn):
        """Append the Overfeat convolutional stack and classifier head."""
        # Stages 1-2: large-kernel VALID convolutions with max pooling.
        cnn.conv(96, 11, 11, 4, 4, mode='VALID')
        cnn.mpool(2, 2)
        cnn.conv(256, 5, 5, 1, 1, mode='VALID')
        cnn.mpool(2, 2)
        # Stages 3-5: stacked 3x3 convolutions, then a final pool.
        cnn.conv(512, 3, 3)
        cnn.conv(1024, 3, 3)
        cnn.conv(1024, 3, 3)
        cnn.mpool(2, 2)
        # Classifier head: flatten the 1024x6x6 feature map, two FC layers.
        cnn.reshape([(- 1), ((1024 * 6) * 6)])
        cnn.affine(3072)
        cnn.affine(4096)
class GANTask(GPUTask):
    """GPU task that trains a DoppelGANger GAN as configured by ``self._config``.

    All hyperparameters come from ``self._config``; checkpoints, samples and
    timing info are written under ``self._work_dir``.
    """

    def main(self):
        """Load data, build the generator/discriminator(s), and train."""
        import sys
        sys.path.append('..')
        import os
        import tensorflow as tf
        from gan.load_data import load_data
        from gan.network import DoppelGANgerGenerator, Discriminator, RNNInitialStateType, AttrDiscriminator
        from gan.doppelganger import DoppelGANger
        from gan import output
        from gan.util import add_gen_flag, normalize_per_sample
        # Pickled artifacts reference a top-level "output" module; alias it.
        sys.modules['output'] = output
        (data_feature, data_attribute, data_gen_flag, data_feature_outputs, data_attribute_outputs) = load_data(os.path.join('..', 'data', self._config['dataset']))
        print(data_feature.shape)
        print(data_attribute.shape)
        print(data_gen_flag.shape)
        if self._config['self_norm']:
            # Per-sample normalization appends (min, max) attributes so
            # generated data can be denormalized afterwards.
            (data_feature, data_attribute, data_attribute_outputs, real_attribute_mask) = normalize_per_sample(data_feature, data_attribute, data_feature_outputs, data_attribute_outputs)
        else:
            real_attribute_mask = ([True] * len(data_attribute_outputs))
        sample_len = self._config['sample_len']
        # Append the generation flag so the model learns sequence lengths.
        (data_feature, data_feature_outputs) = add_gen_flag(data_feature, data_gen_flag, data_feature_outputs, sample_len)
        print(data_feature.shape)
        print(len(data_feature_outputs))
        initial_state = None
        if (self._config['initial_state'] == 'variable'):
            initial_state = RNNInitialStateType.VARIABLE
        elif (self._config['initial_state'] == 'random'):
            initial_state = RNNInitialStateType.RANDOM
        elif (self._config['initial_state'] == 'zero'):
            initial_state = RNNInitialStateType.ZERO
        else:
            raise NotImplementedError
        generator = DoppelGANgerGenerator(
            feed_back=self._config['feed_back'],
            noise=self._config['noise'],
            feature_outputs=data_feature_outputs,
            attribute_outputs=data_attribute_outputs,
            real_attribute_mask=real_attribute_mask,
            sample_len=sample_len,
            feature_num_layers=self._config['gen_feature_num_layers'],
            feature_num_units=self._config['gen_feature_num_units'],
            attribute_num_layers=self._config['gen_attribute_num_layers'],
            attribute_num_units=self._config['gen_attribute_num_units'],
            initial_state=initial_state)
        discriminator = Discriminator(
            num_layers=self._config['disc_num_layers'],
            num_units=self._config['disc_num_units'])
        # Optional auxiliary discriminator over the attributes only.
        if self._config['aux_disc']:
            attr_discriminator = AttrDiscriminator(
                num_layers=self._config['attr_disc_num_layers'],
                num_units=self._config['attr_disc_num_units'])
        checkpoint_dir = os.path.join(self._work_dir, 'checkpoint')
        if (not os.path.exists(checkpoint_dir)):
            os.makedirs(checkpoint_dir)
        sample_dir = os.path.join(self._work_dir, 'sample')
        if (not os.path.exists(sample_dir)):
            os.makedirs(sample_dir)
        time_path = os.path.join(self._work_dir, 'time.txt')
        run_config = tf.ConfigProto()
        with tf.Session(config=run_config) as sess:
            # attr_* settings only take effect when aux_disc is enabled;
            # note the conditional expressions below short-circuit, so
            # attr_discriminator is never evaluated when aux_disc is False.
            gan = DoppelGANger(
                sess=sess,
                checkpoint_dir=checkpoint_dir,
                sample_dir=sample_dir,
                time_path=time_path,
                epoch=self._config['epoch'],
                batch_size=self._config['batch_size'],
                data_feature=data_feature,
                data_attribute=data_attribute,
                real_attribute_mask=real_attribute_mask,
                data_gen_flag=data_gen_flag,
                sample_len=sample_len,
                data_feature_outputs=data_feature_outputs,
                data_attribute_outputs=data_attribute_outputs,
                vis_freq=self._config['vis_freq'],
                vis_num_sample=self._config['vis_num_sample'],
                generator=generator,
                discriminator=discriminator,
                attr_discriminator=(attr_discriminator if self._config['aux_disc'] else None),
                d_gp_coe=self._config['d_gp_coe'],
                attr_d_gp_coe=(self._config['attr_d_gp_coe'] if self._config['aux_disc'] else 0.0),
                g_attr_d_coe=(self._config['g_attr_d_coe'] if self._config['aux_disc'] else 0.0),
                d_rounds=self._config['d_rounds'],
                g_rounds=self._config['g_rounds'],
                g_lr=self._config['g_lr'],
                d_lr=self._config['d_lr'],
                attr_d_lr=(self._config['attr_d_lr'] if self._config['aux_disc'] else 0.0),
                extra_checkpoint_freq=self._config['extra_checkpoint_freq'],
                epoch_checkpoint_freq=self._config['epoch_checkpoint_freq'],
                num_packing=self._config['num_packing'])
            gan.build()
            # Resume from checkpoint only when explicitly requested.
            if (('restore' in self._config) and self._config['restore']):
                restore = True
            else:
                restore = False
            gan.train(restore=restore)
def convert_conv2d_weight_memory_format(module, memory_format):
    """Recursively convert the weights of every Conv2d / ConvTranspose2d in
    ``module`` (including ``module`` itself) to ``memory_format``.

    Returns the mutated ``module`` for chaining.
    """
    if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
        converted = module.weight.detach().clone().contiguous(memory_format=memory_format)
        # resize_ pins the storage to the requested memory format.
        module.weight.data = converted.resize_(converted.size(), memory_format=memory_format)
    for child in module.children():
        convert_conv2d_weight_memory_format(child, memory_format)
    return module
def remap_vars_by_sameness(x, y, sameness):
    """Translate each variable of ``x`` into its equivalents present in ``y``.

    ``sameness`` maps every variable to the variables considered identical
    to it.  The result keeps the container type of ``x`` (list/tuple/set or
    dict); one variable may expand to several equivalents.

    Raises:
        ValueError: when some variable of ``x`` has no equivalent in ``y``,
            or when ``x`` is an unsupported container type.
    """
    translation = defaultdict(list)
    for var in x:
        matches = [same_var for same_var in sameness[var] if same_var in y]
        if not matches:
            raise ValueError(f'Do not know how to remap {var} in {x} using {y}')
        translation[var].extend(matches)
    container = type(x)
    if container in (list, tuple, set):
        flattened = []
        for var in x:
            flattened.extend(translation[var])
        return container(flattened)
    if container is dict:
        return {new_var: x[var] for var in x for new_var in translation[var]}
    raise ValueError(f'Do not know how to remap {x}')
# NOTE(review): the decorator below is truncated in the source (it reads
# "_cache()"); presumably a run-once/caching wrapper (e.g. functools.cache or
# a project helper) -- confirm the real name before merging.
@_cache()
def logging_record_argv() -> None:
    """Record the process command line (sys.argv) to <run_dir>/argv."""
    s = subprocess.list2cmdline(sys.argv)
    with open(os.path.join(logging_run_dir(), 'argv'), 'w') as f:
        f.write(s)
class CArrayDeclaratorNode(CDeclaratorNode):
    """Declarator node for a C array (``base[dimension]``).

    ``base`` is the element declarator; ``dimension`` is the declared size
    expression (or None for an incomplete array type).
    """
    child_attrs = ['base', 'dimension']

    def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
        """Resolve this declarator against ``base_type`` and delegate to the
        base declarator with the constructed array (or specialized) type."""
        if ((base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction):
            # "array of template/function" syntax is reinterpreted as a
            # template specialization: the dimension holds the type args.
            from .ExprNodes import TupleNode
            if isinstance(self.dimension, TupleNode):
                args = self.dimension.args
            else:
                args = (self.dimension,)
            values = [v.analyse_as_type(env) for v in args]
            if (None in values):
                ix = values.index(None)
                error(args[ix].pos, 'Template parameter not a type')
                base_type = error_type
            else:
                base_type = base_type.specialize_here(self.pos, values)
            return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
        if self.dimension:
            # The size must be a compile-time integer constant.
            self.dimension = self.dimension.analyse_const_expression(env)
            if (not self.dimension.type.is_int):
                error(self.dimension.pos, 'Array dimension not integer')
            size = self.dimension.get_constant_c_result_code()
            if (size is not None):
                try:
                    size = int(size)
                except ValueError:
                    # e.g. a symbolic C constant -- keep the string form.
                    pass
        else:
            size = None
        # Validate the element type: must be complete, non-Python, non-function.
        if (not base_type.is_complete()):
            error(self.pos, ("Array element type '%s' is incomplete" % base_type))
        if base_type.is_pyobject:
            error(self.pos, 'Array element cannot be a Python object')
        if base_type.is_cfunction:
            error(self.pos, 'Array element cannot be a function')
        array_type = PyrexTypes.c_array_type(base_type, size)
        return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
def generate_abc(args):
    """Generate ABC-notation tunes with a TunesFormer model and write them
    to ``output_tunes/<timestamp>.abc``.

    Reads the prompt from ``prompt.txt`` and downloads model weights on
    first use.  Sampling hyperparameters come from ``args``.
    """
    # Prefer GPU when available.
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    patchilizer = Patchilizer()
    patch_config = GPT2Config(num_hidden_layers=PATCH_NUM_LAYERS, max_length=PATCH_LENGTH, max_position_embeddings=PATCH_LENGTH, vocab_size=1)
    char_config = GPT2Config(num_hidden_layers=CHAR_NUM_LAYERS, max_length=PATCH_SIZE, max_position_embeddings=PATCH_SIZE, vocab_size=128)
    model = TunesFormer(patch_config, char_config, share_weights=SHARE_WEIGHTS)
    filename = 'weights.pth'
    if os.path.exists(filename):
        # NOTE(review): "(unknown)" below looks like a scrubbed f-string
        # field (presumably {filename}); left exactly as found.
        print(f"Weights already exist at '(unknown)'. Loading...")
    else:
        print(f"Downloading weights to '(unknown)' from huggingface.co...")
        try:
            # NOTE(review): the URL literal was redacted in the source
            # ("url = ' response = ..."); restore the real huggingface.co
            # checkpoint URL before use.
            url = ''
            response = requests.get(url, stream=True)
            total_size = int(response.headers.get('content-length', 0))
            chunk_size = 1024
            # Stream the download with a progress bar.
            with open(filename, 'wb') as file, tqdm(desc=filename, total=total_size, unit='B', unit_scale=True, unit_divisor=1024) as bar:
                for data in response.iter_content(chunk_size=chunk_size):
                    size = file.write(data)
                    bar.update(size)
        except Exception as e:
            print(f'Error: {e}')
            exit()
    checkpoint = torch.load('weights.pth')
    model.load_state_dict(checkpoint['model'])
    model = model.to(device)
    model.eval()
    with open('prompt.txt', 'r') as f:
        prompt = f.read()
    tunes = ''
    num_tunes = args.num_tunes
    max_patch = args.max_patch
    top_p = args.top_p
    top_k = args.top_k
    temperature = args.temperature
    seed = args.seed
    show_control_code = args.show_control_code
    print(' HYPERPARAMETERS '.center(60, '#'), '\n')
    args = vars(args)
    for key in args.keys():
        print(((key + ': ') + str(args[key])))
    print(('\n' + ' OUTPUT TUNES '.center(60, '#')))
    start_time = time.time()
    for i in range(num_tunes):
        tune = ((('X:' + str((i + 1))) + '\n') + prompt)
        lines = re.split('(\\n)', tune)
        tune = ''
        skip = False
        # Echo the prompt, optionally hiding control-code lines (S:/B:/E:);
        # "skip" also drops the newline token following a hidden line.
        for line in lines:
            if (show_control_code or (line[:2] not in ['S:', 'B:', 'E:'])):
                if (not skip):
                    print(line, end='')
                    tune += line
                skip = False
            else:
                skip = True
        # Encode the prompt as patches, dropping the trailing special patch.
        input_patches = torch.tensor([patchilizer.encode(prompt, add_special_patches=True)[:(- 1)]], device=device)
        if (tune == ''):
            tokens = None
        else:
            # Characters of the prompt not covered by whole patches are fed
            # as character tokens for the first generation step.
            prefix = patchilizer.decode(input_patches[0])
            remaining_tokens = prompt[len(prefix):]
            tokens = torch.tensor(([patchilizer.bos_token_id] + [ord(c) for c in remaining_tokens]), device=device)
        # Autoregressively generate bars until EOS or the patch budget.
        while (input_patches.shape[1] < max_patch):
            (predicted_patch, seed) = model.generate(input_patches, tokens, top_p=top_p, top_k=top_k, temperature=temperature, seed=seed)
            tokens = None
            if (predicted_patch[0] != patchilizer.eos_token_id):
                next_bar = patchilizer.decode([predicted_patch])
                if (show_control_code or (next_bar[:2] not in ['S:', 'B:', 'E:'])):
                    print(next_bar, end='')
                    tune += next_bar
                if (next_bar == ''):
                    break
                # Prepend any leftover prompt characters to the first bar.
                next_bar = (remaining_tokens + next_bar)
                remaining_tokens = ''
                predicted_patch = torch.tensor(patchilizer.bar2patch(next_bar), device=device).unsqueeze(0)
                input_patches = torch.cat([input_patches, predicted_patch.unsqueeze(0)], dim=1)
            else:
                break
        tunes += (tune + '\n\n')
        print('\n')
    print('Generation time: {:.2f} seconds'.format((time.time() - start_time)))
    timestamp = time.strftime('%a_%d_%b_%Y_%H_%M_%S', time.localtime())
    with open((('output_tunes/' + timestamp) + '.abc'), 'w') as f:
        f.write(tunes)
def reduce_any(source: Tensor[T], *, axis: Union[(Dim, Sequence[Dim])], use_mask: bool=True) -> Tensor[T]:
    """Reduce ``source`` over ``axis`` with logical OR ("any").

    :param source: input tensor.
    :param axis: dim (or dims) to reduce over.
    :param use_mask: respect the sequence mask of dynamic axes, if present.
    :return: tensor with ``axis`` removed.
    """
    return reduce(source=source, mode='any', axis=axis, use_mask=use_mask)
def sample_initial_states(rng):
    """Draw a random Lotka-Volterra initial state.

    Rabbits are sampled uniformly from [10, 100); foxes are a uniform
    10%-80% fraction of the rabbit count.
    """
    rabbit_count = rng.randint(10, 100)
    fox_count = rng.uniform(0.1, 0.8) * rabbit_count
    return wn.lotka_volterra.State(rabbits=rabbit_count, foxes=fox_count)
def setup_logger(name, save_dir, distributed_rank):
    """Create a DEBUG-level logger writing to stdout and, optionally, a file.

    Processes with ``distributed_rank > 0`` get a handler-less logger so
    only rank 0 emits output.  When ``save_dir`` is set, a ``log.txt`` file
    handler is attached as well.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if distributed_rank > 0:
        # Secondary workers stay silent.
        return logger
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, 'log.txt'), mode='w')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
# NOTE(review): the decorator below is truncated in the source (it reads
# "_sentencepiece"); presumably transformers' @require_sentencepiece marker.
@_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MarianTokenizer (SentencePiece-based)."""
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a tiny vocab + SentencePiece fixture tokenizer in tmpdir."""
        super().setUp()
        vocab = ['</s>', '<unk>', 'This', 'is', 'a', 't', 'est', 'G', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, (save_dir / VOCAB_FILES_NAMES['vocab']))
        save_json(mock_tokenizer_config, (save_dir / VOCAB_FILES_NAMES['tokenizer_config_file']))
        if (not (save_dir / VOCAB_FILES_NAMES['source_spm']).exists()):
            # Reuse the sample model for both source and target languages.
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['source_spm']))
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['target_spm']))
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ('This is a test', 'This is a test')

    def test_convert_token_and_id(self):
        """Token <-> id conversion round-trips for the eos token."""
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[(- 1)], '<pad>')
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        """The published en-de checkpoint tokenizes and round-trips through
        save_pretrained / from_pretrained."""
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        """Truncation caps outputs at the model max length (512)."""
        tok = self.get_tokenizer()
        batch = tok([('I am a small frog' * 1000), 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        """Padding only pads to the longest sequence in the batch."""
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    def test_tokenizer_integration(self):
        # Golden encodings for Helsinki-NLP/opus-mt-en-de at a pinned revision;
        # 58100 is the pad id, 0 the eos id.
        expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='Helsinki-NLP/opus-mt-en-de', revision='1a8c2263da11e68e50938f97e10cd57820bd504c', decode_kwargs={'use_source_tokenizer': True})
class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
    """Text-guided image-to-image generation with Stable Diffusion.

    Encodes ``init_image`` into latent space, adds noise according to
    ``strength`` and denoises it conditioned on ``prompt``.
    """

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[(DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler)], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPFeatureExtractor):
        super().__init__()
        scheduler = scheduler.set_format('pt')
        self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)

    def enable_attention_slicing(self, slice_size: Optional[Union[(str, int)]]='auto'):
        """Compute attention in chunks of ``slice_size`` to save memory;
        'auto' halves the attention head dimension."""
        if (slice_size == 'auto'):
            slice_size = (self.unet.config.attention_head_dim // 2)
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation.

        BUG FIX: the original called the non-existent method
        ``enable_attention_slice`` and therefore raised AttributeError.
        """
        self.enable_attention_slicing(None)

    # NOTE(review): the decorator was truncated in the source ("_grad()");
    # reconstructed as torch.no_grad(), the standard wrapper for diffusers
    # pipeline __call__ -- confirm against project history.
    @torch.no_grad()
    def __call__(self, prompt: Union[(str, List[str])], init_image: Union[(torch.FloatTensor, PIL.Image.Image)], strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, eta: Optional[float]=0.0, generator: Optional[torch.Generator]=None, output_type: Optional[str]='pil', return_dict: bool=True):
        """Run img2img generation.

        Args:
            prompt: text prompt(s) guiding the generation.
            init_image: starting image (tensor or PIL image).
            strength: 0..1, how much of the denoising schedule to run.
            num_inference_steps: total scheduler steps.
            guidance_scale: classifier-free guidance weight (>1 enables it).
            eta: DDIM eta, forwarded only if the scheduler accepts it.
            generator: RNG for latent sampling.
            output_type: 'pil' for PIL images, else raw numpy.
            return_dict: return StableDiffusionPipelineOutput vs a tuple.

        Raises:
            ValueError: for bad ``prompt`` type or ``strength`` out of range.
        """
        # --- validate inputs ---
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')
        if ((strength < 0) or (strength > 1)):
            raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}')
        # --- configure scheduler timesteps (some schedulers take an offset) ---
        accepts_offset = ('offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()))
        extra_set_kwargs = {}
        offset = 0
        if accepts_offset:
            offset = 1
            extra_set_kwargs['offset'] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # --- encode the init image into scaled latents ---
        if (not isinstance(init_image, torch.FloatTensor)):
            init_image = preprocess(init_image)
        init_latent_dist = self.vae.encode(init_image.to(self.device)).latent_dist
        init_latents = init_latent_dist.sample(generator=generator)
        init_latents = (0.18215 * init_latents)  # SD latent scaling factor
        init_latents = torch.cat(([init_latents] * batch_size))
        # --- choose the starting timestep from strength, then add noise ---
        init_timestep = (int((num_inference_steps * strength)) + offset)
        init_timestep = min(init_timestep, num_inference_steps)
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            timesteps = torch.tensor(([(num_inference_steps - init_timestep)] * batch_size), dtype=torch.long, device=self.device)
        else:
            timesteps = self.scheduler.timesteps[(- init_timestep)]
            timesteps = torch.tensor(([timesteps] * batch_size), dtype=torch.long, device=self.device)
        noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
        init_latents = self.scheduler.add_noise(init_latents, noise, timesteps).to(self.device)
        # --- text conditioning (with optional classifier-free guidance) ---
        text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        do_classifier_free_guidance = (guidance_scale > 1.0)
        if do_classifier_free_guidance:
            # Unconditional (empty-prompt) embeddings for guidance.
            max_length = text_input.input_ids.shape[(- 1)]
            uncond_input = self.tokenizer(([''] * batch_size), padding='max_length', max_length=max_length, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # eta is only forwarded to schedulers that accept it (DDIM).
        accepts_eta = ('eta' in set(inspect.signature(self.scheduler.step).parameters.keys()))
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        # --- denoising loop, starting partway through the schedule ---
        latents = init_latents
        t_start = max(((num_inference_steps - init_timestep) + offset), 0)
        for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps[t_start:])):
            t_index = (t_start + i)
            latent_model_input = (torch.cat(([latents] * 2)) if do_classifier_free_guidance else latents)
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                # LMS expects sigma-scaled inputs (and dtype alignment).
                sigma = self.scheduler.sigmas[t_index]
                latent_model_input = (latent_model_input / (((sigma ** 2) + 1) ** 0.5))
                latent_model_input = latent_model_input.to(self.unet.dtype)
                t = t.to(self.unet.dtype)
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            if do_classifier_free_guidance:
                (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2)
                noise_pred = (noise_pred_uncond + (guidance_scale * (noise_pred_text - noise_pred_uncond)))
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                latents = self.scheduler.step(noise_pred, t_index, latents, **extra_step_kwargs).prev_sample
            else:
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # --- decode latents to images and run the safety checker ---
        latents = ((1 / 0.18215) * latents)
        image = self.vae.decode(latents.to(self.vae.dtype)).sample
        image = ((image / 2) + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(self.device)
        (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)
        if (output_type == 'pil'):
            image = self.numpy_to_pil(image)
        if (not return_dict):
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
def QueryParamWrapper(constructor, args, params):
    """Wrap a backend ``constructor`` so the resulting instance supports the
    query-parameter protocol.

    The instance's ``_configuration_hook`` is extended to enable frontend
    query parameters, and a ``set_query_arguments`` method is attached.

    Returns:
        The constructed, patched backend instance.
    """
    r = constructor(args, params)

    # Default-arg binding captures the *current* hook so we can chain it.
    def _do(self, original=r._configuration_hook):
        original()
        self._write('frontend query-parameters 1')
        assert (self._line()[0] == 'ok'), 'enabling query parameter support failed'
    r._configuration_hook = MethodType(_do, r)

    def _sqa(self, *args):
        self._write(('query-params %s set' % ' '.join(map(Subprocess._quote, args))))
        assert (self._line()[0] == 'ok'), 'reconfiguring query parameters failed'
        # BUG FIX: removed a stray debug "print(args)" left in the original.
    r.set_query_arguments = MethodType(_sqa, r)
    return r
# NOTE(review): the decorator below is truncated in the source (it reads
# ".overload_method(...)" with the owning module name lost); presumably
# numba's extending.overload_method -- confirm the exact import path.
@overload_method(ListOffsetType, 'begin_list', inline='always')
def ListOffset_begin_list(builder):
    """Numba typing-time overload of ``ListOffsetType.begin_list``.

    Returns the implementation (which yields the builder's content) only
    when ``builder`` type-checks as a ListOffsetType; otherwise returns
    None so numba rejects the overload.
    """
    if isinstance(builder, ListOffsetType):
        def begin_list(builder):
            return builder._content
        return begin_list
def factory_load_embeddings(config_dict):
    """Populate the module-level word/char embedding tables from ``config_dict``."""
    global word_embeds, word_to_ix, char_embeds, char_to_ix
    loaded = load_model_embeddings(config_dict)
    word_embeds, word_to_ix, char_embeds, char_to_ix = loaded
def _get_builtin_metadata(dataset_name):
    """Return built-in metadata for ``dataset_name``.

    NOTE(review): the unconditional return makes the KeyError below
    unreachable and ignores ``dataset_name`` entirely -- a dispatch guard
    (e.g. ``if dataset_name == 'phrasecut':``) appears to have been lost.
    Restore the intended dispatch before adding more datasets.
    """
    return _get_phrasecut_metadata([])
    raise KeyError('No built-in metadata for dataset {}'.format(dataset_name))
class VideoCaptionInstructDataset(VideoCaptionDataset):
    """Caption dataset variant for instruction tuning: the caption becomes
    the target output and a processed empty prompt becomes the input."""

    def __getitem__(self, index):
        """Fetch a sample and swap the caption into the output slot."""
        data = super().__getitem__(index)
        # Idiom fix: identity comparison with None instead of `!= None`
        # (behavior-identical for the dict/None values returned here).
        if data is not None:
            data['text_output'] = data['text_input']
            data['text_input'] = self.text_processor('')
        return data
def load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False):
    """Load a parallel (src-tgt) dataset split as a LanguagePairDataset.

    Supports sharded splits ("train", "train1", ...), optional source
    truncation, bos prepending, language-id appending and word alignments.
    """

    def split_exists(split, src, tgt, lang, data_path):
        # e.g. "<data_path>/train.en-de.en"
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    # Gather shards: split, split1, split2, ... until one is missing.
    for k in itertools.count():
        split_k = (split + (str(k) if (k > 0) else ''))
        # Accept either src-tgt or tgt-src naming of the file pair.
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
        elif (k > 0):
            break
        else:
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
        src_dataset = data_utils.load_indexed_dataset((prefix + src), src_dict, dataset_impl)
        if truncate_source:
            # Strip eos, truncate to max_source_positions - 1, re-append eos.
            src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos())
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset((prefix + tgt), tgt_dict, dataset_impl)
        if (tgt_dataset is not None):
            tgt_datasets.append(tgt_dataset)
        logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)])))
        if (not combine):
            break
    # Either every shard has a target side, or none do (monolingual).
    assert ((len(src_datasets) == len(tgt_datasets)) or (len(tgt_datasets) == 0))
    if (len(src_datasets) == 1):
        src_dataset = src_datasets[0]
        tgt_dataset = (tgt_datasets[0] if (len(tgt_datasets) > 0) else None)
    else:
        # Upsample the primary (first) shard relative to the rest.
        sample_ratios = ([1] * len(src_datasets))
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if (len(tgt_datasets) > 0):
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index'))
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if (tgt_dataset is not None):
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    eos = None
    if append_source_id:
        # Append language-id tokens such as "[en]" / "[de]".
        src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
        if (tgt_dataset is not None):
            tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
        eos = tgt_dict.index('[{}]'.format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
    tgt_dataset_sizes = (tgt_dataset.sizes if (tgt_dataset is not None) else None)
    return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, max_source_positions=max_source_positions, max_target_positions=max_target_positions, align_dataset=align_dataset, eos=eos)
def run_finetuning(config: configure_finetuning.FinetuningConfig):
    """Run (possibly repeated) fine-tuning trials and write evaluation results.

    For each trial a fresh model directory '{model_dir}_{trial}' is used;
    training, dev/test evaluation, and test-set prediction writing are each
    gated by flags on ``config``. Runs forever when ``config.num_trials < 0``.
    """
    results = []
    test_results = []
    trial = 1
    # NOTE(review): heading_info is formatted once with trial=1 and never
    # refreshed inside the loop, so later trials log a stale trial number —
    # confirm against upstream whether this was meant to be per-trial.
    heading_info = 'model={:}, trial {:}/{:}'.format(config.model_name, trial, config.num_trials)
    heading = (lambda msg: utils.heading(((msg + ': ') + heading_info)))
    heading('Config')
    utils.log_config(config)
    generic_model_dir = config.model_dir
    tasks = task_builder.get_tasks(config)
    while ((config.num_trials < 0) or (trial <= config.num_trials)):
        # Each trial writes into its own numbered model directory.
        config.model_dir = ((generic_model_dir + '_') + str(trial))
        if config.do_train:
            utils.rmkdir(config.model_dir)
        model_runner = ModelRunner(config, tasks)
        if config.do_train:
            heading('Start training')
            model_runner.train()
            utils.log()
        if config.do_eval:
            heading('Run dev set evaluation')
            results.append(model_runner.evaluate('dev', True))
            write_results(config, results)
            # Only the first n_writes_test trials dump test-set predictions.
            if (config.write_test_outputs and (trial <= config.n_writes_test)):
                heading('Running on the test set and writing the predictions')
                for task in tasks:
                    # Classification-style GLUE tasks share one output writer.
                    if (task.name in ['cola', 'mrpc', 'mnli', 'sst', 'rte', 'qnli', 'qqp', 'sts']):
                        for split in task.get_test_splits():
                            model_runner.write_classification_outputs([task], trial, split)
                    elif (task.name == 'squad'):
                        scorer = model_runner.evaluate_task(task)
                        scorer.write_predictions()
                        preds = utils.load_json(config.qa_preds_file('squad'))
                        null_odds = utils.load_json(config.qa_na_file('squad'))
                        # Blank out answers the model considers unanswerable
                        # (SQuAD 2.0-style null-odds thresholding).
                        for (q, _) in preds.items():
                            if (null_odds[q] > config.qa_na_threshold):
                                preds[q] = ''
                        utils.write_json(preds, config.test_predictions(task.name, 'dev', trial))
                    else:
                        utils.log('Skipping task', task.name, '- writing predictions is not supported for this task')
        if config.do_test:
            heading('Run Test set evaluation')
            test_results.append(model_runner.evaluate('test', True))
            if config.evaluate_bioasq:
                # BioASQ ships five numbered test batches: test1..test5.
                for task in tasks:
                    for batch in range(1, 6):
                        scorer = model_runner.evaluate_task(task, ('test' + str(batch)), False)
                        scorer.write_predictions(('test' + str(batch)))
        # Free disk space for intermediate trials unless asked to keep them.
        if ((trial != config.num_trials) and (not config.keep_all_models)):
            utils.rmrf(config.model_dir)
        trial += 1
class ExperimentWrappper(object):
    """Thin wrapper around a Weights & Biases (wandb) run.

    Centralizes run initialization, config/statistic logging, artifact
    (checkpoint) upload/download, and convenience loaders for datasets and
    models described by the experiment config. Works both with a live run
    (``self.initialized``) and with finished runs accessed via the wandb API.

    NOTE(review): the class name 'Wrappper' (triple p) is kept as-is since
    external callers reference it.
    """

    def __init__(self, config, wandb_username='', no_sync=False):
        # Fixed artifact/file naming tags.
        self.checkpoint_filetag = 'checkpoint'
        self.final_filetag = 'fin_model_state'
        self.wandb_username = wandb_username
        # Run identity comes from the 'experiment' section of the config;
        # any of these may be None (e.g. a brand-new run has no run_id yet).
        self.project = config['experiment'].get('project_name', None)
        self.run_name = config['experiment'].get('run_name', None)
        self.run_id = config['experiment'].get('run_id', None)
        self.run_local_path = config['experiment'].get('local_dir', None)
        if (config['experiment']['is_training'] and (self.run_local_path is not None)):
            os.makedirs(self.run_local_path, exist_ok=True)
        self.no_sync = no_sync
        self.in_config = config
        self.initialized = False  # True only between init_run() and stop()
        self.artifact = None

    def init_run(self, config={}):
        """Start (or resume) the wandb run and remember its id.

        NOTE(review): mutable default ``config={}`` — safe here only because
        it is never mutated; confirm before changing.
        """
        if self.no_sync:
            # Offline mode: wandb writes locally, nothing is uploaded.
            os.environ['WANDB_MODE'] = 'dryrun'
            print('Experiment:Warning: run is not synced with wandb cloud')
        wb.init(name=self.run_name, project=self.project, config=config, resume='allow', id=self.run_id, dir=self.run_local_path, anonymous='allow')
        self.run_id = wb.run.id
        if (not self.wandb_username):
            # Anonymous mode: wandb creates a temporary entity for this run.
            self.wandb_username = wb.run.entity
            print(f'{self.__class__.__name__}::WARNING::Running wandb in Anonymous Mode. Your temporary account is: {self.wandb_username}')
        self.initialized = True
        self.checkpoint_counter = 0

    def stop(self):
        """Finish the active wandb run (no-op if none is active)."""
        if self.initialized:
            wb.finish()
            self.initialized = False

    def full_name(self):
        """Human-readable identifier: project-run_name-run_id (best effort)."""
        name = (self.project if self.project else '')
        name += (('-' + self.run_name) if self.run_name else '')
        if self.run_id:
            name += ('-' + self.run_id)
        else:
            # No cloud id: fall back to the local pre-trained model path.
            name += (self.in_config['NN']['pre-trained'] if ('pre-trained' in self.in_config['NN']) else '')
        return name

    def last_epoch(self):
        """Last epoch recorded in the run summary, or -1 if none."""
        run = self._run_object()
        return (run.summary['epoch'] if ('epoch' in run.summary) else (- 1))

    def data_info(self):
        """Fetch (split_config, batch_size, data_config) for this run.

        Tries to pull data_split.json / panel_classes.json / param_filter.json
        from the cloud run; each download is best-effort and skipped with a
        warning on failure.
        """
        config = self._run_config()
        split_config = config['data_split']
        data_config = config['dataset']
        try:
            self.get_file('data_split.json', './wandb')
            split_config['filename'] = './wandb/data_split.json'
            # The stored split file already encodes the subset; lift the cap.
            data_config['max_datapoints_per_type'] = None
        except (ValueError, RuntimeError) as e:
            print(f'{self.__class__.__name__}::Warning::Skipping loading split file from cloud..')
        try:
            self.get_file('panel_classes.json', './wandb')
            data_config['panel_classification'] = './wandb/panel_classes.json'
        except (ValueError, RuntimeError) as e:
            print(f'{self.__class__.__name__}::Warning::Skipping loading panel classes file from cloud..')
        try:
            self.get_file('param_filter.json', './wandb')
            data_config['filter_by_params'] = './wandb/param_filter.json'
        except (ValueError, RuntimeError) as e:
            print(f'{self.__class__.__name__}::Warning::Skipping loading parameter filter file from cloud..')
        # Backfill unseen_data_folders from the local config if the cloud
        # config predates that key.
        if (('unseen_data_folders' not in data_config) and ('dataset' in self.in_config) and ('unseen_data_folders' in self.in_config['dataset'])):
            data_config['unseen_data_folders'] = self.in_config['dataset']['unseen_data_folders']
        return (split_config, (config['trainer']['batch_size'] if ('trainer' in config) else config['batch_size']), data_config)

    def last_best_validation_loss(self):
        """Best validation loss from the run summary, or None if absent."""
        run = self._run_object()
        return (run.summary['best_valid_loss'] if ('best_valid_loss' in run.summary) else None)

    def NN_config(self):
        """Model ('NN') section of the run config."""
        config = self._run_config()
        return config['NN']

    def add_statistic(self, tag, info, log=''):
        """Record a summary statistic on the run (live or finished).

        Dicts logged to a finished run are flattened recursively into
        'tag.key' entries because the API cannot update nested summaries.
        """
        if log:
            print(f'{self.__class__.__name__}::Saving statistic <{log}>:')
            print((json.dumps(info, sort_keys=True, indent=2) if isinstance(info, dict) else info))
        if (not self.run_id):
            print(f'{self.__class__.__name__}::Warning::Experiment not connected to the cloud. Statistic {tag} not synced')
            return
        if self.initialized:
            wb.run.summary[tag] = info
        elif isinstance(info, dict):
            for key in info:
                self.add_statistic(((tag + '.') + key), info[key])
        else:
            run = self._run_object()
            run.summary[tag] = info
            run.summary.update()

    def add_config(self, tag, info):
        """Add a config entry; only possible while the run is active."""
        if self.initialized:
            wb.config[tag] = info
        else:
            raise RuntimeError('ExperimentWrappper:Error:Cannot add config to finished run')

    def add_artifact(self, path, name, type):
        """Upload a file or directory as a wandb artifact.

        Temporarily re-activates a finished run to attach the artifact,
        then finishes it again.
        """
        if (not self.run_id):
            print(f'{self.__class__.__name__}::Warning::Experiment not connected to the cloud. Artifact {name} not synced')
            return
        path = Path(path)
        if (not self.initialized):
            print('Experiment::Reactivating wandb run to upload an artifact {}!'.format(name))
            wb.init(id=self.run_id, project=self.project, resume='allow')
        artifact = wb.Artifact(name, type=type)
        if path.is_file():
            artifact.add_file(str(path))
        else:
            artifact.add_dir(str(path))
        wb.run.log_artifact(artifact)
        if (not self.initialized):
            wb.finish()

    def is_finished(self):
        """True if the cloud run state is 'finished' (or no run exists)."""
        if (not self.run_id):
            print(f'{self.__class__.__name__}::Warning::Requested status of run not connected to wandb')
            return True
        run = self._run_object()
        return (run.state == 'finished')

    def load_dataset(self, data_root, eval_config={}, unseen=False, batch_size=5, load_all=False):
        """Instantiate the dataset + wrapper described by the run config.

        ``unseen=True`` switches to the held-out data folders and disables
        the stored split. Returns (dataset, datawrapper).
        NOTE(review): mutable default ``eval_config={}`` — not mutated here.
        """
        (split, _, data_config) = self.data_info()
        if unseen:
            load_all = True
            data_config.update(data_folders=data_config['unseen_data_folders'])
        split = (split if (not load_all) else None)
        data_config.update(eval_config)
        # Dataset/wrapper classes are resolved by name from the `data` module.
        data_class = getattr(data, data_config['class'])
        dataset = data_class(data_root, data_config, gt_caching=data_config['gt_caching'], feature_caching=data_config['feature_caching'])
        if (('wrapper' in data_config) and (data_config['wrapper'] is not None)):
            datawrapper_class = getattr(data, data_config['wrapper'])
            datawrapper = datawrapper_class(dataset, known_split=split, batch_size=batch_size)
        else:
            datawrapper = data.RealisticDatasetDetrWrapper(dataset, known_split=split, batch_size=batch_size)
        return (dataset, datawrapper)

    def load_detr_dataset(self, data_root, eval_config={}, unseen=False, batch_size=5, load_all=False):
        """Like load_dataset, but forces gt caching on / feature caching off
        and always uses the DETR-style wrapper."""
        (split, _, data_config) = self.data_info()
        if unseen:
            load_all = True
            data_config.update(data_folders=data_config['unseen_data_folders'])
        split = (split if (not load_all) else None)
        data_config.update(eval_config)
        data_class = getattr(data, data_config['class'])
        dataset = data_class(data_root, data_config, gt_caching=True, feature_caching=False)
        datawrapper = data.RealisticDatasetDetrWrapper(dataset, known_split=split, batch_size=batch_size)
        return (dataset, datawrapper)

    def load_detr_model(self, data_config, others=False):
        """Build the DETR-style model from the in-memory config and load the
        best checkpoint weights. Returns (model, criterion, device)."""
        (model, criterion) = models.build_former(self.in_config)
        device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = nn.DataParallel(model, device_ids=[device])
        criterion.to(device)
        state_dict = self.get_best_model(device=device)['model_state_dict']
        model.load_state_dict(state_dict)
        criterion.print_debug()
        return (model, criterion, device)

    def recover_detr_model(self, data_config=None):
        """Rebuild the model from the YAML config file on disk (rather than
        the in-memory config) and load the best checkpoint weights."""
        if (not data_config):
            data_config = self.data_info()[(- 1)]
        # NOTE(review): the opened config file handle is never closed.
        (model, criterion) = models.build_former(yaml.safe_load(open(self.in_config['experiment']['local_path'], 'r')))
        device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = nn.DataParallel(model, device_ids=[device])
        criterion.to(device)
        state_dict = self.get_best_model(device=device)['model_state_dict']
        model.load_state_dict(state_dict)
        criterion.print_debug()
        return (model, criterion, device)

    def prediction(self, save_to, model, datawrapper, criterion=None, nick='test', sections=['test'], art_name='multi-data', use_gt_stitches=False):
        """Run batched prediction, log the output folder as a statistic, and
        upload predictions as an artifact. Returns the prediction path.
        NOTE(review): mutable default ``sections=['test']`` — not mutated here.
        """
        prediction_path = datawrapper.predict(model, save_to=save_to, sections=sections, orig_folder_names=True, use_gt_stitches=use_gt_stitches)
        if nick:
            self.add_statistic((nick + '_folder'), os.path.basename(prediction_path), log='Prediction save path')
        if art_name:
            # Single-folder datasets are named after that folder instead.
            art_name = (art_name if (len(datawrapper.dataset.data_folders) > 1) else datawrapper.dataset.data_folders[0])
            self.add_artifact(prediction_path, art_name, 'result')
        return prediction_path

    def prediction_single(self, save_to, model, datawrapper, image, data_name='outside_single'):
        """Predict on a single image; returns (panel_order, panel_idx, prediction_img)."""
        (panel_order, panel_idx, prediction_img) = datawrapper.predict_single(model, image, data_name, save_to)
        return (panel_order, panel_idx, prediction_img)

    def run_single_img(self, image, model, datawrapper):
        """Delegate single-image inference to the datawrapper."""
        return datawrapper.run_single_img(image, model, datawrapper)

    def checkpoint_filename(self, check_id=None):
        """'checkpoint[_<id>].pth' filename for a checkpoint."""
        check_id_str = ('_{}'.format(check_id) if (check_id is not None) else '')
        return '{}{}.pth'.format(self.checkpoint_filetag, check_id_str)

    def artifactname(self, tag, with_version=True, version=None, custom_alias=None):
        """'<run_name>_<run_id>_<tag>[:alias|:vN|:latest]' artifact reference."""
        basename = ((((self.run_name + '_') + self.run_id) + '_') + tag)
        if (custom_alias is not None):
            return ((basename + ':') + custom_alias)
        version_tag = ((':v' + str(version)) if (version is not None) else ':latest')
        return ((basename + version_tag) if with_version else basename)

    def final_filename(self):
        """Filename used for the final model state."""
        return (self.final_filetag + '.pth')

    def cloud_path(self):
        """'<entity>/<project>/<run_id>' path for the wandb API."""
        if (not self.run_id):
            raise RuntimeError('ExperimentWrappper:Error:Need to know run id to get path in wandb could')
        return ((((self.wandb_username + '/') + self.project) + '/') + self.run_id)

    def local_wandb_path(self):
        """Local directory of the active wandb run."""
        print(wb.run.dir)
        return Path(wb.run.dir)

    def local_artifact_path(self):
        """Local staging directory for artifacts (created on demand)."""
        path = ((self.local_wandb_path() / 'artifacts') / self.run_id)
        if (not path.exists()):
            path.mkdir(parents=True)
        return path

    def get_checkpoint_file(self, to_path=None, version=None, device=None):
        """Download a specific checkpoint artifact version and load it.

        Returns the torch-loaded state of the first file in the artifact.
        """
        if (not self.run_id):
            raise RuntimeError('ExperimentWrappper:Error:Need to know run id to restore specific checkpoint from the could')
        try:
            art_path = self._load_artifact(self.artifactname('checkpoint', version=version), to_path=to_path)
            # The checkpoint artifact contains a single file.
            for file in art_path.iterdir():
                return self._load_model_from_file(file, device)
        except (RuntimeError, requests.exceptions.HTTPError, wb.apis.CommError) as e:
            print("ExperimentWrappper::Error::checkpoint from version '{}'is corrupted or lost: {}".format((version if version else 'latest'), e))
            raise e

    def get_best_model(self, to_path=None, device=None):
        """Load the best model state, preferring local paths over the cloud.

        Lookup order: experiment['pre-trained'] file, NN['pre-trained'] file,
        then the cloud artifact aliased ':best'.
        """
        if (('pre-trained' in self.in_config['experiment']) and (self.in_config['experiment']['pre-trained'] is not None) and os.path.exists(self.in_config['experiment']['pre-trained'])):
            print(f'{self.__class__.__name__}::Info::Loading locally saved model')
            return self._load_model_from_file(self.in_config['experiment']['pre-trained'], device)
        elif (('pre-trained' in self.in_config['NN']) and (self.in_config['NN']['pre-trained'] is not None) and os.path.exists(self.in_config['NN']['pre-trained'])):
            print(f'{self.__class__.__name__}::Info::Loading locally saved model')
            return self._load_model_from_file(self.in_config['NN']['pre-trained'], device)
        elif self.run_id:
            try:
                art_path = self._load_artifact(self.artifactname('checkpoint', custom_alias='best'), to_path=to_path)
                for file in art_path.iterdir():
                    print(file)
                    if (device is not None):
                        return torch.load(file, map_location=device)
                    else:
                        return torch.load(file)
            except requests.exceptions.HTTPError:
                raise RuntimeError('ExperimentWrappper:Error:No file with best weights found in run {}'.format(self.cloud_path()))
        else:
            raise RuntimeError('ExperimentWrappper:Error:Need to know run_id to restore best model from the could OR path to the locally saved model ')

    def save_checkpoint(self, state, aliases=[], wait_for_upload=False):
        """Save a training state dict as a new versioned checkpoint artifact.

        NOTE(review): mutable default ``aliases=[]`` — not mutated here.
        """
        if (not self.initialized):
            raise RuntimeError('Experiment::cannot save checkpoint files to non-active wandb runs')
        print('Experiment::Saving model state -- checkpoint artifact')
        filename = self.checkpoint_filename(self.checkpoint_counter)
        artifact = wb.Artifact(self.artifactname('checkpoint', with_version=False), type='checkpoint')
        self.checkpoint_counter += 1
        torch.save(state, (self.local_artifact_path() / filename))
        artifact.add_file(str((self.local_artifact_path() / filename)))
        wb.run.log_artifact(artifact, aliases=(['latest'] + aliases))
        if wait_for_upload:
            # Block until wandb reports the just-logged version as available.
            self._wait_for_upload(self.artifactname('checkpoint', version=(self.checkpoint_counter - 1)))

    def get_file(self, filename, to_path='.'):
        """Restore a single run file from the cloud into ``to_path``."""
        if (not self.run_id):
            raise RuntimeError('ExperimentWrappper:Error:Need to know run id to restore a file from the could')
        wb.restore(filename, run_path=((self.project + '/') + self.run_id), replace=True, root=to_path)

    def _load_artifact(self, artifact_name, to_path=None):
        """Download an artifact by name; returns the local download path."""
        print('Experiment::Requesting artifacts: {}'.format(artifact_name))
        api = wb.Api({'project': self.project})
        artifact = api.artifact(name=artifact_name)
        filepath = artifact.download((str(to_path) if to_path else None))
        print('Experiment::Artifact saved to: {}'.format(filepath))
        return Path(filepath)

    def _run_object(self):
        """wandb API handle for this (possibly finished) run."""
        return wb.Api().run(self.cloud_path())

    def _run_config(self):
        """Cloud run config when a run_id exists, else the local config."""
        if self.run_id:
            run = wb.Api().run(self.cloud_path())
            return run.config
        else:
            return self.in_config

    def _wait_for_upload(self, artifact_name, max_attempts=10):
        """Poll (5 s apart) until the artifact version is downloadable."""
        print('Experiment::Waiting for artifact {} upload'.format(artifact_name))
        attempt = 1
        while (attempt <= max_attempts):
            try:
                time.sleep(5)
                self._load_artifact(artifact_name)
                print('Requested version is successfully syncronized')
                break
            except (ValueError, wb.CommError):
                attempt += 1
                print('Trying again')
        if (attempt > max_attempts):
            print('Experiment::Warning::artifact {} is still not syncronized'.format(artifact_name))

    def _load_model_from_file(self, file, device=None):
        """torch.load a state file, optionally mapping to ``device``."""
        print(file)
        if (device is not None):
            return torch.load(file, map_location=device)
        else:
            return torch.load(file)
def expose_couchdb() -> None: try: run_check_process('kubectl apply -f openwhisk/couchdb-service.yaml') except (subprocess.CalledProcessError, FileNotFoundError) as e: logging.error('Cannot expose Couch DB, reason: {}'.format(e))
def config(): exp_name = 'CoTrain' seed = 0 video_datasets = ['wevid', 'howto100m', 'yttemporal'] image_datasets = ['cc3m', 'cc12m'] loss_names = _loss_names({'vtm': 1, 'mlm': 1}) batch_size = 4096 linear_evaluation = False draw_false_image = 1 train_transform_keys = ['pixelbert'] val_transform_keys = ['pixelbert'] image_size = 224 patch_size = 16 max_image_len = (- 1) draw_false_video = 1 video_only = False num_frames = 3 vqav2_label_size = 3129 msrvttqa_label_size = 1501 max_text_len = 40 tokenizer = 'pretrained/bert-base-uncased' vocab_size = 30522 whole_word_masking = False mlm_prob = 0.15 draw_false_text = 0 draw_options_text = 0 vit = 'vit_base_patch16_224' hidden_size = 768 num_heads = 12 num_layers = 12 mlp_ratio = 4 drop_rate = 0.1 shared_embedding_dim = 512 save_checkpoints_interval = 1 optim_type = 'adamw' learning_rate = 0.0001 weight_decay = 0.01 decay_power = 1 max_epoch = 100 max_steps = 25000 warmup_steps = 2500 end_lr = 0 lr_mult = 1 backend = 'a100' get_recall_metric = False get_ind_recall_metric = False retrieval_views = 3 resume_from = None fast_dev_run = False val_check_interval = 1.0 test_only = False data_root = '' log_dir = 'result' per_gpu_batchsize = 0 num_gpus = 1 num_nodes = 1 load_path = '' num_workers = 16 precision = 16
class CLIPSegTextConfig(PretrainedConfig): model_type = 'clipseg_text_model' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig': (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if (config_dict.get('model_type') == 'clipseg'): config_dict = config_dict['text_config'] if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs)
def aggregate_mean(transformed_features, graph_adj, degrees, name): with tf.name_scope(name): output = tf.sparse_tensor_dense_matmul(graph_adj, transformed_features) return (output / degrees)
class LearningRateScheduler(lr_scheduler._LRScheduler):
    """Iteration-based LR scheduler with warmup and STEP/COSINE/POLY policies.

    Schedule limits come from ``solver`` either directly in iterations
    (``MAX_ITER``/``STEPS``/``WARM_UP_ITERS``) or in epochs, converted via
    ``iter_per_epoch``.
    """

    def __init__(self, optimizer, solver, start_iter=0, iter_per_epoch=(- 1), last_epoch=(- 1)):
        self.solver = solver
        self.iteration = start_iter
        if ('MAX_ITER' in self.solver):
            # Iteration-based config: values are already in iterations.
            self.milestones = self.solver.STEPS
            self.max_iter = self.solver.MAX_ITER
            self.warmup_iters = self.solver.WARM_UP_ITERS
        else:
            # Epoch-based config: convert epochs -> iterations.
            self.iter_per_epoch = iter_per_epoch
            self.conver_epoch2iter()
        assert (self.solver.LR_POLICY in ['STEP', 'COSINE', 'STEP_COSINE', 'POLY'])
        assert (self.solver.WARM_UP_METHOD in ['CONSTANT', 'LINEAR'])
        assert (list(self.milestones) == sorted(self.milestones))
        self.gamma = self.solver.GAMMA
        self.warmup_factor = self.solver.WARM_UP_FACTOR
        self.warmup_method = self.solver.WARM_UP_METHOD
        self.lr_factor = 0
        self.info = dict(best_acc=0.0, best_epoch=1, cur_acc=0.0, cur_epoch=1)
        super().__init__(optimizer, last_epoch)

    def conver_epoch2iter(self):
        """Convert epoch-denominated solver settings into iterations."""
        self.max_iter = (self.solver.MAX_EPOCHS * self.iter_per_epoch)
        self.warmup_iters = (self.solver.WARM_UP_EPOCH * self.iter_per_epoch)
        self.milestones = [(epoch * self.iter_per_epoch) for epoch in self.solver.STEPS]

    def get_lr(self):
        """Compute per-group LRs for the current iteration and advance it."""
        warmup_factor = self.get_warmup_factor(self.warmup_method, self.iteration, self.warmup_iters, self.warmup_factor)
        if (self.solver.LR_POLICY == 'STEP'):
            lr_factor = self.get_step_factor(warmup_factor)
        elif (self.solver.LR_POLICY == 'COSINE'):
            lr_factor = self.get_cosine_factor(warmup_factor)
        elif (self.solver.LR_POLICY == 'STEP_COSINE'):
            # Step decay until the last milestone, cosine decay afterwards.
            if (self.iteration < self.milestones[(- 1)]):
                lr_factor = self.get_step_factor(warmup_factor)
            else:
                # Fix: was self.get_cosine_lrs(...), a method that does not
                # exist — raised AttributeError once past the last milestone.
                lr_factor = self.get_cosine_factor(warmup_factor)
        elif (self.solver.LR_POLICY == 'POLY'):
            lr_factor = self.get_poly_factor(warmup_factor)
        else:
            raise KeyError('Unknown SOLVER.LR_POLICY: {}'.format(self.solver.LR_POLICY))
        # Log only significant LR changes to avoid per-iteration spam.
        ratio = _get_lr_change_ratio(lr_factor, self.lr_factor)
        if ((self.lr_factor != lr_factor) and (ratio > self.solver.LOG_LR_CHANGE_THRESHOLD)):
            if (((lr_factor * self.solver.BASE_LR) > 1e-07) and (self.iteration > 1)):
                logging_rank('Changing learning rate {:.6f} -> {:.6f}'.format((self.lr_factor * self.solver.BASE_LR), (lr_factor * self.solver.BASE_LR)))
        self.lr_factor = lr_factor
        self.iteration += 1
        return [(lr_factor * base_lr) for base_lr in self.base_lrs]

    def get_step_factor(self, warmup_factor):
        """Step decay: gamma^(number of milestones passed)."""
        return (warmup_factor * (self.gamma ** bisect_right(self.milestones, self.iteration)))

    def get_cosine_factor(self, warmup_factor):
        """Cosine decay from 1 to 0 over max_iter."""
        return ((warmup_factor * 0.5) * (1.0 + math.cos(((math.pi * self.iteration) / self.max_iter))))

    def get_poly_factor(self, warmup_factor):
        """Polynomial decay (1 - t)^LR_POW over max_iter."""
        return (warmup_factor * ((1.0 - float((self.iteration / self.max_iter))) ** self.solver.LR_POW))

    def _compute_values(self):
        # Base class hook; delegate to get_lr().
        return self.get_lr()

    def get_warmup_factor(self, method, iter, warmup_iters, warmup_factor):
        """Warmup multiplier in [warmup_factor, 1]; 1.0 once warmup is done.

        NOTE: parameter name ``iter`` shadows the builtin but is kept for
        keyword-call compatibility.
        """
        if (iter >= warmup_iters):
            return 1.0
        if (method == 'CONSTANT'):
            return warmup_factor
        elif (method == 'LINEAR'):
            alpha = (iter / warmup_iters)
            return ((warmup_factor * (1 - alpha)) + alpha)
        else:
            raise ValueError('Unknown warmup method: {}'.format(method))

    def state_dict(self):
        """Serializable state (everything except the optimizer reference)."""
        return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}

    def load_state_dict(self, state_dict):
        """Restore state produced by state_dict()."""
        self.__dict__.update(state_dict)
def count_all_paths_inefficient(fsa: Fsa, num_frames: int) -> int: return len(list(iterate_all_paths(fsa=fsa, num_frames=num_frames)))
class Dict(object):
    """Bidirectional label <-> index vocabulary with frequency counts.

    Supports loading/saving a '<label> <idx>' file, special (never-pruned)
    symbols, frequency-based pruning, and conversion between label sequences
    and index tensors.
    """

    def __init__(self, data=None, lower=False, seq_len=50):
        self.idxToLabel = {}
        self.labelToIdx = {}
        self.frequencies = {}
        self.lower = lower          # lowercase labels before storing/looking up
        self.seq_length = seq_len   # padded length used by convertToIdx
        self.special = []           # indices of special symbols (kept by prune)
        if (data is not None):
            # A string is a vocab filename; otherwise an iterable of specials.
            if (type(data) == str):
                self.loadFile(data)
            else:
                self.addSpecials(data)

    def size(self):
        """Number of entries in the vocabulary."""
        return len(self.idxToLabel)

    def loadFile(self, filename):
        """Load a '<label> <idx>' file (labels may contain spaces)."""
        for line in codecs.open(filename, 'rb', 'utf-8'):
            fields = line.split()
            if (len(fields) > 2):
                # Multi-word label: everything before the trailing index.
                label = ' '.join(fields[:(- 1)])
                idx = int(fields[(- 1)])
            else:
                label = fields[0]
                idx = int(fields[1])
            self.add(label, idx)

    def writeFile(self, filename):
        """Write the vocabulary as '<label> <idx>' lines."""
        with codecs.open(filename, 'w', 'utf-8') as file:
            for i in range(self.size()):
                label = self.idxToLabel[i]
                file.write(('%s %d\n' % (label, i)))
        # Fix: removed redundant file.close() — the with-block already closes.

    def lookup(self, key, default=None):
        """Index for ``key`` (lowercased if configured), or ``default``."""
        key = (key.lower() if self.lower else key)
        try:
            return self.labelToIdx[key]
        except KeyError:
            return default

    def getLabel(self, idx, default=None):
        """Label for ``idx``, or ``default`` if unknown."""
        try:
            return self.idxToLabel[idx]
        except KeyError:
            return default

    def addSpecial(self, label, idx=None):
        """Add a special symbol; specials survive prune()."""
        idx = self.add(label, idx)
        self.special += [idx]

    def addSpecials(self, labels):
        """Add several special symbols."""
        for label in labels:
            self.addSpecial(label)

    def add(self, label, idx=None):
        """Add ``label`` (optionally at a fixed ``idx``) and bump its count.

        Returns the label's index.
        """
        label = (label.lower() if self.lower else label)
        if (idx is not None):
            self.idxToLabel[idx] = label
            self.labelToIdx[label] = idx
        elif (label in self.labelToIdx):
            idx = self.labelToIdx[label]
        else:
            idx = len(self.idxToLabel)
            self.idxToLabel[idx] = label
            self.labelToIdx[label] = idx
        if (idx not in self.frequencies):
            self.frequencies[idx] = 1
        else:
            self.frequencies[idx] += 1
        return idx

    def prune(self, size):
        """Return a new Dict with specials plus the ``size`` most frequent
        labels (re-indexed). Returns self if already small enough."""
        if (size >= self.size()):
            return self
        freq = torch.Tensor([self.frequencies[i] for i in range(len(self.frequencies))])
        (_, idx) = torch.sort(freq, 0, True)
        newDict = Dict()
        newDict.lower = self.lower
        newDict.seq_length = self.seq_length
        for i in self.special:
            newDict.addSpecial(self.idxToLabel[i])
        for i in idx[:size]:
            # Fix: torch.sort yields 0-dim tensors; convert to a plain int,
            # since idxToLabel is keyed by Python ints (a tensor key hashes
            # by identity and would raise KeyError).
            newDict.add(self.idxToLabel[int(i)])
        return newDict

    def convertToIdx(self, labels, unkWord, padding=False, bosWord=None, eosWord=None):
        """Map ``labels`` to a LongTensor of indices.

        Unknown labels map to ``unkWord``'s index; optional BOS/EOS wrapping.
        NOTE(review): padding appends the hard-coded index 3 (assumed PAD) up
        to ``self.seq_length`` BEFORE any EOS token — confirm this ordering is
        intended by downstream consumers.
        """
        vec = []
        if (bosWord is not None):
            vec += [self.lookup(bosWord)]
        unk = self.lookup(unkWord)
        vec += [self.lookup(label, default=unk) for label in labels]
        if (padding == True):
            vec += ([3] * (self.seq_length - len(labels)))
        if (eosWord is not None):
            vec += [self.lookup(eosWord)]
        return torch.LongTensor(vec)

    def convertToLabels(self, idx, stop):
        """Map indices back to labels, stopping after the ``stop`` index."""
        labels = []
        for i in idx:
            labels += [self.getLabel(i)]
            if (i == stop):
                break
        return labels
def p_return_statement(s): pos = s.position() s.next() if (s.sy not in statement_terminators): value = p_testlist(s) else: value = None return Nodes.ReturnStatNode(pos, value=value)
def conv2d_3d_bn_act(data, num_filter, height, width, kernel=(1, 1, 1), stride=(1, 1, 1), pad=(0, 0, 0), dilate=(1, 1, 1), no_bias=False, act_type='relu', momentum=0.9, eps=(1e-05 + 1e-12), fix_gamma=True, name=None, use_3d=True, use_bn=True, use_global_stats=False, **kwargs): if (not use_bn): return conv2d_3d_act(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, dilate=dilate, no_bias=no_bias, act_type=act_type, name=name, use_3d=use_3d) if use_3d: return conv3d_bn_act(data=data, num_filter=num_filter, height=height, width=width, kernel=kernel, stride=stride, pad=pad, dilate=dilate, no_bias=no_bias, act_type=act_type, momentum=momentum, eps=eps, fix_gamma=fix_gamma, name=name, use_global_stats=use_global_stats, **kwargs) else: return conv2d_bn_act(data=data, num_filter=(num_filter * kernel[0]), kernel=kernel[1:], stride=stride[1:], pad=pad[1:], dilate=dilate[1:], no_bias=no_bias, act_type=act_type, momentum=momentum, eps=eps, fix_gamma=fix_gamma, name=name, use_global_stats=use_global_stats, **kwargs)
class BasicConv2d(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False) self.bn = nn.BatchNorm2d(out_planes) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) return x
def model_validate(model, validation_loader, dir_to_save, writer, epoch, DEVICE):
    """Run one validation pass of a speech-enhancement model.

    Computes loss, PESQ and STOI per batch, appends per-utterance scores to
    'pesq_epoch_<epoch>' / 'stoi_epoch_<epoch>' files under ``dir_to_save``,
    and every 10th epoch saves representative audio samples (best/worst/
    average PESQ and a fixed clip) via ``writer``.

    Returns:
        (validation_loss, avg_pesq, avg_stoi) averaged over batches.
    """
    batch_num = 0
    validation_loss = 0
    avg_pesq = 0
    avg_stoi = 0
    # Buffers used only on sample-saving epochs (epoch % 10 == 0).
    all_batch_input = []
    all_batch_label = []
    all_batch_output = []
    all_batch_real_spec = []
    all_batch_img_spec = []
    all_batch_pesq = []
    # Append mode: scores accumulate if validation reruns for the same epoch.
    f_pesq = open(((dir_to_save + '/pesq_epoch_') + ('%d' % epoch)), 'a')
    f_stoi = open(((dir_to_save + '/stoi_epoch_') + ('%d' % epoch)), 'a')
    model.eval()
    with torch.no_grad():
        for (inputs, labels) in Bar(validation_loader):
            batch_num += 1
            inputs = inputs.float().to(DEVICE)
            labels = labels.float().to(DEVICE)
            # Model returns complex-mask components, spectra, and waveforms.
            (mask_real, mask_imag, real_spec, img_spec, outputs) = model(inputs)
            loss = model.loss(outputs, labels, real_spec, img_spec)
            estimated_wavs = outputs.cpu().detach().numpy()
            clean_wavs = labels.cpu().detach().numpy()
            pesq = cal_pesq(estimated_wavs, clean_wavs)
            stoi = cal_stoi(estimated_wavs, clean_wavs)
            # Per-utterance scores, one value per line.
            for i in range(len(pesq)):
                f_pesq.write('{:.6f}\n'.format(pesq[i]))
                f_stoi.write('{:.4f}\n'.format(stoi[i]))
            pesq = np.reshape(pesq, (1, (- 1)))
            stoi = np.reshape(stoi, (1, (- 1)))
            avg_pesq += (sum(pesq[0]) / len(inputs))
            avg_stoi += (sum(stoi[0]) / len(inputs))
            if ((epoch % 10) == 0):
                # Keep everything needed to pick and dump sample clips later.
                all_batch_input.extend(inputs)
                all_batch_label.extend(labels)
                all_batch_output.extend(outputs)
                all_batch_real_spec.extend(mask_real)
                all_batch_img_spec.extend(mask_imag)
                all_batch_pesq.extend(pesq[0])
            validation_loss += loss
    if ((epoch % 10) == 0):
        # Dump the clips with max / min / closest-to-average PESQ.
        all_batch_pesq = np.reshape(all_batch_pesq, ((- 1), 1))
        max_pesq_index = max_index(all_batch_pesq)
        min_pesq_index = min_index(all_batch_pesq)
        avg_pesq_index = near_avg_index(all_batch_pesq)
        writer.save_samples_we_want('max_pesq', all_batch_input[max_pesq_index], all_batch_label[max_pesq_index], all_batch_output[max_pesq_index], epoch)
        writer.save_samples_we_want('min_pesq', all_batch_input[min_pesq_index], all_batch_label[min_pesq_index], all_batch_output[min_pesq_index], epoch)
        writer.save_samples_we_want('avg_pesq', all_batch_input[avg_pesq_index], all_batch_label[avg_pesq_index], all_batch_output[avg_pesq_index], epoch)
        # Also dump a fixed clip for epoch-to-epoch comparison.
        clip_num = 10
        writer.save_samples_we_want('n{}_sample'.format(clip_num), all_batch_input[clip_num], all_batch_label[clip_num], all_batch_output[clip_num], epoch)
    validation_loss /= batch_num
    avg_pesq /= batch_num
    avg_stoi /= batch_num
    f_pesq.write('Avg: {:.6f}\n'.format(avg_pesq))
    f_stoi.write('Avg: {:.4f}\n'.format(avg_stoi))
    f_pesq.close()
    f_stoi.close()
    return (validation_loss, avg_pesq, avg_stoi)
def cosine_dist(x, y, pw=False): score = dot_dist(x, y, pw) x = nd.norm(x, ord=2, axis=(- 1)) y = nd.norm(y, ord=2, axis=(- 1)) if (pw is False): x = x.expand_dims(axis=1) y = y.expand_dims(axis=0) return (score / (x * y))
class LinearTrendFeatures(FeaturesPipeline): def __init__(self, **kwargs): super().__init__(**kwargs) def create_pipeline(self, train: NumpyOrPandas) -> LAMLTransformer: transformers_list = [] datetimes = [get_columns_by_role(train, 'Datetime')[0]] if (len(datetimes) > 0): dt_processing = SequentialTransformer([ColumnsSelector(keys=datetimes), TimeToNum(), StandardScaler()]) transformers_list.append(dt_processing) union_all = UnionTransformer(transformers_list) return union_all
.usefixtures('num_cpus', 'io_type') class TestTransmon(StandardTests): def setup_class(cls): cls.qbt = None cls.qbt_type = Transmon cls.file_str = 'transmon' cls.op1_str = 'n_operator' cls.op2_str = 'n_operator' cls.param_name = 'ng' cls.param_list = np.linspace((- 1), 1, 100) def test_plot_n_wavefunction(self): self.qbt = Transmon(EJ=1.0, EC=1.0, ng=0.0, ncut=10) self.qbt.plot_n_wavefunction(esys=None, which=1, mode='real')
class ModelStructure(metaclass=ABCMeta): def get_default_model_parameters(): return HParams() def get_identity_param_list(): return []
def tf_efficientnet_b6(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model
class DatasetConfig(FairseqDataclass):
    """Dataloading/validation/generation configuration flags.

    Each ``field`` carries its CLI help text in metadata; ``II(...)`` entries
    interpolate their default from another config key at resolve time.
    """
    # --- dataloading ---
    num_workers: int = field(default=1, metadata={'help': 'how many subprocesses to use for data loading'})
    skip_invalid_size_inputs_valid_test: bool = field(default=False, metadata={'help': 'ignore too long or too short lines in valid and test set'})
    max_tokens: Optional[int] = field(default=None, metadata={'help': 'maximum number of tokens in a batch'})
    batch_size: Optional[int] = field(default=None, metadata={'help': 'number of examples in a batch', 'argparse_alias': '--max-sentences'})
    required_batch_size_multiple: int = field(default=8, metadata={'help': 'batch size will be a multiplier of this value'})
    required_seq_len_multiple: int = field(default=1, metadata={'help': 'maximum sequence length in batch will be a multiplier of this value'})
    dataset_impl: Optional[DATASET_IMPL_CHOICES] = field(default=None, metadata={'help': 'output dataset implementation'})
    data_buffer_size: int = field(default=10, metadata={'help': 'Number of batches to preload'})
    # --- subset selection ---
    train_subset: str = field(default='train', metadata={'help': 'data subset to use for training (e.g. train, valid, test)'})
    valid_subset: str = field(default='valid', metadata={'help': 'comma separated list of data subsets to use for validation (e.g. train, valid, test)'})
    # --- validation scheduling ---
    validate_interval: int = field(default=1, metadata={'help': 'validate every N epochs'})
    validate_interval_updates: int = field(default=0, metadata={'help': 'validate every N updates'})
    validate_after_updates: int = field(default=0, metadata={'help': 'dont validate until reaching this many updates'})
    fixed_validation_seed: Optional[int] = field(default=None, metadata={'help': 'specified random seed for validation'})
    disable_validation: bool = field(default=False, metadata={'help': 'disable validation'})
    # Defaults interpolated from the training values unless overridden.
    max_tokens_valid: Optional[int] = field(default=II('dataset.max_tokens'), metadata={'help': 'maximum number of tokens in a validation batch (defaults to --max-tokens)'})
    batch_size_valid: Optional[int] = field(default=II('dataset.batch_size'), metadata={'help': 'batch size of the validation batch (defaults to --batch-size)', 'argparse_alias': '--max-sentences-valid'})
    curriculum: int = field(default=0, metadata={'help': "don't shuffle batches for first N epochs"})
    # --- generation / sharding ---
    gen_subset: str = field(default='test', metadata={'help': 'data subset to generate (train, valid, test)'})
    num_shards: int = field(default=1, metadata={'help': 'shard generation over N shards'})
    shard_id: int = field(default=0, metadata={'help': 'id of the shard to generate (id < num_shards)'})
class DetectionLoss(nn.Module):
    """Detection loss wrapper.

    Thin nn.Module that forwards per-level outputs/targets plus config-derived
    hyper-parameters to the functional ``loss_fn`` (or its TorchScript twin
    ``loss_jit``). The hyper-parameter names (alpha, gamma, legacy_focal,
    delta, box_loss_weight) suggest a focal classification loss and a
    delta-parameterised box loss — confirm in ``loss_fn``.
    """

    # TorchScript: expose num_classes as a compile-time constant.
    __constants__ = ['num_classes']

    def __init__(self, config):
        # config: carries num_classes, focal params (alpha, gamma), Huber delta,
        # box loss weight, label smoothing, and the jit_loss flag.
        super(DetectionLoss, self).__init__()
        self.config = config
        self.num_classes = config.num_classes
        self.alpha = config.alpha
        self.gamma = config.gamma
        self.delta = config.delta
        self.box_loss_weight = config.box_loss_weight
        self.label_smoothing = config.label_smoothing
        self.legacy_focal = config.legacy_focal
        self.use_jit = config.jit_loss

    def forward(self, cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor], cls_targets: List[torch.Tensor], box_targets: List[torch.Tensor], num_positives: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Delegate to loss_fn/loss_jit; returns the 3-tuple of loss tensors it produces."""
        l_fn = loss_fn
        if ((not torch.jit.is_scripting()) and self.use_jit):
            # Only switch to the pre-scripted loss in eager mode; inside
            # TorchScript the plain function is compiled as part of this module.
            l_fn = loss_jit
        return l_fn(cls_outputs, box_outputs, cls_targets, box_targets, num_positives, num_classes=self.num_classes, alpha=self.alpha, gamma=self.gamma, delta=self.delta, box_loss_weight=self.box_loss_weight, label_smoothing=self.label_smoothing, legacy_focal=self.legacy_focal)
@_staging_test  # NOTE(review): decorator form looks truncated in the original ("_staging_test" bare) — confirm intended name (e.g. @is_staging_test)
class TokenizerPushToHubTester(unittest.TestCase):
    """Integration tests: saving tokenizers with push_to_hub and loading them back.

    Fix: setUpClass/tearDownClass take ``cls`` and must be declared with
    @classmethod — the original defined them as plain instance methods, which
    unittest would never invoke correctly.
    """

    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class; token is reused by every test.
        cls._token = login(username=USER, password=PASS)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, name='test-tokenizer')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, name='test-tokenizer-org', organization='valid_org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, name='test-dynamic-tokenizer')
        except HTTPError:
            pass

    def _create_vocab_file(self, tmp_dir):
        """Write the toy vocab to tmp_dir/vocab.txt and return its path."""
        vocab_file = os.path.join(tmp_dir, 'vocab.txt')
        with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in self.vocab_tokens]))
        return vocab_file

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer = BertTokenizer(self._create_vocab_file(tmp_dir))
            tokenizer.save_pretrained(os.path.join(tmp_dir, 'test-tokenizer'), push_to_hub=True, use_auth_token=self._token)
            new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer')
            self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer = BertTokenizer(self._create_vocab_file(tmp_dir))
            tokenizer.save_pretrained(os.path.join(tmp_dir, 'test-tokenizer-org'), push_to_hub=True, use_auth_token=self._token, organization='valid_org')
            new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
            self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_dynamic_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer = FakeTokenizer(self._create_vocab_file(tmp_dir))
        # Slow tokenizer only: loading with trust_remote_code resolves FakeTokenizer.
        tokenizer._auto_map = ('tokenizer.FakeTokenizer', None)
        with tempfile.TemporaryDirectory() as tmp_dir:
            repo = Repository(tmp_dir, clone_from=f'{USER}/test-dynamic-tokenizer', use_auth_token=self._token)
            tokenizer.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, 'tokenizer.py'), 'w') as f:
                f.write(FAKE_TOKENIZER_CODE)
            repo.push_to_hub()
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        self.assertEqual(tokenizer.__class__.__name__, 'FakeTokenizer')
        # Fast variant registered too: default load picks FakeTokenizerFast,
        # use_fast=False falls back to FakeTokenizer.
        tokenizer._auto_map = ('tokenizer.FakeTokenizer', 'tokenizer.FakeTokenizerFast')
        with tempfile.TemporaryDirectory() as tmp_dir:
            repo = Repository(tmp_dir, clone_from=f'{USER}/test-dynamic-tokenizer', use_auth_token=self._token)
            tokenizer.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, 'tokenizer.py'), 'w') as f:
                f.write(FAKE_TOKENIZER_CODE)
            repo.push_to_hub()
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        self.assertEqual(tokenizer.__class__.__name__, 'FakeTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True)
        self.assertEqual(tokenizer.__class__.__name__, 'FakeTokenizer')
class ThrombocytopeniaCodeLabeler(OMOPConceptCodeLabeler):
    """Labeler keyed on the OMOP concept codes for thrombocytopenia."""

    # NOTE(review): both entries are the bare 'SNOMED/' prefix with no concept
    # id — these look like unpopulated placeholders; confirm the intended codes.
    original_omop_concept_codes = ['SNOMED/', 'SNOMED/']
def set_time_step(sim, dtfactor):
    """Set ``sim.dt`` to ``dtfactor`` times the shortest pericenter timescale.

    Skips particle 0 (presumably the central body — confirm against callers)
    and evaluates t_peri ~ P * (1 - e)^(3/2) / sqrt(1 + e) for every other
    real particle.
    """
    def _peri_timescale(body):
        # Orbital period shrunk by the eccentricity factors near pericenter.
        return (body.P * ((1 - body.e) ** 1.5)) / np.sqrt((1 + body.e))

    bodies = sim.particles[1:sim.N_real]
    shortest = np.min([_peri_timescale(body) for body in bodies])
    sim.dt = (shortest * dtfactor)
class _ConvOrTransposedConv(rf.Module):
    """Shared base for convolution and transposed-convolution modules.

    Subclasses fix ``nd`` (number of spatial dims, or None to infer it from
    ``filter_size``), ``_transposed``, and optionally ``groups``.
    """

    nd: Optional[int] = None  # number of spatial dims; None => inferred from filter_size length
    _transposed: bool  # set by subclass: True for transposed (de-)convolution
    groups: Optional[int] = None  # grouped-convolution factor (plain conv only)

    def __init__(self, in_dim: Dim, out_dim: Dim, filter_size: Union[(Sequence[Union[(int, Dim)]], int, Dim)], *, padding: str, with_bias: bool):
        """
        :param in_dim: input feature dim
        :param out_dim: output feature dim
        :param filter_size: one entry per spatial dim; a scalar is broadcast to ``nd`` dims
        :param padding: padding mode string, passed through (not validated here)
        :param with_bias: whether to allocate a bias parameter over ``out_dim``
        """
        super().__init__()
        assert (isinstance(in_dim, Dim) and isinstance(out_dim, Dim))
        # Disambiguate in_dim vs out_dim in case they could match the same tensor dim.
        self.in_dim = rf.dim_match_priority_when_needed(in_dim, out_dim)
        self.out_dim = out_dim
        if isinstance(filter_size, (int, Dim)):
            if (self.nd in (None, 1)):
                filter_size = [filter_size]
            else:
                # Broadcast the scalar filter size over all nd spatial dims.
                filter_size = ([filter_size] * self.nd)
        assert isinstance(filter_size, (tuple, list))
        if self.nd:
            assert (self.nd == len(filter_size))
        else:
            self.nd = len(filter_size)
        # Wrap plain ints into named Dims so the filter param has named axes throughout.
        self.filter_size = [(s if isinstance(s, Dim) else Dim(s, name=f'filter-dim{i}')) for (i, s) in enumerate(filter_size)]
        self.padding = padding
        filter_in_dim = in_dim
        if ((self.groups is not None) and (self.groups > 1)):
            # Grouped conv: each group sees only in_dim/groups input channels.
            assert (not self._transposed)
            filter_in_dim //= self.groups
        filter_in_dim = rf.dim_match_priority_when_needed(filter_in_dim, self.out_dim)
        self.filter_in_dim = filter_in_dim
        # Filter layout differs by direction: conv is (out, in, *spatial);
        # transposed conv is (in, out, *spatial).
        self.filter = rf.Parameter((([self.out_dim, self.filter_in_dim] if (not self._transposed) else [self.in_dim, self.out_dim]) + self.filter_size))
        self.filter.initial = rf.init.Glorot()
        self.with_bias = with_bias
        self.bias = None
        if self.with_bias:
            self.bias = rf.Parameter([self.out_dim])
            self.bias.initial = 0.0

    def _call_nd1(self, source: Tensor, *, in_spatial_dim: Dim, out_spatial_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
        """1-D convenience wrapper: forward via the base class with a single spatial dim."""
        assert (self.nd == 1)
        (out, (out_spatial_dim,)) = self.__class__.__base__.__call__(self, source, in_spatial_dims=[in_spatial_dim], out_spatial_dims=([out_spatial_dim] if out_spatial_dim else None))
        return (out, out_spatial_dim)
def test_get_perpendicular_vector():
    """A vector returned by get_perpendicular_vector must be orthogonal to its input."""
    vec = np.array([0.3, 0.4, 0.5])
    perp = get_perpendicular_vector(vec)
    # Orthogonality <=> zero dot product (up to floating-point precision).
    npt.assert_almost_equal(np.dot(vec, perp), 0.0)
def parse_command(args):
    """Split a raw argv list into (command_name, remaining_args).

    Handles --version and bare/`help` invocations by printing and exiting;
    raises CommandError (with a did-you-mean hint) for unknown commands.
    """
    parser = create_main_parser()
    (general_options, args_else) = parser.parse_args(args)

    # --version: print and bail out immediately.
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()

    # No command at all, or a lone "help": show usage and bail out.
    no_command = not args_else
    lone_help = (not no_command) and (args_else[0] == 'help') and (len(args_else) == 1)
    if no_command or lone_help:
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        parts = ['unknown command "{}"'.format(cmd_name)]
        guess = get_similar_commands(cmd_name)
        if guess:
            parts.append('maybe you meant "{}"'.format(guess))
        raise CommandError(' - '.join(parts))

    # Everything except the first occurrence of the command name is passed on.
    cmd_args = args[:]
    cmd_args.remove(cmd_name)
    return (cmd_name, cmd_args)
def format_error(message):
    """Write *message* to stderr framed by asterisk rules, preceded by the
    current traceback when one can be printed (best effort)."""
    rule = (('*' * 72) + '\n')
    sys.stderr.write(rule)
    try:
        # Imported lazily and guarded: traceback printing must never be able
        # to mask the actual error message below.
        import traceback
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write(rule)
    except BaseException:
        pass
    sys.stderr.write(message)
    sys.stderr.write(rule)
def main():
    """CLI entry point: parse flags and benchmark the selected simple op."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--op', default='add_op', dest='op', type=str)
    # All boolean switches share the same shape: default off, store_true.
    for switch in ('benchmark_c2_net', 'use_throughput_benchmark', 'debug', 'save', 'eager_mode'):
        parser.add_argument('--' + switch, default=False, dest=switch, action='store_true')
    parser.add_argument('--num_warmup_iters', type=int, default=100)
    parser.add_argument('--num_iters', type=int, default=1000)
    args = parser.parse_args()

    if (args.op not in SUPPORTED_OPS):
        print('Op {} is not supported: Supported ops are:{}'.format(args.op, SUPPORTED_OPS))
        return
    assert (not (args.benchmark_c2_net and args.use_throughput_benchmark)), 'Benchmarking of C2 net via throughput benchmarking is not yet supported'

    config = BenchmarkConfig(args.num_warmup_iters, args.num_iters)
    graph_mode = (not args.eager_mode)
    result = {}
    if (args.op == 'add_op'):
        num_params = 2
        if args.benchmark_c2_net:
            module_config = ModuleConfig(None, 'Sum', num_params, None)
        else:
            module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
        benchmark_simple_fn(args, config, module_config, SimpleAddModule, result)
    print_results(result)
def extract_statement_within_size(root, max_node=10, endswith=None, code_string=None, tokenizer=None):
    """BFS the parse tree rooted at *root*, collecting bounded-size statement nodes.

    A node qualifies when its type ends with one of *endswith* (default:
    ['statement']), its subtree has strictly between 1 and *max_node* nodes,
    and — when *code_string* and *tokenizer* are given — its tokenized text is
    non-empty.

    :param root: parse-tree node with ``.type`` and ``.children`` (tree-sitter style — confirm)
    :param max_node: exclusive upper bound on subtree node count
    :param endswith: list of type-name suffixes to match (defaults to ['statement'])
    :param code_string: source text passed to *tokenizer*
    :param tokenizer: callable(code_string, node) -> token list
    :return: list of matching nodes in BFS order
    """
    # Fix: the original popped the queue head with `queue = queue[1:]`, which
    # copies the whole list on every iteration (O(n^2) overall); deque.popleft
    # is O(1) and preserves the exact BFS order.
    from collections import deque

    if (endswith is None):
        endswith = ['statement']
    statements = []
    queue = deque([root])
    while queue:
        current_node = queue.popleft()
        node_count = count_nodes(current_node)
        if ((code_string is not None) and (tokenizer is not None)):
            tokens = tokenizer(code_string, current_node)
            current_code = ' '.join(tokens).strip()
        else:
            # Placeholder text is non-empty, so the length check passes below.
            current_code = 'please provide code string and tokenizer to analyze code length'
        if (any((str(current_node.type).endswith(e) for e in endswith)) and (1 < node_count < max_node) and (len(current_code) > 0)):
            statements.append(current_node)
        queue.extend(current_node.children)
    return statements
def all_reduce(tensor, group, op='sum'):
    """All-reduce *tensor* across *group* and return the reduced tensor.

    Supports 'sum' and 'max'. On the XLA path *group* must be a
    ('tpu', replica_groups) tuple; otherwise torch.distributed reduces in place.
    """
    if use_xla():
        assert (isinstance(group, tuple) and (group[0] == 'tpu'))
        return xm.all_reduce(op, [tensor], groups=group[1])[0]
    # Map the string op onto the distributed ReduceOp; reject anything else.
    if (op == 'sum'):
        reduce_op = dist.ReduceOp.SUM
    elif (op == 'max'):
        reduce_op = dist.ReduceOp.MAX
    else:
        raise NotImplementedError
    dist.all_reduce(tensor, op=reduce_op, group=group)
    return tensor
def randomNumaAwareInit(A):
    """Initialize numpy array *A* in place via the native HPTT NUMA-aware routine.

    Passes the raw data pointer, the shape array, and the rank to the C library.
    (``ctypes.c_voidp`` is a legacy alias of ``c_void_p``.)
    """
    HPTTlib.randomNumaAwareInit(ctypes.c_void_p(A.ctypes.data), ctypes.cast(A.ctypes.shape, ctypes.POINTER(ctypes.c_voidp)), ctypes.c_int32(A.ndim))
# NOTE(review): "_builder('laion400M')" looks like a truncated registration
# decorator (e.g. @registry.register_builder('laion400M')) — confirm and restore.
_builder('laion400M')
class Laion400MBuilder(Laion2BMultiBuilder):
    """Dataset builder for the LAION-400M subset; reuses the Laion2B pipeline
    with its own default config file."""
    train_dataset_cls = LaionDataset
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/laion/defaults_400M.yaml'}
def register_Ns3Icmpv4Echo_methods(root_module, cls):
    """Register the ns3::Icmpv4Echo C++ API on its pybindgen class wrapper.

    Mechanical, generated-style code: one add_constructor/add_method call per
    member of the underlying C++ header. Keep in sync with the ns-3 API.
    """
    cls.add_constructor([param('ns3::Icmpv4Echo const &', 'arg0')])  # copy constructor
    cls.add_constructor([])  # default constructor
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetData', 'uint32_t', [param('uint8_t *', 'payload')], is_const=True)
    cls.add_method('GetDataSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetIdentifier', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetData', 'void', [param('ns3::Ptr< ns3::Packet const >', 'data')])
    cls.add_method('SetIdentifier', 'void', [param('uint16_t', 'id')])
    cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'seq')])
    return
def main():
    """Decode a tab-separated (CoNLL-U-style) file with a trained transducer.

    Lines with fewer than 2 columns or starting with '#' are copied through
    unchanged; otherwise the form (column 1) and tags (column 5) are fed to the
    model and the prediction is written into column 2.
    """
    opt = get_args()
    decode_fn = Decoder(opt.decode, max_len=opt.max_len, beam_size=opt.beam_size)
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    # Fix: the original passed an anonymous open() to torch.load and never
    # closed it — use a context manager so the checkpoint handle is released.
    with open(opt.model, mode='rb') as model_fp:
        model = torch.load(model_fp, map_location=device)
    model = model.to(device)
    trg_i2c = {i: c for (c, i) in model.trg_c2i.items()}

    def decode_trg(seq):
        # Map predicted indices back to target characters.
        return [trg_i2c[i] for i in seq]

    maybe_mkdir(opt.out_file)
    with open(opt.in_file, 'r', encoding='utf-8') as in_fp, open(opt.out_file, 'w', encoding='utf-8') as out_fp:
        # Stream line by line instead of materializing readlines().
        for line in in_fp:
            toks = line.strip().split('\t')
            if ((len(toks) < 2) or (line[0] == '#')):
                out_fp.write(line)
                continue
            (word, tags) = (toks[1], toks[5])
            (word, tags) = (list(word), tags.split(';'))
            src = encode(model, word, tags, device)
            src_mask = dummy_mask(src)
            (pred, _) = decode_fn(model, src, src_mask)
            pred = unpack_batch(pred)[0]
            pred_out = ''.join(decode_trg(pred))
            toks[2] = pred_out
            out_fp.write(('\t'.join(toks) + '\n'))
def run(job_file: str, scheduler_name: str) -> None:
    """Submit *job_file* through the scheduler backend named *scheduler_name*."""
    JobScheduler(job_file=job_file, scheduler=scheduler_name).submit()
def make_validation_logging():
    """Build TF1 summary plumbing for validation-time NDCG and critic error.

    Returns a dict with the two tf.Variables (to be assigned per evaluation)
    and the merged summary op that serializes both scalars.
    """
    ndcg_var = tf.Variable(0.0)
    critic_error_var = tf.Variable(0.0)
    merged = tf.summary.merge([
        tf.summary.scalar('ndcg_at_k_validation', ndcg_var),
        tf.summary.scalar('critic_error_validation', critic_error_var),
    ])
    return {
        'ndcg_vad_var': ndcg_var,
        'critic_error_vad_var': critic_error_var,
        'validation_logging': merged,
    }
def test_batched_constraints(backend):
    """Exercise the combined Poisson and Gaussian constraint terms, unbatched
    and batched.

    The MockConfig wires two Poisson-constrained parameter sets (auxdata
    [12] and [13, 14], factors equal to the auxdata) and two Normal-constrained
    sets (auxdata all zeros; sigmas [1.5, 2.0] for norm1, default 1.0 for
    norm2) over an 8-parameter vector. Expected values are recomputed with
    default_backend's per-term logpdfs.
    """
    config = MockConfig(par_order=['pois1', 'pois2', 'norm1', 'norm2'], par_map={'pois1': {'paramset': constrained_by_poisson(name='pois1', is_scalar=False, n_parameters=1, inits=[1.0], bounds=[[0, 10]], auxdata=[12], factors=[12], fixed=False), 'slice': slice(0, 1), 'auxdata': [1]}, 'pois2': {'paramset': constrained_by_poisson(name='pois2', is_scalar=False, n_parameters=2, inits=([1.0] * 2), bounds=([[0, 10]] * 2), auxdata=[13, 14], factors=[13, 14], fixed=False), 'slice': slice(1, 3)}, 'norm1': {'paramset': constrained_by_normal(name='norm1', is_scalar=False, n_parameters=2, inits=([0] * 2), bounds=([[0, 10]] * 2), auxdata=[0, 0], sigmas=[1.5, 2.0], fixed=False), 'slice': slice(3, 5)}, 'norm2': {'paramset': constrained_by_normal(name='norm2', is_scalar=False, n_parameters=3, inits=([0] * 3), bounds=([[0, 10]] * 3), auxdata=[0, 0, 0], fixed=False), 'slice': slice(5, 8)}})

    # --- Poisson, unbatched, nominal parameters: rate == auxdata for all terms.
    suggested_pars = (([1.0] * 3) + ([0.0] * 5))
    constraint = poisson_constraint_combined(config)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.isclose(result, sum((default_backend.poisson_logpdf(data, rate) for (data, rate) in zip([12, 13, 14], [12, 13, 14]))))
    assert (result.shape == ())

    # --- Poisson, unbatched, scaled parameters: rates are factor * par (1.1x).
    suggested_pars = (([1.1] * 3) + ([0.0] * 5))
    constraint = poisson_constraint_combined(config)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.isclose(result, sum((default_backend.poisson_logpdf(data, rate) for (data, rate) in zip([12, 13, 14], [(12 * 1.1), (13 * 1.1), (14 * 1.1)]))))
    assert (result.shape == ())

    # --- Poisson, batch of 10 identical parameter rows: only the shape is checked.
    constraint = poisson_constraint_combined(config, batch_size=10)
    result = constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(([suggested_pars] * 10)))
    assert (result.shape == (10,))

    # --- Poisson, batch of 3 distinct rows: each row must reduce independently.
    suggested_pars = [([1.1, 1.2, 1.3] + ([0.0] * 5)), ([0.7, 0.8, 0.9] + ([0.0] * 5)), ([0.4, 0.5, 0.6] + ([0.0] * 5))]
    constraint = poisson_constraint_combined(config, batch_size=3)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.all(np.isclose(result, np.sum([[default_backend.poisson_logpdf(data, rate) for (data, rate) in zip([12, 13, 14], [(12 * 1.1), (13 * 1.2), (14 * 1.3)])], [default_backend.poisson_logpdf(data, rate) for (data, rate) in zip([12, 13, 14], [(12 * 0.7), (13 * 0.8), (14 * 0.9)])], [default_backend.poisson_logpdf(data, rate) for (data, rate) in zip([12, 13, 14], [(12 * 0.4), (13 * 0.5), (14 * 0.6)])]], axis=1)))
    assert (result.shape == (3,))

    # --- Gaussian, batch of 1, nominal means (all zero); sigmas [1.5, 2.0, 1, 1, 1].
    suggested_pars = (([1.0] * 3) + ([0.0] * 5))
    constraint = gaussian_constraint_combined(config, batch_size=1)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.isclose(result[0], sum((default_backend.normal_logpdf(data, mu, sigma) for (data, mu, sigma) in zip([0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1.5, 2.0, 1.0, 1.0, 1.0]))))
    assert (result.shape == (1,))

    # --- Gaussian, batch of 1, shifted means [1..5].
    suggested_pars = (([1.0] * 3) + [1, 2, 3, 4, 5])
    constraint = gaussian_constraint_combined(config, batch_size=1)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.isclose(result[0], sum((default_backend.normal_logpdf(data, mu, sigma) for (data, mu, sigma) in zip([0, 0, 0, 0, 0], [1, 2, 3, 4, 5], [1.5, 2.0, 1.0, 1.0, 1.0]))))
    assert (result.shape == (1,))

    # --- Gaussian, batch of 3 distinct rows: row-wise sums must match.
    suggested_pars = [(([1.0] * 3) + [1, 2, 3, 4, 5]), (([1.0] * 3) + [(- 1), (- 2), (- 3), (- 4), (- 5)]), (([1.0] * 3) + [(- 1), (- 2), 0, 1, 2])]
    constraint = gaussian_constraint_combined(config, batch_size=3)
    result = default_backend.astensor(pyhf.tensorlib.tolist(constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(suggested_pars))))
    assert np.all(np.isclose(result, np.sum([[default_backend.normal_logpdf(data, mu, sigma) for (data, mu, sigma) in zip([0, 0, 0, 0, 0], [1, 2, 3, 4, 5], [1.5, 2.0, 1.0, 1.0, 1.0])], [default_backend.normal_logpdf(data, mu, sigma) for (data, mu, sigma) in zip([0, 0, 0, 0, 0], [(- 1), (- 2), (- 3), (- 4), (- 5)], [1.5, 2.0, 1.0, 1.0, 1.0])], [default_backend.normal_logpdf(data, mu, sigma) for (data, mu, sigma) in zip([0, 0, 0, 0, 0], [(- 1), (- 2), 0, 1, 2], [1.5, 2.0, 1.0, 1.0, 1.0])]], axis=1)))
    assert (result.shape == (3,))

    # --- Gaussian, batch of 10 identical rows: shape check only.
    constraint = gaussian_constraint_combined(config, batch_size=10)
    result = constraint.logpdf(pyhf.tensorlib.astensor(config.auxdata), pyhf.tensorlib.astensor(([suggested_pars] * 10)))
    assert (result.shape == (10,))
def test_round_strided():
    """Round-tripping a zero-stride view through to_buffers/from_buffers must
    preserve both the values and the shared backing memory (no copy)."""
    backing = np.array([101], dtype=np.int32)
    # 100 logical elements, all aliasing the single backing int32 (stride 0).
    view = np.lib.stride_tricks.as_strided(backing, (100,), strides=(0,))
    roundtripped = ak.from_buffers(*ak.to_buffers(view), highlevel=False)
    assert np.shares_memory(roundtripped.data, backing)
    assert ak.almost_equal(view, roundtripped)
def build_davis(cfg, train=True):
    """Instantiate the DAVIS dataset from *cfg*: the training clip sampler when
    *train* is True, otherwise the evaluation dataset."""
    if not train:
        # DAVIS16 evaluation uses a single annotated object per video.
        single_obj = (cfg.DATA.VAL.DATASET_NAME == 'DAVIS16')
        return DAVIS_Test(root=cfg.DATA.DAVIS_ROOT, single_obj=single_obj)
    return DAVIS_Train(
        root=cfg.DATA.DAVIS_ROOT,
        output_size=cfg.DATA.SIZE,
        clip_n=cfg.DATA.TRAIN.FRAMES_PER_CLIP,
        max_obj_n=cfg.DATA.TRAIN.MAX_OBJECTS,
        # DAVIS_SKIP_INCREMENT packs (initial max skip, increment).
        max_skip=cfg.DATA.TRAIN.DAVIS_SKIP_INCREMENT[0],
        increment=cfg.DATA.TRAIN.DAVIS_SKIP_INCREMENT[1],
        samples=cfg.DATA.TRAIN.SAMPLES_PER_VIDEO,
        choice=cfg.DATA.TRAIN.SAMPLE_CHOICE,
        crop=cfg.DATA.TRAIN.CROP,
    )
# NOTE(review): "('kuma_gen_enc_classifier')" looks like a truncated registration
# decorator (e.g. @Model.register('kuma_gen_enc_classifier')) — confirm and restore.
('kuma_gen_enc_classifier')
class KumaraswamyGenEncClassifier(RationaleBaseModel):
    """Generator/encoder rationale classifier with HardKuma rationale sampling.

    The generator produces per-token (a, b) Kumaraswamy parameters; a stretched
    HardKuma distribution yields (near-)binary token masks, the encoder
    classifies with the masked embeddings, and a Lagrangian-relaxation scheme
    (lambda0 / c0_ma buffers) pushes the expected rationale length toward
    ``desired_length``.
    """

    def __init__(self, vocab: Vocabulary, generator: Params, encoder: Params, samples: int, lambda_init: float, desired_length: float, rationale_extractor: Model=None, initializer: InitializerApplicator=InitializerApplicator(), regularizer: Optional[RegularizerApplicator]=None):
        """
        :param generator: params for the sub-model emitting per-token (a, b)
        :param encoder: params for the classifier run on masked input
        :param samples: number of rationale samples (stored, not read in this block)
        :param lambda_init: initial Lagrange multiplier for the length constraint
        :param desired_length: target fraction of tokens kept, clamped to [0, 1]
        :param rationale_extractor: optional module that turns soft probabilities
            into discrete rationales at prediction time
        """
        super(KumaraswamyGenEncClassifier, self).__init__(vocab, initializer, regularizer)
        self._vocabulary = vocab
        self._num_labels = self._vocabulary.get_vocab_size('labels')
        self._generator = Model.from_params(vocab=vocab, regularizer=regularizer, initializer=initializer, params=Params(generator))
        self._encoder = Model.from_params(vocab=vocab, regularizer=regularizer, initializer=initializer, params=Params(encoder))
        self._samples = samples
        self._desired_length = min(1.0, max(0.0, desired_length))
        self._rationale_extractor = rationale_extractor
        # Running averages of the individual loss components, for metrics only.
        self._loss_tracks = {k: Average() for k in ['_lasso_loss', '_base_loss', '_rat_length', '_lambda0', '_c0_ma', '_c0']}
        # Stretched support [-0.1, 1.1] so the HardKuma puts point mass on 0 and 1.
        s_min = torch.Tensor([(- 0.1)])
        s_max = torch.Tensor([1.1])
        self.support = [s_min, s_max]
        # Lagrangian-relaxation hyper-parameters for the length constraint.
        self.lagrange_alpha = 0.9
        self.lagrange_lr = 0.01
        self.lambda_min = 1e-12
        self.lambda_max = 5.0
        # Buffers (not parameters): multiplier and moving average of the violation.
        self.register_buffer('lambda0', torch.full((1,), lambda_init))
        self.register_buffer('c0_ma', torch.full((1,), 0.0))
        initializer(self)

    def forward(self, document, query=None, label=None, metadata=None, rationale=None) -> Dict[(str, Any)]:
        generator_dict = self._generator(document, query, label)
        mask = generator_dict['mask']
        assert ('a' in generator_dict)
        assert ('b' in generator_dict)
        (a, b) = (generator_dict['a'], generator_dict['b'])
        # Clamp for numerical stability of the Kumaraswamy parameters.
        a = a.clamp(1e-06, 100.0)
        b = b.clamp(1e-06, 100.0)
        output_dict = {}
        sampler = HardKuma([a, b], support=[self.support[0].to(a.device), self.support[1].to(b.device)])
        # Deterministic rationale: token kept where the distribution mean > 0.5.
        generator_dict['predicted_rationale'] = ((sampler.mean() > 0.5).long() * mask)
        if (self.prediction_mode or (not self.training)):
            # Evaluation/prediction: use a deterministic mask instead of sampling.
            if (self._rationale_extractor is None):
                sample_z = ((sampler.mean() > 0.5).long() * mask)
            else:
                prob_z = sampler.mean()
                sample_z = self._rationale_extractor.extract_rationale(prob_z, document, as_one_hot=True)
                output_dict['rationale'] = self._rationale_extractor.extract_rationale(prob_z, document, as_one_hot=False)
                sample_z = torch.Tensor(sample_z).to(prob_z.device).float()
        else:
            # Training: draw a (reparameterized) sample per token.
            sample_z = sampler.sample()
        sample_z = (sample_z * mask)
        # Expand token-level gates to wordpiece level; -1 marks wordpieces with
        # no token (e.g. special symbols), which are always kept (gate = 1).
        wordpiece_to_token = generator_dict['wordpiece-to-token']
        wtt0 = torch.where((wordpiece_to_token == (- 1)), torch.tensor([0]).to(wordpiece_to_token.device), wordpiece_to_token)
        wordpiece_sample = util.batched_index_select(sample_z.unsqueeze((- 1)), wtt0)
        wordpiece_sample[(wordpiece_to_token.unsqueeze((- 1)) == (- 1))] = 1.0

        def scale_embeddings(module, input, output):
            # Forward hook: multiply the encoder's embedding output by the gates.
            output = (output * wordpiece_sample)
            return output

        # Temporarily install the hook so the encoder sees gated embeddings only
        # for this forward pass.
        hook = self._encoder.embedding_layers[0].register_forward_hook(scale_embeddings)
        encoder_dict = self._encoder(document=document, query=query, label=label, metadata=metadata)
        hook.remove()
        loss = 0.0
        if (label is not None):
            assert ('loss' in encoder_dict)
            base_loss = F.cross_entropy(encoder_dict['logits'], label)
            loss += base_loss
            # Expected fraction of nonzero gates per sequence (the "L0"/lasso term).
            pdf0 = (sampler.pdf(0.0) * mask)
            pdf_nonzero = ((1 - pdf0) * mask)
            lasso_loss = pdf_nonzero.sum(1)
            lengths = mask.sum(1)
            lasso_loss = (lasso_loss / (lengths + 1e-09))
            lasso_loss = lasso_loss.mean()
            # Constraint violation: only penalize exceeding the target length.
            c0_hat = F.relu((lasso_loss - self._desired_length))
            if self.training:
                self.c0_ma = ((self.lagrange_alpha * self.c0_ma) + ((1 - self.lagrange_alpha) * c0_hat.item()))
            # Value of the moving average, gradient of the current violation.
            c0 = (c0_hat + (self.c0_ma.detach() - c0_hat.detach()))
            if self.training:
                # Multiplicative (exponentiated-gradient) multiplier update, clamped.
                self.lambda0 = (self.lambda0 * torch.exp((self.lagrange_lr * c0.detach())))
                self.lambda0 = self.lambda0.clamp(self.lambda_min, self.lambda_max)
            self._loss_tracks['_lasso_loss'](lasso_loss.item())
            self._loss_tracks['_base_loss'](base_loss.item())
            self._loss_tracks['_lambda0'](self.lambda0[0].item())
            self._loss_tracks['_c0_ma'](self.c0_ma[0].item())
            self._loss_tracks['_c0'](c0_hat.item())
            regulariser_loss = (self.lambda0.detach() * c0)[0]
            loss += regulariser_loss
        output_dict['probs'] = encoder_dict['probs']
        output_dict['predicted_labels'] = encoder_dict['predicted_labels']
        output_dict['loss'] = loss
        output_dict['gold_labels'] = label
        output_dict['metadata'] = metadata
        output_dict['predicted_rationale'] = generator_dict['predicted_rationale']
        self._loss_tracks['_rat_length'](util.masked_mean(generator_dict['predicted_rationale'], (mask == 1), dim=(- 1)).mean().item())
        self._call_metrics(output_dict)
        return output_dict

    def _decode(self, output_dict) -> Dict[(str, Any)]:
        """Move predictions to numpy/plain types for serialization."""
        new_output_dict = {}
        new_output_dict['predicted_label'] = output_dict['predicted_labels'].cpu().data.numpy()
        new_output_dict['label'] = output_dict['gold_labels'].cpu().data.numpy()
        new_output_dict['metadata'] = output_dict['metadata']
        new_output_dict['rationale'] = output_dict['rationale']
        return new_output_dict

    def get_metrics(self, reset: bool=False) -> Dict[(str, float)]:
        """Base metrics plus the tracked loss components (and their raw totals)."""
        base_metrics = super(KumaraswamyGenEncClassifier, self).get_metrics(reset)
        loss_metrics = {('_total' + k): v._total_value for (k, v) in self._loss_tracks.items()}
        loss_metrics.update({k: v.get_metric(reset) for (k, v) in self._loss_tracks.items()})
        loss_metrics.update(base_metrics)
        return loss_metrics
def fpMinusZero(s):
    """Create a Z3 floating-point literal for -0.0 of FP sort *s*.

    The final ``True`` argument to Z3_mk_fpa_zero selects the negative zero.
    """
    _z3_assert(isinstance(s, FPSortRef), 'sort mismatch')
    return FPNumRef(Z3_mk_fpa_zero(s.ctx_ref(), s.ast, True), s.ctx)
# Fix: the decorator name was truncated to a bare "(scope='function', autouse=True)"
# tuple-like fragment, which is not even valid standalone syntax; restore the
# pytest fixture decorator it belongs to.
@pytest.fixture(scope='function', autouse=True)
def reset_backend():
    """Autouse fixture: pin the pyhf backend to numpy before and after every test."""
    pyhf.set_backend('numpy', default=True)
    yield reset_backend
    pyhf.set_backend('numpy', default=True)