code
stringlengths
101
5.91M
def build_server_model(): inputs = Input(shape=21632) x = Dense(128, activation='relu')(inputs) outputs = Dense(10)(x) return Model(inputs=inputs, outputs=outputs, name='vfl_server_model')
.parametrize('dtype', [np.float32, np.float64]) def test_compute_ssim_gray(dtype: np.dtype) -> None: np_gray_img = (data.camera().astype(dtype) / 255) pt_gray_img = torch.as_tensor(np_gray_img) for sigma in [0, 0.01, 0.03, 0.1, 0.3]: noise = (torch.randn_like(pt_gray_img) * sigma) noisy_pt_gray_img = (pt_gray_img + noise).clamp(0, 1) noisy_np_gray_img = noisy_pt_gray_img.numpy() skimage_ssim = structural_similarity(noisy_np_gray_img, np_gray_img, win_size=11, sigma=1.5, use_sample_covariance=False, gaussian_weights=True, data_range=1) adv_lib_ssim = compute_ssim(noisy_pt_gray_img.unsqueeze(0).unsqueeze(1), pt_gray_img.unsqueeze(0).unsqueeze(1)) abs_diff = abs((skimage_ssim - adv_lib_ssim.item())) assert (abs_diff < 2e-05)
class RoCBertForQuestionAnswering(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
class SparseFastSigmoid(torch.autograd.Function): '\n Surrogate gradient of the Heaviside step function.\n\n **Forward pass:** Heaviside step function shifted.\n\n .. math::\n\n S=\\begin{cases} 1 & \\text{if U U$_{\\rm thr}$} \\\\\n 0 & \\text{if U < U$_{\\rm thr}$}\n \\end{cases}\n\n **Backward pass:** Gradient of fast sigmoid function clipped below B.\n\n .. math::\n\n S&\\frac{U}{1 + k|U|}H(U-B) \\\\\n \\frac{S}{U}&=\\begin{cases} \\frac{1}{(1+k|U|)^2}\n & \\text{\\rm if U > B}\n 0 & \\text{\\rm otherwise}\n \\end{cases}\n\n :math:`k` defaults to 25, and can be modified by calling ``surrogate.SFS(slope=25)``.\n :math:`B` defaults to 1, and can be modified by calling ``surrogate.SFS(B=1)``.\n\n Adapted from:\n\n *N. Perez-Nieves and D.F.M. Goodman (2021) Sparse Spiking\n Gradient Descent. def forward(ctx, input_, slope=25, B=1): ctx.save_for_backward(input_) ctx.slope = slope ctx.B = B out = (input_ > 0).float() return out def backward(ctx, grad_output): (input_,) = ctx.saved_tensors grad_input = grad_output.clone() grad = ((grad_input / (((ctx.slope * torch.abs(input_)) + 1.0) ** 2)) * (input_ > ctx.B).float()) return (grad, None, None)
def build_from_path(in_dir, out_dir, num_workers=1): executor = ProcessPoolExecutor(max_workers=num_workers) futures = [] index = 1 with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f: for line in f: parts = line.strip().split('|') wav_path = os.path.join(in_dir, 'wavs', ('%s.wav' % parts[0])) text = parts[2] futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text))) index += 1 return [future.result() for future in futures]
def save_results_mimic(results_file_path, repetition_num, match_mean, auc_score, apr_score): with open(results_file_path, 'a') as f: writer = csv.writer(f) writer.writerow((repetition_num, match_mean, auc_score, apr_score))
class DenseBlock(nn.Module): def __init__(self, in_channels, out_channels, bottleneck_size): super(DenseBlock, self).__init__() inc_channels = ((out_channels - in_channels) // 2) mid_channels = (inc_channels * bottleneck_size) self.branch1 = PeleeBranch1(in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) self.branch2 = PeleeBranch2(in_channels=in_channels, out_channels=inc_channels, mid_channels=mid_channels) def forward(self, x): x1 = self.branch1(x) x2 = self.branch2(x) x = torch.cat((x, x1, x2), dim=1) return x
def _get_mcmc_fns(run_config: ConfigDict, log_psi_apply: ModelApply[P], apply_pmap: bool=True) -> Tuple[(mcmc.metropolis.BurningStep[(P, dwpa.DWPAData)], mcmc.metropolis.WalkerFn[(P, dwpa.DWPAData)])]: metrop_step_fn = dwpa.make_dynamic_pos_amp_gaussian_step(log_psi_apply, run_config.nmoves_per_width_update, dwpa.make_threshold_adjust_std_move(0.5, 0.05, 0.1)) burning_step = mcmc.metropolis.make_jitted_burning_step(metrop_step_fn, apply_pmap=apply_pmap) walker_fn = mcmc.metropolis.make_jitted_walker_fn(run_config.nsteps_per_param_update, metrop_step_fn, apply_pmap=apply_pmap) return (burning_step, walker_fn)
class _SyncBatchNorm(_BatchNorm): def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True): super(_SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine) self._sync_master = SyncMaster(self._data_parallel_master) self._parallel_id = None self._slave_pipe = None def forward(self, input): if (not self.training): return batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps) input_shape = input.size() input = input.view(input_shape[0], self.num_features, (- 1)) N = (input.size(0) * input.size(2)) (xsum, xsqsum) = sum_square(input) if (self._parallel_id == 0): (mean, inv_std) = self._sync_master.run_master(_ChildMessage(xsum, xsqsum, N)) else: (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(xsum, xsqsum, N)) return batchnormtrain(input, mean, (1.0 / inv_std), self.weight, self.bias).view(input_shape) def __data_parallel_replicate__(self, ctx, copy_id): self._parallel_id = copy_id if (self._parallel_id == 0): ctx.sync_master = self._sync_master else: self._slave_pipe = ctx.sync_master.register_slave(copy_id) def _data_parallel_master(self, intermediates): intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device())) to_reduce = [i[1][:2] for i in intermediates] to_reduce = [j for i in to_reduce for j in i] target_gpus = [i[1].sum.get_device() for i in intermediates] sum_size = sum([i[1].sum_size for i in intermediates]) (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size) broadcasted = Broadcast.apply(target_gpus, mean, inv_std) outputs = [] for (i, rec) in enumerate(intermediates): outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)]))) return outputs def _compute_mean_std(self, sum_, ssum, size): assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.' mean = (sum_ / size) sumvar = (ssum - (sum_ * mean)) unbias_var = (sumvar / (size - 1)) bias_var = (sumvar / size) self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mean.data)) self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * unbias_var.data)) return (mean, ((bias_var + self.eps) ** (- 0.5)))
class _Trainer(): def __init__(self, c): if (c.SEED is None): c.SEED = torch.initial_seed() else: torch.manual_seed(c.SEED) np.random.seed(c.SEED) c.get_outdirs() c.save_constants_file() print(c) if ((c.DEVICE != 'cpu') and torch.cuda.is_available()): device = torch.device(('cuda:%i' % c.DEVICE)) torch.cuda.set_device(c.DEVICE) else: device = torch.device('cpu') print(('Device: %s' % device)) torch.backends.cudnn.benchmark = False torch.set_num_threads(1) print(('Main thread ID: %i' % os.getpid())) print('Torch seed: ', torch.initial_seed()) writer = SummaryWriter(c.SUMMARY_OUT_DIR) writer.add_text('constants', str(c).replace('\n', ' \n')) (self.c, self.device, self.writer) = (c, device, writer) self.need_mask = hasattr(self.c.P, 'mask_x') self.need_bd = hasattr(self.c.P, 'sample_bd') self.need_od = hasattr(self.c.P, 'sample_data') def _print_summary(self, i, loss, rate, start): print(('[i: %i/%i] loss: %.4f rate: %.1f elapsed: %.2f hr %s %s\n' % ((i + 1), self.c.N_STEPS, loss, rate, ((time.time() - start) / (60 * 60)), time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()), self.c.RUN))) self.writer.add_scalar('rate/', rate, (i + 1)) def _save_figs(self, i, fs): if self.c.CLEAR_OUTPUT: IPython.display.clear_output(wait=True) for (name, f) in fs: if self.c.SAVE_FIGURES: f.savefig((self.c.SUMMARY_OUT_DIR + ('%s_%.8i.png' % (name, (i + 1)))), bbox_inches='tight', pad_inches=0.1, dpi=100) self.writer.add_figure(name, f, (i + 1), close=False) (plt.show() if self.c.SHOW_FIGURES else plt.close('all')) def _save_model(self, i, model, im=None): tag = (('model_%.8i_%.8i.torch' % ((i + 1), im)) if (im is not None) else ('model_%.8i.torch' % (i + 1))) model.eval() model.to(torch.device('cpu')) torch.save({'i': (i + 1), 'model_state_dict': model.state_dict()}, (self.c.MODEL_OUT_DIR + tag)) model.to(self.device) def train(self): raise NotImplementedError
def plot_embedding(X, Y, cid, ohidcs, A): plt.figure(figsize=(20, 20)) for i in xrange(Y.shape[0]): if (i == cid): c = 'g' s = 500 elif (Y[i] == Y[cid]): c = 'r' s = 20 else: c = 'b' s = 20 plt.scatter(X[(i, 0)], X[(i, 1)], s, color=c) edges = A.nonzero() edges = np.asarray(edges).T for e in edges: plt.plot([X[(e[0], 0)], X[(e[1], 0)]], [X[(e[0], 1)], X[(e[1], 1)]], linestyle='--', linewidth=0.5, color='gray') plt.show()
def get_split_loader(split_dataset, training=False, testing=False, weighted=False): kwargs = ({'num_workers': 4} if (device.type == 'cuda') else {}) if (not testing): if training: if weighted: weights = make_weights_for_balanced_classes_split(split_dataset) loader = DataLoader(split_dataset, batch_size=1, sampler=WeightedRandomSampler(weights, len(weights)), collate_fn=collate_MIL, **kwargs) else: loader = DataLoader(split_dataset, batch_size=1, sampler=RandomSampler(split_dataset), collate_fn=collate_MIL, **kwargs) else: loader = DataLoader(split_dataset, batch_size=1, sampler=SequentialSampler(split_dataset), collate_fn=collate_MIL, **kwargs) else: ids = np.random.choice(np.arange(len(split_dataset), int((len(split_dataset) * 0.1))), replace=False) loader = DataLoader(split_dataset, batch_size=1, sampler=SubsetSequentialSampler(ids), collate_fn=collate_MIL, **kwargs) return loader
def set_window_pos_callback(window, cbfun): window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value if (window_addr in _window_pos_callback_repository): previous_callback = _window_pos_callback_repository[window_addr] else: previous_callback = None if (cbfun is None): cbfun = 0 c_cbfun = _GLFWwindowposfun(cbfun) _window_pos_callback_repository[window_addr] = (cbfun, c_cbfun) cbfun = c_cbfun _glfw.glfwSetWindowPosCallback(window, cbfun) if ((previous_callback is not None) and (previous_callback[0] != 0)): return previous_callback[0]
def get_dataset(name, split='train', transform=None, target_transform=None, download=True, datasets_path='~/Datasets'): train = (split == 'train') root = os.path.join(os.path.expanduser(datasets_path), name) if (name == 'cifar10'): return datasets.CIFAR10(root=root, train=train, transform=transform, target_transform=target_transform, download=download) elif (name == 'cifar100'): return datasets.CIFAR100(root=root, train=train, transform=transform, target_transform=target_transform, download=download) elif (name == 'mnist'): return datasets.MNIST(root=root, train=train, transform=transform, target_transform=target_transform, download=download) elif (name == 'stl10'): return datasets.STL10(root=root, split=split, transform=transform, target_transform=target_transform, download=download) elif (name == 'imagenet'): if train: root = os.path.join(root, 'train') else: root = os.path.join(root, 'val') return datasets.ImageFolder(root=root, transform=transform, target_transform=target_transform) elif (name == 'imagenet_calib'): if train: root = os.path.join(root.replace('imagenet_calib', 'imagenet'), 'calib') else: root = os.path.join(root, 'val') return datasets.ImageFolder(root=root, transform=transform, target_transform=target_transform) elif (name == 'imagenet_calib_10K'): if train: root = os.path.join(root.replace('imagenet_calib_10K', 'imagenet'), 'calib_10K') else: root = os.path.join(root, 'val') return datasets.ImageFolder(root=root, transform=transform, target_transform=target_transform) elif (name == 'imagenet_tar'): if train: root = os.path.join(root, 'imagenet_train.tar') else: root = os.path.join(root, 'imagenet_validation.tar') return IndexedFileDataset(root, extract_target_fn=(lambda fname: fname.split('/')[0]), transform=transform, target_transform=target_transform)
def fedat_test(fed, running_model, val_loaders, val_adversaries, att_BNn, detector, loss_fun, device, client_num, set_name='Val'): acc_list = [None for _ in range(client_num)] loss_mt = AverageMeter() for client_idx in range(client_num): fed.model_accum.load_model(running_model, client_idx) (loss, acc) = test(running_model, val_loaders[client_idx], loss_fun, device, adversary=val_adversaries[client_idx], att_BNn=att_BNn, detector=detector) loss_mt.append(loss) acc_list[client_idx] = acc print(' {:<11s}| {} Acc: {:.1f}%'.format(fed.clients[client_idx], set_name, (acc * 100))) return ({f'loss': loss_mt.avg, f'acc': np.mean(acc_list)}, acc_list)
def test_imageparam_bug(): 'see x = Var('x') y = Var('y') fx = Func('fx') input = ImageParam(UInt(8), 1, 'input') fx[(x, y)] = input[y] return
def make_sentences(root, split, file, processing): sentences = load_file(os.path.join(root, ((split + '.') + file))) return split_sentences(sentences, processing)
def build_nasnet_mobile(images, num_classes, is_training=True, final_endpoint=None, config=None, current_step=None): hparams = (mobile_imagenet_config() if (config is None) else copy.deepcopy(config)) _update_hparams(hparams, is_training) if (tf.test.is_gpu_available() and (hparams.data_format == 'NHWC')): tf.logging.info('A GPU is available on the machine, consider using NCHW data format for increased speed on GPU.') if (hparams.data_format == 'NCHW'): images = tf.transpose(images, [0, 3, 1, 2]) total_num_cells = (hparams.num_cells + 2) total_num_cells += 2 normal_cell = nasnet_utils.NasNetANormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps, hparams.use_bounded_activation) reduction_cell = nasnet_utils.NasNetAReductionCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps, hparams.use_bounded_activation) with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): return _build_nasnet_base(images, normal_cell=normal_cell, reduction_cell=reduction_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, stem_type='imagenet', final_endpoint=final_endpoint, current_step=current_step)
def run_simulation(Lx, Ly, betas=[1.0], n_updates_measure=10000, n_bins=10): (spins, op_string, bonds) = init_SSE_square(Lx, Ly) n_sites = len(spins) n_bonds = len(bonds) Es_Eerrs = [] for beta in betas: print('beta = {beta:.3f}'.format(beta=beta), flush=True) op_string = thermalize(spins, op_string, bonds, beta, (n_updates_measure // 10)) Es = [] for _ in range(n_bins): ns = measure(spins, op_string, bonds, beta, n_updates_measure) E = ((((- np.mean(ns)) / beta) + (0.25 * n_bonds)) / n_sites) Es.append(E) (E, Eerr) = (np.mean(Es), (np.std(Es) / np.sqrt(n_bins))) Es_Eerrs.append((E, Eerr)) return np.array(Es_Eerrs)
class TestTextFeature(ZooTestCase): def test_text_feature_with_label(self): feature = TextFeature(text, 1) assert (feature.get_text() == text) assert (feature.get_label() == 1) assert feature.has_label() assert (set(feature.keys()) == {'text', 'label'}) assert (feature.get_tokens() is None) assert (feature.get_sample() is None) def test_text_feature_without_label(self): feature = TextFeature(text) assert (feature.get_text() == text) assert (feature.get_label() == (- 1)) assert (not feature.has_label()) assert (feature.keys() == ['text']) feature.set_label(0.0) assert (feature.get_label() == 0) assert feature.has_label() assert (set(feature.keys()) == {'text', 'label'}) assert (feature.get_tokens() is None) assert (feature.get_sample() is None) def test_text_feature_transformation(self): feature = TextFeature(text, 0) tokenizer = Tokenizer() tokenized = tokenizer.transform(feature) assert (tokenized.get_tokens() == ['Hello', 'my', 'friend,', 'please', 'annotate', 'my', 'text']) normalizer = Normalizer() normalized = normalizer.transform(tokenized) assert (normalized.get_tokens() == ['hello', 'my', 'friend', 'please', 'annotate', 'my', 'text']) word_index = {'my': 1, 'please': 2, 'friend': 3} indexed = WordIndexer(word_index).transform(normalized) shaped = SequenceShaper(5).transform(indexed) transformed = TextFeatureToSample().transform(shaped) assert (set(transformed.keys()) == {'text', 'label', 'tokens', 'indexedTokens', 'sample'}) sample = transformed.get_sample() assert (list(sample.feature.storage) == [1.0, 3.0, 2.0, 1.0, 0.0]) assert (list(sample.label.storage) == [0.0]) def test_text_feature_with_uri(self): feature = TextFeature(uri='A1') assert (feature.get_text() is None) assert (feature.get_uri() == 'A1')
def UNTESTED_from_jsonable(ebm, jsonable): warn('JSON formats are in beta. The JSON format may change in a future version without compatibility between releases.') obj_type = f'{ebm.__class__.__module__}.{ebm.__class__.__name__}' if (obj_type == 'interpret.glassbox._ebm._ebm.EBMModel'): is_classification = None is_regression = None is_private = None elif (obj_type == 'interpret.glassbox._ebm._ebm.ExplainableBoostingClassifier'): is_classification = True is_regression = False is_private = False elif (obj_type == 'interpret.glassbox._ebm._ebm.ExplainableBoostingRegressor'): is_classification = False is_regression = True is_private = False elif (obj_type == 'interpret.glassbox._ebm._ebm.DPExplainableBoostingClassifier'): is_classification = True is_regression = False is_private = True elif (obj_type == 'interpret.glassbox._ebm._ebm.DPExplainableBoostingRegressor'): is_classification = False is_regression = True is_private = True else: msg = f'Unrecognized object type {obj_type}' _log.error(msg) raise ValueError(msg) jsonable = jsonable['ebm'] link = None for output_json in jsonable['outputs']: if (link is not None): msg = 'Multiple outputs not supported currently.' _log.error(msg) raise ValueError(msg) link = output_json['link'] link_param = UNTESTED_dejsonify_item(output_json['link_param']) task = identify_task(link) if (task == 'classification'): if (is_classification is False): msg = f'{obj_type} cannot have link function {link}.' _log.error(msg) raise ValueError(msg) classes = output_json['classes'] classes = np.array(classes, np.object_) classes = typify_classification(classes) elif (task == 'regression'): if (is_regression is False): msg = f'{obj_type} cannot have link function {link}.' _log.error(msg) raise ValueError(msg) min_target = output_json['min_target'] max_target = output_json['max_target'] else: msg = f'Unrecognized link function {link}' _log.error(msg) raise ValueError(msg) intercept = jsonable['intercept'] bagged_intercept = jsonable.get('bagged_intercept', None) noise_scale_binning = jsonable.get('noise_scale_binning', None) noise_scale_boosting = jsonable.get('noise_scale_boosting', None) bag_weights = jsonable['bag_weights'] breakpoint_iteration = jsonable['breakpoint_iteration'] intercept = np.array(intercept, np.float64) if (bagged_intercept is not None): bagged_intercept = np.array(bagged_intercept, np.float64) bag_weights = np.array(bag_weights, np.float64) breakpoint_iteration = np.array(breakpoint_iteration, np.int64) if (jsonable['implementation'] == 'python'): pass names = {} name_idx = 0 histogram_weights = [] unique_val_counts = [] bins = [] feature_names = [] feature_types = [] feature_bounds = [] for feature_json in jsonable['features']: feature_name = feature_json['name'] feature_names.append(feature_name) names[feature_name] = name_idx name_idx += 1 feature_type = feature_json['type'] feature_types.append(feature_type) num_unique_vals = feature_json['num_unique_vals'] unique_val_counts.append(num_unique_vals) if (feature_type in ['nominal', 'ordinal']): levels = [] for level in feature_json['categories']: idx = 1 feature_bins = {} for category in level: if isinstance(category, list): for item in category: feature_bins[item] = idx else: feature_bins[category] = idx idx += 1 levels.append(feature_bins) min_val = np.nan max_val = np.nan histogram = None elif (feature_type in ['continuous']): levels = [] for level in feature_json['cuts']: level = np.array(level, np.float64) levels.append(level) min_val = feature_json['min'] max_val = feature_json['max'] histogram = feature_json['histogram_weights'] histogram = np.array(histogram, np.float64) histogram_weights.append(histogram) bins.append(levels) feature_bounds.append((min_val, max_val)) unique_val_counts = np.array(unique_val_counts, np.int64) feature_bounds = np.array(feature_bounds, np.float64) term_features = [] bin_weights = [] bagged_scores = [] term_scores = [] standard_deviations = [] for term_json in jsonable['terms']: tf = term_json['term_features'] tf = tuple((names[name] for name in tf)) term_features.append(tf) scores = term_json['scores'] scores = np.array(scores, np.float64) term_scores.append(scores) stddev = term_json['standard_deviations'] stddev = np.array(stddev, np.float64) standard_deviations.append(stddev) bs = term_json['bagged_scores'] bs = np.array(bs, np.float64) bagged_scores.append(bs) bw = term_json['bin_weights'] bw = np.array(bw, np.float64) bin_weights.append(bw) term_names = generate_term_names(feature_names, term_features) histogram_edges = make_all_histogram_edges(feature_bounds, histogram_weights) ebm.n_features_in_ = len(bins) ebm.term_names_ = term_names if (is_private is not False): ebm.noise_scale_binning_ = noise_scale_binning ebm.noise_scale_boosting_ = noise_scale_boosting if (is_private is not True): ebm.histogram_edges_ = histogram_edges ebm.histogram_weights_ = histogram_weights ebm.unique_val_counts_ = unique_val_counts if (task == 'classification'): ebm.classes_ = classes elif (task == 'regression'): ebm.min_target_ = min_target ebm.max_target_ = max_target ebm.bins_ = bins ebm.feature_names_in_ = feature_names ebm.feature_types_in_ = feature_types ebm.feature_bounds_ = feature_bounds ebm.term_features_ = term_features ebm.bin_weights_ = bin_weights ebm.bagged_scores_ = bagged_scores ebm.term_scores_ = term_scores ebm.standard_deviations_ = standard_deviations ebm.intercept_ = intercept if (bagged_intercept is not None): ebm.bagged_intercept_ = bagged_intercept ebm.link_ = link ebm.link_param_ = link_param ebm.bag_weights_ = bag_weights ebm.breakpoint_iteration_ = breakpoint_iteration ebm.has_fitted_ = True
class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ['flax'] def __init__(self, *args, **kwargs): requires_backends(self, ['flax'])
def main(A, t_max, M, R, exec_type, theta): print('{}-armed Bernoulli bandit with MC-BUCB policies for {} time-instants and {} realizations'.format(A, M, t_max, R)) dir_string = '../results/{}/A={}/t_max={}/R={}/M={}/theta={}'.format(os.path.basename(__file__).split('.')[0], A, t_max, R, M, '_'.join(str.strip(np.array_str(theta.flatten()), '[]').split())) os.makedirs(dir_string, exist_ok=True) context = None reward_function = {'type': 'bernoulli', 'dist': stats.bernoulli, 'theta': theta} reward_prior = {'dist': stats.beta, 'alpha': np.ones((A, 1)), 'beta': np.ones((A, 1))} min_sampling_sigma = 1e-06 mc_reward_prior = {'dist': stats.beta, 'alpha': np.ones((A, 1)), 'beta': np.ones((A, 1)), 'sampling': 'density', 'sampling_sigma': min_sampling_sigma, 'M': M} bandits = [] bandits_labels = [] alpha = (1.0 / np.arange(1, (t_max + 1))) quantileInfo = {'alpha': alpha, 'type': 'analytical'} bandits.append(BayesianBanditQuantiles(A, reward_function, reward_prior, quantileInfo)) bandits_labels.append('UCB, alpha=1/t') quantileInfo = {'MC_alpha': 'alpha', 'alpha': alpha, 'type': 'empirical'} bandits.append(MCBanditQuantiles(A, reward_function, mc_reward_prior, quantileInfo)) bandits_labels.append('MC-BUCB, alpha=1/t, M_theta={}'.format(mc_reward_prior['M'])) quantileInfo = {'MC_alpha': 'alpha_plus_mcsigma', 'alpha': alpha, 'type': 'empirical'} bandits.append(MCBanditQuantiles(A, reward_function, mc_reward_prior, quantileInfo)) bandits_labels.append('MC-BUCB, alpha=1/t+sigma, M_theta={}'.format(mc_reward_prior['M'])) quantileInfo = {'MC_alpha': 'alpha_times_mcsigma', 'alpha': alpha, 'type': 'empirical'} bandits.append(MCBanditQuantiles(A, reward_function, mc_reward_prior, quantileInfo)) bandits_labels.append('MC-BUCB, alpha=1/t*sigma, M_theta={}'.format(mc_reward_prior['M'])) for (n, bandit) in enumerate(bandits): bandit.execute_realizations(R, t_max, context, exec_type) with open((dir_string + '/bandits.pickle'), 'wb') as f: pickle.dump(bandits, f) with open((dir_string + '/bandits_labels.pickle'), 'wb') as f: pickle.dump(bandits_labels, f) bandits_colors = [colors.cnames['black'], colors.cnames['blue'], colors.cnames['green'], colors.cnames['red'], colors.cnames['fuchsia']] dir_plots = (dir_string + '/plots') os.makedirs(dir_plots, exist_ok=True) t_plot = t_max plot_std = False bandits_plot_regret(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_regret(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = False bandits_plot_cumregret(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_cumregret(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_rewards_expected(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = False bandits_plot_actions(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_actions(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = False bandits_plot_actions_correct(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_actions_correct(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots) plot_std = True bandits_plot_arm_quantile(bandits, bandits_colors, bandits_labels, t_plot, plot_std, plot_save=dir_plots)
def _extrapolate(img, class_info, magnitude): m = float_parameter(magnitude, 1) x = img mu = class_info['mean'] x_hat = (((x - mu) * m) + x) return (x_hat, [])
class QResNet(nn.Module): def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None): super(QResNet, self).__init__() if (norm_layer is None): norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer self.inplanes = 64 self.dilation = 1 if (replace_stride_with_dilation is None): replace_stride_with_dilation = [False, False, False] if (len(replace_stride_with_dilation) != 3): raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation)) self.groups = groups self.base_width = width_per_group self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]) self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]) self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear((512 * block.expansion), num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, blocks, stride=1, dilate=False): norm_layer = self._norm_layer downsample = None previous_dilation = self.dilation if dilate: self.dilation *= stride stride = 1 if ((stride != 1) or (self.inplanes != (planes * block.expansion))): downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion))) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer)) self.inplanes = (planes * block.expansion) for _ in range(1, blocks): layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.fc(x) return x
def test_intersection_module() -> None: box1 = TFBoxTensor(tf.Variable([[[1, 1], [3, 5]], [[1, 1], [3, 3]]])) box2 = TFBoxTensor(tf.Variable([[[2, 0], [6, 2]], [[3, 2], [4, 4]]])) res = TFBoxTensor(tf.Variable([[[2, 1], [3, 2]], [[3, 2], [3, 3]]])) assert (res == TFHardIntersection()(box1, box2))
class HugginfaceBertEncoderMapper(SimpleMapper): RULES = [RegexRule('layer\\.(\\d+)\\.attention\\.self\\.(query|key|value)', 'layers.\\1.attention.\\2_projection'), RegexRule('layer\\.(\\d+)\\.attention\\.output\\.dense', 'layers.\\1.attention.out_projection'), RegexRule('layer\\.(\\d+)\\.attention\\.output\\.LayerNorm', 'layers.\\1.norm1'), RegexRule('layer\\.(\\d+)\\.intermediate\\.dense', 'layers.\\1.linear1'), RegexRule('layer\\.(\\d+)\\.output\\.dense', 'layers.\\1.linear2'), RegexRule('layer\\.(\\d+)\\.output\\.LayerNorm', 'layers.\\1.norm2')] def __init__(self): super(HugginfaceBertEncoderMapper, self).__init__(self.RULES)
_module() class DynamicMVXFasterRCNN(MVXTwoStageDetector): def __init__(self, **kwargs): super(DynamicMVXFasterRCNN, self).__init__(**kwargs) _grad() _fp32() def voxelize(self, points): coors = [] for res in points: res_coors = self.pts_voxel_layer(res) coors.append(res_coors) points = torch.cat(points, dim=0) coors_batch = [] for (i, coor) in enumerate(coors): coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) coors_batch.append(coor_pad) coors_batch = torch.cat(coors_batch, dim=0) return (points, coors_batch) def extract_pts_feat(self, points, img_feats, img_metas): if (not self.with_pts_bbox): return None (voxels, coors) = self.voxelize(points) (voxel_features, feature_coors) = self.pts_voxel_encoder(voxels, coors, points, img_feats, img_metas) batch_size = (coors[((- 1), 0)] + 1) x = self.pts_middle_encoder(voxel_features, feature_coors, batch_size) x = self.pts_backbone(x) if self.with_pts_neck: x = self.pts_neck(x) return x
def load_model(model, dir): model_dict = model.state_dict() print('loading model from :', dir) pretrained_dict = torch.load(dir)['params'] if ('encoder' in list(pretrained_dict.keys())[0]): if ('module' in list(pretrained_dict.keys())[0]): pretrained_dict = {k[7:]: v for (k, v) in pretrained_dict.items()} else: pretrained_dict = {k: v for (k, v) in pretrained_dict.items()} else: pretrained_dict = {('encoder.' + k): v for (k, v) in pretrained_dict.items()} pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)} model_dict.update(pretrained_dict) model.load_state_dict(model_dict) return model
class TensorflowModelZooBertDataLoader(DefaultDataLoader): def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn, sampler, batch_sampler, num_workers, pin_memory, shuffle, distributed): if shuffle: logging.warning('Shuffle is not supported yet in TensorflowBertDataLoader, ignoring shuffle keyword.') def bert_collate_fn(batch): input_ids = [] input_mask = [] segment_ids = [] for elem in batch: input_ids.append(elem[0][0][0]) input_mask.append(elem[0][1][0]) segment_ids.append(elem[0][2][0]) inputs = [input_ids, input_mask, segment_ids] return (inputs, batch[0][1]) drop_last = (False if (last_batch == 'rollover') else True) sampler = self._generate_sampler(dataset, distributed) self.batch_sampler = BatchSampler(sampler, batch_size, drop_last) self.fetcher = FETCHERS[self.dataset_type](dataset, bert_collate_fn, drop_last, distributed) inputs = [] for batched_indices in self.batch_sampler: try: data = self.fetcher(batched_indices) (yield data) except StopIteration: return
def pspnet_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs): backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, multi_output=True).features del backbone[(- 1)] return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name='pspnet_resnetd50b_coco', **kwargs)
def block(mod, output, stride): inp = mod.get_current()[1][(- 1)] aa = mod.get_current() if (inp == output): if (stride == 1): l0 = mod.get_current() else: l0 = mod.maxpoolLayer(stride) else: l0 = mod.convLayer(1, output, activation=M.PARAM_RELU, batch_norm=True, stride=stride) mod.convLayer(1, (output // 4), activation=M.PARAM_RELU, batch_norm=True, layerin=aa, stride=stride) mod.convLayer(3, (output // 4), activation=M.PARAM_RELU, batch_norm=True) mod.convLayer(1, output, batch_norm=True) mod.sum(l0) mod.activate(M.PARAM_RELU) return mod
class ChannelPool(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1)
def set_cursor_pos_callback(window, cbfun): window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value if (window_addr in _cursor_pos_callback_repository): previous_callback = _cursor_pos_callback_repository[window_addr] else: previous_callback = None if (cbfun is None): cbfun = 0 c_cbfun = _GLFWcursorposfun(cbfun) _cursor_pos_callback_repository[window_addr] = (cbfun, c_cbfun) cbfun = c_cbfun _glfw.glfwSetCursorPosCallback(window, cbfun) if ((previous_callback is not None) and (previous_callback[0] != 0)): return previous_callback[0]
def main(config_, save_path): global config, log, writer config = config_ (log, writer) = utils.set_save_path(save_path) with open(os.path.join(save_path, 'config.yaml'), 'w') as f: yaml.dump(config, f, sort_keys=False) (train_loader, val_loader) = make_data_loaders() if (config.get('data_norm') is None): config['data_norm'] = {'inp': {'sub': [0], 'div': [1]}, 'gt': {'sub': [0], 'div': [1]}} (model, optimizer, epoch_start, lr_scheduler) = prepare_training() n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(',')) if (n_gpus > 1): model = nn.parallel.DataParallel(model) epoch_max = config['epoch_max'] epoch_val = config.get('epoch_val') epoch_save = config.get('epoch_save') max_vals = [(- 1e+18) for _ in range(len(val_loader.dataset.scale_max))] timer = utils.Timer() for epoch in range(epoch_start, (epoch_max + 1)): t_epoch_start = timer.t() log_info = ['epoch {}/{}'.format(epoch, epoch_max)] writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch) log_info.append('lr:{}'.format(optimizer.param_groups[0]['lr'])) train_losses = train(train_loader, model, optimizer) if (lr_scheduler is not None): lr_scheduler.step() log_train_msg = 'train: loss={:.4f}'.format(train_losses[0]) writer.add_scalars('loss_1', {'train': train_losses[0]}, epoch) for idx in range(1, len(train_losses)): log_train_msg += ', {:.4f}'.format(train_losses[idx]) writer.add_scalars('loss_{}'.format((idx + 1)), {'train': train_losses[idx]}, epoch) log_info.append(log_train_msg) if (n_gpus > 1): model_ = model.module else: model_ = model model_spec = config['model'] model_spec['sd'] = model_.state_dict() optimizer_spec = config['optimizer'] optimizer_spec['sd'] = optimizer.state_dict() state = torch.get_rng_state() sv_file = {'model': model_spec, 'optimizer': optimizer_spec, 'epoch': epoch, 'state': state} torch.save(sv_file, os.path.join(save_path, 'epoch-last.pth')) if ((epoch_save is not None) and ((epoch % epoch_save) == 0)): torch.save(sv_file, os.path.join(save_path, 'epoch-{}.pth'.format(epoch))) with torch.no_grad(): if ((epoch_val is not None) and ((epoch % epoch_val) == 0)): if ((n_gpus > 1) and (config.get('eval_bsize') is not None)): model_ = model.module else: model_ = model val_res = eval_psnr(val_loader, model_, data_norm=config['data_norm'], eval_type=config.get('eval_type'), eval_bsize=config.get('eval_bsize')) log_val_msg = 'val: psnr={:.4f}'.format(val_res[0]) writer.add_scalars('psnr_1', {'val': val_res[0]}, epoch) for idx in range(1, len(val_res)): log_val_msg += ', {:.4f}'.format(val_res[idx]) writer.add_scalars('psnr_{}'.format((idx + 1)), {'val': val_res[idx]}, epoch) log_info.append(log_val_msg) if (val_res > max_vals): max_vals = val_res torch.save(sv_file, os.path.join(save_path, 'epoch-best.pth')) t = timer.t() prog = (((epoch - epoch_start) + 1) / ((epoch_max - epoch_start) + 1)) t_epoch = utils.time_text((t - t_epoch_start)) (t_elapsed, t_all) = (utils.time_text(t), utils.time_text((t / prog))) log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all)) log(', '.join(log_info)) writer.flush()
def test_amuse_NFWPotential(): np = potential.NFWPotential(normalize=1.0, a=3.0) tmax = 3.0 (vo, ro) = (200.0, 7.0) o = Orbit([1.0, 0.5, 1.3, 0.3, 0.1, 0.4], ro=ro, vo=vo) run_orbitIntegration_comparison(o, np, tmax, vo, ro) return None
def ndim(x): dims = x.get_shape()._dims if (dims is not None): return len(dims) return None
class ConditionalImageHusky(ConditionalImage): def __init__(self, taskdef, *args, **kwargs): super(ConditionalImageHusky, self).__init__(taskdef, *args, **kwargs) self.num_options = HuskyNumOptions() self.null_option = HuskyNullOption() def _makeModel(self, image, pose, *args, **kwargs): img_shape = image.shape[1:] pose_size = pose.shape[(- 1)] img_in = Input(img_shape, name='predictor_img_in') img0_in = Input(img_shape, name='predictor_img0_in') label_in = Input((1,)) ins = [img0_in, img_in] encoder = MakeImageEncoder(self, img_shape) decoder = MakeImageDecoder(self, self.hidden_shape) LoadEncoderWeights(self, encoder, decoder, gan=False) h = encoder([img0_in, img_in]) next_option_in = Input((1,), name='next_option_in') next_option_in2 = Input((1,), name='next_option_in2') ins += [next_option_in, next_option_in2] y = OneHot(self.num_options)(next_option_in) y = Flatten()(y) y2 = OneHot(self.num_options)(next_option_in2) y2 = Flatten()(y2) x = h tform = self._makeTransform() x = tform([h, y]) x2 = tform([x, y2]) image_out = decoder([x]) image_out2 = decoder([x2]) if self.validate: self.loadValidationModels(pose_size, h0, h) if (not self.no_disc): image_discriminator = LoadGoalClassifierWeights(self, make_classifier_fn=MakeImageClassifier, img_shape=img_shape) disc_out2 = image_discriminator([img0_in, image_out2]) if self.no_disc: disc_wt = 0.0 else: disc_wt = 0.001 if self.no_disc: model = Model((ins + [label_in]), [image_out, image_out2]) model.compile(loss=[self.loss, self.loss], loss_weights=[1.0, 1.0], optimizer=self.getOptimizer()) else: model = Model((ins + [label_in]), [image_out, image_out2, disc_out2]) model.compile(loss=[self.loss, self.loss, 'categorical_crossentropy'], loss_weights=[1.0, 1.0, disc_wt], optimizer=self.getOptimizer()) self.model = model def loadValidationModels(self, pose_size, h0, h): pose_in = Input((pose_size,)) label_in = Input((1,)) print('>>> GOAL_CLASSIFIER') image_discriminator = LoadGoalClassifierWeights(self, make_classifier_fn=MakeImageClassifier, img_shape=(64, 64, 3)) image_discriminator.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=self.getOptimizer()) self.discriminator = image_discriminator print('>>> VALUE MODEL') self.value_model = GetValueModel(h, self.num_options, 128, self.decoder_dropout_rate) self.value_model.compile(loss='mae', optimizer=self.getOptimizer()) self.value_model.load_weights(self.makeName('secondary', 'value')) print('>>> NEXT MODEL') self.next_model = GetNextModel(h, self.num_options, 128, self.decoder_dropout_rate) self.next_model.compile(loss='mae', optimizer=self.getOptimizer()) self.next_model.load_weights(self.makeName('secondary', 'next')) print('>>> ACTOR MODEL') self.actor = GetHuskyActorModel(h, self.num_options, pose_size, self.decoder_dropout_rate) self.actor.compile(loss='mae', optimizer=self.getOptimizer()) self.actor.load_weights(self.makeName('secondary', 'actor')) print('>>> POSE MODEL') self.pose_model = GetHuskyPoseModel(h, self.num_options, pose_size, self.decoder_dropout_rate) self.pose_model.compile(loss='mae', optimizer=self.getOptimizer()) self.pose_model.load_weights(self.makeName('secondary', 'pose')) print('>>> Q MODEL') self.q_model = GetNextModel(h, self.num_options, 128, self.decoder_dropout_rate) self.q_model.compile(loss='mae', optimizer=self.getOptimizer()) self.q_model.load_weights(self.makeName('secondary', 'q')) def _getData(self, *args, **kwargs): return GetConditionalHuskyData(self.validate, self.no_disc, self.num_options, *args, **kwargs)
def read_compress_raw_img(img_path): img = cv2.imread(img_path) encoded_img_arr = cv2.imencode('.jpg', img)[1] return encoded_img_arr
.dataclass class FlaxSeq2SeqModelOutput(ModelOutput): last_hidden_state: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
def load_network(params, device): state = Checkpoints.load_network(params['path']) return initialize_network(None, device, state, params['runtime'])
class PPOConfig(): exp_name: str = os.path.basename(sys.argv[0])[:(- len('.py'))] seed: int = 0 log_with: Optional[Literal[('wandb', 'tensorboard')]] = None task_name: Optional[str] = None model_name: Optional[str] = None query_dataset: Optional[str] = None reward_model: Optional[str] = None remove_unused_columns: bool = True tracker_kwargs: JSONDict = field(default_factory=dict) accelerator_kwargs: JSONDict = field(default_factory=dict) project_kwargs: JSONDict = field(default_factory=dict) tracker_project_name: str = 'trl' push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict) steps: int = 20000 learning_rate: float = 1e-05 adap_kl_ctrl: bool = True init_kl_coef: Optional[float] = 0.2 kl_penalty: Literal[('kl', 'abs', 'mse', 'full')] = 'kl' target: Optional[float] = 6 horizon: Optional[float] = 10000 gamma: float = 1 lam: float = 0.95 cliprange: float = 0.2 cliprange_value: float = 0.2 vf_coef: float = 0.1 batch_size: int = 256 mini_batch_size: int = 1 gradient_accumulation_steps: int = 1 world_size: tyro.conf.Suppress[int] = None ppo_epochs: int = 4 max_grad_norm: Optional[float] = None optimize_device_cache: Optional[bool] = False early_stopping: bool = False target_kl: float = 1 compare_steps: int = 1 ratio_threshold: float = 10.0 use_score_scaling: bool = False use_score_norm: bool = False score_clip: Optional[float] = None whiten_rewards: bool = False is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None is_peft_model: Optional[tyro.conf.Suppress[bool]] = None backward_batch_size: tyro.conf.Suppress[int] = None global_backward_batch_size: tyro.conf.Suppress[int] = None global_batch_size: tyro.conf.Suppress[int] = None use_habana: bool = False pad_for_acceleration: bool = False pad_max_len: int = 0 pad_max_input_len: int = 0 def __post_init__(self): self.backward_batch_size = (self.mini_batch_size * self.gradient_accumulation_steps) exact_div(self.batch_size, self.backward_batch_size, '`batch_size`', '`mini_batch_size * gradient_accumulation_steps`', '`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`') self.total_ppo_epochs = int(np.ceil((self.steps / self.batch_size))) if self.pad_for_acceleration: if (self.pad_max_input_len == 0): raise AssertionError('pad_max_input_len ({self.pad_max_input_len}) must be set for pad input ') if (self.pad_max_input_len >= self.pad_max_len): raise AssertionError('pad_max_input_len ({self.pad_max_input_len}) must be smaller then pad_max_len ({self.pad_max_len})') if self.use_habana: from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi adapt_transformers_to_gaudi() assert (self.kl_penalty in ['kl', 'abs', 'mse', 'full']) def to_dict(self): output_dict = {} for (key, value) in self.__dict__.items(): output_dict[key] = value return flatten_dict(output_dict)
def combine_and_merge_gold_pred(input_gold_conll, input_pred_conll): all_files = Read_txt_Files_in_Input_Folder(input_gold_conll) combined_pred_file = 'predictions.txt' fout = open(combined_pred_file, 'w') fout.close() for file in all_files: file_name = file.split('/')[(- 1)] gold_file_loc = (input_gold_conll + file_name) pred_file_loc = (input_pred_conll + file_name) write_to_combined_file(gold_file_loc, pred_file_loc, combined_pred_file) return combined_pred_file
class RefineNet(Network): def setup(self): self.feed('color_image', 'depth_image').concat(axis=3, name='concat_image') self.feed('concat_image').conv_bn(3, 32, 1, name='refine_conv0').conv_bn(3, 32, 1, name='refine_conv1').conv_bn(3, 32, 1, name='refine_conv2').conv(3, 1, 1, relu=False, name='refine_conv3') self.feed('refine_conv3', 'depth_image').add(name='refined_depth_image')
def process_folder(q, data_dir, output_dir, stride=1): while True: if q.empty(): break folder = q.get() image_path = os.path.join(data_dir, folder, 'image_2/') dump_image_path = os.path.join(output_dir, folder) if (not os.path.isdir(dump_image_path)): os.makedirs(dump_image_path) f = open(os.path.join(dump_image_path, 'train.txt'), 'w') numbers = len(os.listdir(image_path)) for n in range((numbers - stride)): s_idx = n e_idx = (s_idx + stride) curr_image = imageio.imread((os.path.join(image_path, ('%.6d' % s_idx)) + '.png')) next_image = imageio.imread((os.path.join(image_path, ('%.6d' % e_idx)) + '.png')) seq_images = np.concatenate([curr_image, next_image], axis=0) imageio.imsave((os.path.join(dump_image_path, ('%.6d' % s_idx)) + '.png'), seq_images.astype('uint8')) f.write(('%s %s\n' % ((os.path.join(folder, ('%.6d' % s_idx)) + '.png'), os.path.join(folder, 'calib.txt')))) print(folder)
def gaussian_log_likelihood(mu, data, obsrv_std): log_p = (((mu - data) ** 2) / ((2 * obsrv_std) * obsrv_std)) neg_log_p = ((- 1) * log_p) return neg_log_p
def _dist_train(model, train_dataset, cfg, eval_dataset=None, vis_dataset=None, validate=False, logger=None): data_loaders = [build_data_loader(train_dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True)] if cfg.apex.synced_bn: model = apex.parallel.convert_syncbn_model(model) model = model.cuda() optimizer = build_optimizer(model, cfg.optimizer) if cfg.apex.use_mixed_precision: amp_opt_level = ('O1' if (cfg.apex.type == 'float16') else 'O0') (model, optimizer) = amp.initialize(model, optimizer, opt_level=amp_opt_level, loss_scale=cfg.apex.loss_scale) find_unused_parameters = cfg.get('find_unused_parameters', False) model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters) runner = Runner(model, batch_processor, optimizer, cfg.work_dir, logger) if cfg.apex.use_mixed_precision: optimizer_config = DistApexOptimizerHook(**cfg.optimizer_config) else: optimizer_config = DistOptimizerHook(**cfg.optimizer_config) logger.info('Register Optimizer Hook...') runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, log_config={'interval': cfg.log_config['interval'], 'hooks': []}) for info in cfg.log_config['hooks']: assert (isinstance(info, dict) and ('type' in info)) if (info['type'] in ['TensorboardLoggerHook']): logger.info('Register Tensorboard Logger Hook...') runner.register_hook(TensorboardLoggerHook(interval=cfg.log_config.interval, register_logWithIter_keyword=['loss']), priority='VERY_LOW') if (info['type'] in ['TextLoggerHook']): logger.info('Register Text Logger Hook...') runner.register_hook(TextLoggerHook(interval=cfg.log_config.interval), priority='VERY_LOW') logger.info('Register SamplerSeed Hook...') runner.register_hook(DistSamplerSeedHook()) logger.info('Register EmptyCache Hook...') runner.register_hook(EmptyCacheHook(before_epoch=True, after_iter=False, after_epoch=True), priority='VERY_LOW') if validate: interval = cfg.get('validate_interval', 1) task = cfg.get('task', 'stereo') if (eval_dataset is not None): logger.info('Register Evaluation Hook...') if (task == 'stereo'): runner.register_hook(DistStereoEvalHook(cfg, eval_dataset, interval)) elif (task == 'flow'): runner.register_hook(DistFlowEvalHook(cfg, eval_dataset, interval)) if (vis_dataset is not None): logger.info('Register Visualization hook...') if (task == 'stereo'): runner.register_hook(DistStereoVisHook(vis_dataset, cfg, interval)) elif (task == 'flow'): runner.register_hook(DistFlowVisHook(vis_dataset, cfg, interval)) if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def test_q3_q1_range(barrel): q3q1range = barrel.q3_q1_range() assert isinstance(q3q1range, np.ndarray)
def run_parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--task', choices=['train', 'dev', 'eval'], required=True) parser.add_argument('--output_dir', type=str, default=f'./data/train') parser.add_argument('--msmarco_dir', type=str, default=f'./data/msmarco-passage') parser.add_argument('--collection_memmap_dir', type=str, default='./data/collection_memmap') parser.add_argument('--tokenize_dir', type=str, default='./data/tokenize') parser.add_argument('--max_query_length', type=int, default=20) parser.add_argument('--max_doc_length', type=int, default=256) parser.add_argument('--eval_ckpt', type=int, default=None) parser.add_argument('--per_gpu_eval_batch_size', default=26, type=int) parser.add_argument('--per_gpu_train_batch_size', default=26, type=int) parser.add_argument('--gradient_accumulation_steps', type=int, default=2) parser.add_argument('--no_cuda', action='store_true') parser.add_argument('--seed', type=int, default=42) parser.add_argument('--evaluate_during_training', action='store_true') parser.add_argument('--training_eval_steps', type=int, default=5000) parser.add_argument('--save_steps', type=int, default=5000) parser.add_argument('--logging_steps', type=int, default=100) parser.add_argument('--data_num_workers', default=0, type=int) parser.add_argument('--learning_rate', default=3e-06, type=float) parser.add_argument('--weight_decay', default=0.01, type=float) parser.add_argument('--warmup_steps', default=10000, type=int) parser.add_argument('--adam_epsilon', default=1e-08, type=float) parser.add_argument('--max_grad_norm', default=1.0, type=float) parser.add_argument('--num_train_epochs', default=1, type=int) args = parser.parse_args() time_stamp = time.strftime('%b-%d_%H:%M:%S', time.localtime()) args.log_dir = f'{args.output_dir}/log/{time_stamp}' args.model_save_dir = f'{args.output_dir}/models' args.eval_save_dir = f'{args.output_dir}/eval_results' return args
def test_nonlocal1d(): imgs = torch.randn(2, 3, 20) nonlocal_1d = NonLocal1d(3) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) imgs = torch.randn(2, 3, 20) nonlocal_1d = NonLocal1d(3, mode='dot_product') if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) imgs = torch.randn(2, 3, 20) nonlocal_1d = NonLocal1d(3, mode='concatenation') if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) imgs = torch.randn(2, 3, 20) nonlocal_1d = NonLocal1d(3, mode='gaussian') assert (not hasattr(nonlocal_1d, 'phi')) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): imgs = imgs.cuda() nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) nonlocal_1d = NonLocal1d(3, mode='gaussian', sub_sample=True) assert (isinstance(nonlocal_1d.g, nn.Sequential) and (len(nonlocal_1d.g) == 2)) assert isinstance(nonlocal_1d.g[1], nn.MaxPool1d) assert (nonlocal_1d.g[1].kernel_size == 2) assert isinstance(nonlocal_1d.phi, nn.MaxPool1d) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape) nonlocal_1d = NonLocal1d(3, mode='dot_product', sub_sample=True) for m in [nonlocal_1d.g, nonlocal_1d.phi]: assert (isinstance(m, nn.Sequential) and (len(m) == 2)) assert isinstance(m[1], nn.MaxPool1d) assert (m[1].kernel_size == 2) if (torch.__version__ == 'parrots'): if torch.cuda.is_available(): nonlocal_1d.cuda() out = nonlocal_1d(imgs) assert (out.shape == imgs.shape)
def build_strong_weak_aug_dataset(cfg, mode='train', is_source=True, epochwise=False, logger=None): assert (mode in ['train', 'val', 'test']) logger.info('currently using strong weak augmentation!!!') iters = None if (mode == 'train'): if (not epochwise): iters = (cfg.SOLVER.MAX_ITER * cfg.SOLVER.BATCH_SIZE) if is_source: dataset = DatasetCatalog.get(cfg.DATASETS.SOURCE_TRAIN, mode, num_classes=cfg.MODEL.NUM_CLASSES, max_iters=iters, transform=transform, cfg=cfg, logger=logger) else: dataset = DatasetCatalog.get(cfg.DATASETS.TARGET_TRAIN, mode, num_classes=cfg.MODEL.NUM_CLASSES, max_iters=iters, transform=transform, cfg=cfg, logger=logger) elif (mode == 'val'): dataset = DatasetCatalog.get(cfg.DATASETS.TEST, 'val', num_classes=cfg.MODEL.NUM_CLASSES, max_iters=iters, cfg=cfg) elif (mode == 'test'): dataset = DatasetCatalog.get(cfg.DATASETS.TEST, cfg.DATASETS.TEST.split('_')[(- 1)], num_classes=cfg.MODEL.NUM_CLASSES, max_iters=iters, cfg=cfg) return dataset
def offline_actor_update(buffer, agent, actor_optimizer, encoder_optimizer, batch_size, actor_clip, update_encoder, encoder_clip, augmenter, actor_lambda, aug_mix, premade_replay_dicts=None, per=True, discrete=False, filter_=True): logs = {} loss = 0.0 for ensemble_idx in range(agent.ensemble_size): if (premade_replay_dicts is not None): replay_dict = premade_replay_dicts[ensemble_idx] else: replay_dict = lu.sample_move_and_augment(buffer=buffer, batch_size=batch_size, augmenter=augmenter, aug_mix=aug_mix, per=per) loss += lu.filtered_bc_loss(logs=logs, replay_dict=replay_dict, agent=agent, ensemble_idx=ensemble_idx, filter_=filter_, discrete=discrete, need_critic_grad=update_encoder) if actor_lambda: loss += (actor_lambda * lu.action_invariance_constraint(logs=logs, replay_dict=replay_dict, agent=agent, ensemble_idx=ensemble_idx)) loss /= agent.ensemble_size actor_optimizer.zero_grad() encoder_optimizer.zero_grad() loss.backward() if actor_clip: torch.nn.utils.clip_grad_norm_(chain(*(actor.parameters() for actor in agent.actors)), actor_clip) if encoder_clip: torch.nn.utils.clip_grad_norm_(agent.encoder.parameters(), encoder_clip) actor_optimizer.step() if update_encoder: encoder_optimizer.step() logs['losses/filtered_bc_overall_loss'] = loss.item() logs['gradients/actor_offline_grad_norm'] = lu.get_grad_norm(random.choice(agent.actors)) logs['gradients/encoder_offline_actorloss_grad_norm'] = lu.get_grad_norm(agent.encoder) if per: lu.adjust_priorities(logs, replay_dict, agent, buffer) return logs
def get_num_features(data_type, corpus_file, side): return TextDataset.get_num_features(corpus_file, side)
def list_pretrained_models_by_tag(tag: str): models = [] for k in _PRETRAINED.keys(): if (tag in _PRETRAINED[k]): models.append(k) return models
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, AdapterTrainingArguments)) if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')): (model_args, data_args, training_args, adapter_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args, adapter_args) = parser.parse_args_into_dataclasses() training_args.predict_with_generate = True wandb.init(entity='lklab_kaist', project='ROE_experiments_ICLR', name=training_args.output_dir) last_checkpoint = None if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)): last_checkpoint = get_last_checkpoint(training_args.output_dir) print('#### last_checkpoint ', last_checkpoint) if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)): pass elif (last_checkpoint is not None): logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)) logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}')) if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s', training_args) set_seed(training_args.seed) config = T5Config.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None)) config.train_task_adapters = adapter_args.train_task_adapters config.prefix_tuning = adapter_args.prefix_tuning config.attn_prefix_tuning = model_args.attn_prefix_tuning config.attn_method = model_args.attn_method config.ignore_target = model_args.ignore_target config.shared_attn = model_args.shared_attn config.prefix_num = model_args.prefix_num config.num_target = len(data_args.task_name) config.temperature = model_args.temperature config.fix_attention = model_args.fix_attention adapter_config = get_adapter_config(adapter_args, data_args, training_args, config) tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None)) model = T5ForConditionalGeneration.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), adapter_config=adapter_config) if (model_args.load_prefix_embeddings is True): if (model_args.prompt_embedding_path is None): for (name, param) in model.named_parameters(): if (('prefix_shared' in name) or ('prefix' in name)): shared_params = [param] else: shared_params = [] for path in model_args.prompt_embedding_path: shared_param = torch.load(path) shared_params.append(shared_param) if (model_args.target_prompt_embedding_path is not None): target_prompt_embedding = torch.load(model_args.target_prompt_embedding_path) if (model_args.attn_prefix_tuning is True): if ((training_args.do_train is True) and (model_args.shared_attn is False)): model.store_prefix_weights(shared_params) model.update_prefix_weights_single(shared_params[0]) elif ((training_args.do_train is True) and (model_args.shared_attn is True)): model.store_prefix_weights(shared_params) model.update_prefix_weights_multi(shared_params[0], num_target=config.num_target) else: model.store_prefix_weights(shared_params) model.update_prefix_weights_single(target_prompt_embedding) elif (model_args.target_prompt_embedding_path is None): model.update_prefix_weights(shared_params) else: model.update_prefix_weights(shared_params, target_prompt_embedding) if ((model_args.load_attention is True) and (model_args.attn_path is not None)): model.update_attention_weights(torch.load(model_args.attn_path)) if ((model_args.load_attention is True) and (model_args.attn_path_sub is not None)): model.update_attention_weights_sub(model_args.attn_path_sub) if ((model_args.load_layer_norm is True) and (model_args.layer_norm_dir is not None)): model.update_layer_norm_weights(model_args.layer_norm_dir) model.resize_token_embeddings(len(tokenizer)) model = modify_model_after_init(model, training_args, adapter_args, adapter_config) model_args.load_adapter_weights = True if model_args.load_adapter_weights: adapter_params = {} lst = os.listdir(os.path.join(training_args.output_dir, 'adapter_params')) for path in lst: full_path = os.path.join(training_args.output_dir, 'adapter_params', path) params = torch.load(full_path) path_ = path.split('.') path = '.'.join(path_[:(- 1)]) adapter_params[path] = params load_cnt = 0 for (name, param) in model.named_parameters(): if param.requires_grad: load_cnt += 1 param.data = adapter_params[name].cuda() print(f'load count: {load_cnt}') print(f'Finished loading {len(adapter_params)} number of adapter parameter files') data_args.dataset_name = data_args.task_name data_args.test_dataset_name = data_args.test_dataset_name data_args.dataset_config_name = data_args.dataset_config_name data_args.eval_dataset_name = ['xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'dream', 'dream', 'dream', 'dream', 'dream', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'quarel', 'quarel', 'quarel', 'quarel', 'quarel', 'sciq', 'sciq', 'sciq', 'sciq', 'sciq', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'app_reviews', 'app_reviews', 'app_reviews', 'app_reviews', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'dbpedia_14', 'dbpedia_14', 'dbpedia_14', 'dbpedia_14', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'wiki_bio', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum'] data_args.eval_dataset_config_name = ['none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'qqp', 'qqp', 'qqp', 'qqp', 'qqp', 'qqp', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none'] data_args.eval_prompts = ['DOC_write_summary_of_above', 'article_DOC_summary', 'DOC_how_would_you_rephrase_few_words', 'college_roommate_asked_DOC_so_I_recap', 'DOC_boils_down_to_simple_idea_that', 'summarize_DOC', 'summarize_this_DOC_summary', 'DOC_given_above_write_one_sentence', 'read_below_DOC_write_abstract', 'DOC_tldr', 'what_category_best_describe', 'fine_grained_LOC', 'fine_grained_NUM_context_first', 'fine_grained_ENTY', 'fine_grained_NUM', 'pick_the_best_descriptor', 'fine_grained_open_context_first', 'fine_grained_LOC_context_first', 'which_category_best_describes', 'fine_grained_DESC', 'trec1', 'fine_grained_ABBR', 'fine_grained_ABBR_context_first', 'trec2', 'fine_grained_HUM', 'fine_grained_open', 'fine_grained_HUM_context_first', 'fine_grained_DESC_context_first', 'question_description_option_text', 'question_description_option_id', 'rationale', 'question_option_description_text', 'aligned_with_common_sense', 'description_question_option_id', 'explain_why_human', 'generate_explanation_given_text', 'description_question_option_text', 'i_think', 'question_option_description_id', 'answer_given_question_without_options', 'question_answering', 'question_to_answer_index', 'most_suitable_answer', 'answer_to_question', 'generate-last-utterance', 'answer-to-dialogue', 'generate-first-utterance', 'baseline', 'read_the_following_conversation_and_answer_the_question', 'context_question_answer_description_id', 'context_question_answer_description_text', 'description_context_question_answer_id', 'context_question_description_answer_text', 'context_question_description_text', 'context_description_question_text', 'context_question_description_answer_id', 'no_prompt_id', 'context_description_question_answer_id', 'description_context_question_text', 'no_prompt_text', 'context_description_question_answer_text', 'description_context_question_answer_text', 'use_info_from_question_paragraph', 'paragraph_question_plain_concat', 'use_info_from_paragraph_question', 'answer_question_based_on', 'answer_question_below', 'read_passage_below_choose', 'having_read_above_passage', 'given_the_fact_answer_the_q', 'I was wondering', 'Show choices and generate answer', 'Check if a random answer is valid or not', 'Generate the question from the answer', 'Generate answer', 'Show choices and generate index', 'what_might_be_the_first_step_of_the_process', 'what_might_be_the_last_step_of_the_process', 'what_is_the_missing_first_step', 'what_is_the_final_step_of_the_following_process', 'effect_with_string_answer', 'which_of_the_following_is_the_supposed_perturbation', 'effect_with_label_answer', 'does_the_supposed_perturbation_have_an_effect', 'context_answer_to_question', 'description_context_question_answer_text', 'description_context_question_text', 'description_context_question_answer_id', 'context_description_question_answer_text', 'no_prompt_id', 'context_question_description_text', 'no_prompt_text', 'context_description_question_answer_id', 'context_question_description_answer_id', 'context_description_question_text', 'context_question_description_answer_text', 'only_question_answer', 'is_correct_1', 'qa_with_separated_facts_1', 'qa_with_separated_facts_3', 'qa_with_separated_facts_4', 'qa_with_separated_facts_5', 'qa_with_combined_facts_1', 'is_correct_2', 'qa_with_separated_facts_2', 'do_not_use', 'logic_test', 'heres_a_story', 'choose_between', 'testing_students', 'Direct Question (Closed Book)', 'Multiple Choice (Closed Book)', 'Multiple Choice Question First', 'Multiple Choice', 'Direct Question', 'choose_best_object_interrogative_1', 'explain_relation', 'generate_object', 'generate_subject', 'choose_best_object_affirmative_1', 'choose_best_object_affirmative_3', 'generate_subject_and_object', 'choose_best_object_affirmative_2', 'choose_best_object_interrogative_2', 'Is_this_review', 'User_recommend_this_product', 'Is_this_product_review_positive', 'Is_this_review_negative', 'convey_negative_or_positive_sentiment', 'negative_or_positive_tone', 'user_satisfied', 'would_you_buy', 'flattering_or_not', 'categorize_rating_using_review', 'generate_review', 'convert_to_star_rating', 'convert_to_rating', 'Movie Expressed Sentiment 2', 'Reviewer Opinion bad good choices', 'Sentiment with choices ', 'Reviewer Sentiment Feeling', 'Writer Expressed Sentiment', 'Movie Expressed Sentiment', 'Text Expressed Sentiment', 'Negation template for positive and negative', 'Reviewer Enjoyment Yes No', 'Reviewer Expressed Sentiment', 'Reviewer Enjoyment', 'Movie Expressed Sentiment 2', 'Reviewer Opinion bad good choices', 'Sentiment with choices ', 'Reviewer Sentiment Feeling', 'Writer Expressed Sentiment', 'Movie Expressed Sentiment', 'Text Expressed Sentiment', 'Reviewer Enjoyment Yes No', 'Reviewer Expressed Sentiment', 'Reviewer Enjoyment', 'so_i_would', 'based_on_that', 'format_star', 'this_place', 'format_score', 'on_a_scale', 'format_rating', 'task_description-no-label', 'Meaning', 'context-question-no-label', 'Rewrite-no-label', 'context-question', 'Concatenation', 'paraphrase-task', 'Concatenation-no-label', 'Meaning-no-label', 'PAWS-ANLI GPT3', 'Rewrite', 'PAWS-ANLI GPT3-no-label', 'quora', 'duplicate or not', 'same thing', 'answer', 'meaning', 'duplicate', 'generate_paraphrase', 'want to know', 'paraphrase', 'equivalent', 'generate_sentence', 'replace', 'same thing', 'classify_question_first', 'classify_with_choices_question_first', 'recommend', 'which_section_choices', 'which_section', 'classify_with_choices', 'classify', 'given_list_what_category_does_the_paragraph_belong_to', 'pick_one_category_for_the_following_text', 'given_a_choice_of_categories ', 'given_a_list_of_category_what_does_the_title_belong_to', 'generate_question', 'tell_what_it_is', 'question_context_answer', 'based_on', 'answer_the_following_q', 'Guess Answer', 'Answer Question Given Context', 'Find Answer', 'Context Contains Answer', 'Given Context Answer Question', 'What Is The Answer', 'Answer Test', 'Guess Title For Context', 'Found Context Online', 'Answer Friend Question', 'Read And Extract ', 'prompt_beginning', 'prompt_bottom_no_hint', 'prompt_bottom_hint_beginning', 'given_background_situation', 'plain_no_background', 'plain_bottom_hint', 'plain_background_situation', 'background_new_situation_answer', 'background_situation_middle', 'new_situation_background_answer', 'prompt_mix', 'read_background_situation', 'build_story_around_qa', 'decide_worth_it', 'question_answering', 'movie_director', 'generate_question', 'extract_answer', 'title_generation', 'answer_question', 'generate_question_by_answer', 'generate_answer_affirmative', 'classify_question_type', 'generate_title_affirmative', 'generate_question', 'generate_explanations_affirmative', 'generate_answer_interrogative', 'Is This True?', 'automatic_system', 'Jeopardy style', 'Topic Prediction - Question and Answer Pair', 'Generate Question from Topic', 'found_on_google', 'Topic Prediction - Question Only', 'exercise', 'Decide_good_answer', 'Topic Prediction - Answer Only', 'Direct Answer to Question', 'Given concepts - type 2', 'Put together', 'choice in concept centric sentence generation', 'random task template prompt', 'topics from the sentence', 'sentence to concepts', 'topic to sentence', 'Example prompt', 'Given concepts type 1', 'who', 'write_an_outline', 'news_summary', '2_or_3_sentences', 'tldr_summary', 'news_card_view', 'generate_story', 'sum_in_brief', 'news_stock', 'spice_up_story', 'generate_summary_for_this', 'reverse_writing', 'make_a_title', 'first_sentence_title', 'TLDR', 'write_its_sentence', 'write_a_title_for_this_sentence', 'in_a_nutshell', 'write_an_article', 'what are the key points', 'synthesize', 'summary scenario', 'summarize', 'expand (reverse task)', 'distill', 'Summarize this dialogue:', 'Given the above dialogue write a summary', 'Summarize:', 'To sum up this dialog', 'Generate a summary for this dialogue', 'Write a dialogue that match this summary', 'Sum up the following dialogue'] data_args.test_dataset_config_name = data_args.test_dataset_config_name data_args.txt_save_dir = 'output_logs_seen_eval' assert (len(data_args.dataset_name) == len(data_args.dataset_config_name)) if (data_args.eval_dataset_name is not None): assert (len(data_args.eval_dataset_name) == len(data_args.eval_dataset_config_name)) if (data_args.test_dataset_name is not None): assert (len(data_args.test_dataset_name) == len(data_args.test_dataset_config_name)) padding = ('max_length' if data_args.pad_to_max_length else False) def preprocess_function(examples, max_target_length, task_id=None): model_inputs = tokenizer(examples['source'], max_length=data_args.max_source_length, padding=padding, truncation=True) with tokenizer.as_target_tokenizer(): labels = tokenizer(examples['target'], max_length=max_target_length, padding=padding, truncation=True) if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss): labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']] model_inputs['labels'] = labels['input_ids'] model_inputs['extra_fields'] = examples['extra_fields'] if (task_id is not None): model_inputs['task_ids'] = [task_id for _ in examples['extra_fields']] return model_inputs column_names = ['source', 'target', 'extra_fields'] performance_metrics = {} eval_metrics_dict = {((dataset_name + '*') + eval_prompt): AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).metric for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)} print('') print(data_args.eval_dataset_name) print() print(data_args.eval_dataset_config_name) print() print(eval_metrics_dict) print('') training_args.do_train = False if training_args.do_train: if (data_args.train_files is not None): train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=train_file) for (dataset_name, dataset_config_name, train_file, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_files, data_args.train_prompts)] for td in train_datasets: print('') print(len(td)) print('') else: train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=data_args.train_file) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)] for td in train_datasets: print('') print(len(td)) print('') max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)] for (i, train_dataset) in enumerate(train_datasets): if (model_args.shared_attn is True): train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i], task_id=i), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) else: print('') print(len(train_datasets[i])) print('') train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) print('') print(len(train_datasets[i])) print('') print('') print(len(train_dataset)) print('') train_dataset = concatenate_datasets(train_datasets) print('') print(len(train_dataset)) print('') training_args.do_eval = True training_args.per_device_eval_batch_size = 64 data_args.max_val_samples = 300 if training_args.do_eval: max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)] with open('/home/joel_jang/seungone/RoE/seq2seq/data/manual/seen_eval_300.json', 'r') as f: eval_datasets = {} data = json.load(f) for eval_dataset in data: if (len(data[eval_dataset]) == 0): continue for eval_prompt in data[eval_dataset]: tmp_dict = {'source': [], 'target': [], 'extra_fields': [], 'task': []} for idx in data[eval_dataset][eval_prompt]: tmp_dict['task'].append(((eval_dataset + '*') + eval_prompt)) tmp_dict['source'].append(data[eval_dataset][eval_prompt][idx]['source']) tmp_dict['target'].append(data[eval_dataset][eval_prompt][idx]['target']) if ('labels_list' in data[eval_dataset][eval_prompt][idx]): if ('labels_list' not in tmp_dict): tmp_dict['labels_list'] = [] tmp_dict['labels_list'].append(data[eval_dataset][eval_prompt][idx]['labels_list']) tmp_dict['extra_fields'].append({}) eval_datasets[((eval_dataset + '*') + eval_prompt)] = Dataset.from_dict(tmp_dict) for (k, name) in enumerate(eval_datasets): if (name == 'lama_fill_mask'): max_target_lengths[k] = 2 elif (name == 'lambada_what comes next'): max_target_lengths[k] = 1 if (model_args.shared_attn is True): eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) else: eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) if training_args.do_test: if (data_args.test_files is not None): test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=test_file) for (test_dataset, test_dataset_config, test_file, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_files, data_args.test_prompts)} else: test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=data_args.test_file) for (test_dataset, test_dataset_config, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)} max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=test_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)] for (k, name) in enumerate(test_datasets): if (name == 'lama'): max_target_lengths[k] = 2 elif (name == 'lambada'): max_target_lengths[k] = 1 if (model_args.shared_attn is True): test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) else: test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache)) label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id) if data_args.pad_to_max_length: data_collator = default_data_collator else: data_collator = TaskDataCollatorForSeq2Seq(tokenizer, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None)) if training_args.do_eval: data_info = {'eval': eval_datasets[((data_args.eval_dataset_name[0] + '*') + data_args.eval_prompts[0])]['extra_fields'], 'test': (test_datasets[((data_args.test_dataset_name[0] + '*') + data_args.test_prompts[0])]['extra_fields'] if training_args.do_test else None), 'train': (train_dataset['extra_fields'] if training_args.do_train else None)} else: data_info = {'train': (train_dataset['extra_fields'] if training_args.do_train else None)} def compute_metrics(eval_preds, task_name): (preds, labels, data_info, input_ids) = eval_preds decoded_input_ids = tokenizer.batch_decode(input_ids, skip_special_tokens=True) decoded_input_ids = [ii.strip() for ii in decoded_input_ids] post_processor = AutoPostProcessor.get(task_name, tokenizer, data_args.ignore_pad_token_for_loss) (decoded_preds, decoded_labels) = post_processor.process(preds, labels, data_info) result = {} eval_metrics = eval_metrics_dict[task_name] for metric in eval_metrics: result.update(metric(decoded_preds, decoded_labels)) if (os.path.isdir(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}") == False): os.mkdir(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}") with open(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}*{data_args.train_prompts[0].replace('/', ' ').replace('-', ' ')}-{task_name.replace('/', ' ').replace('-', ' ')}.txt", 'a') as f: f.write('\n') f.write(task_name) f.write('\n') for (a, b, c) in zip(decoded_preds, decoded_labels, decoded_input_ids): f.write(a) f.write(' | ') f.write(b) f.write(' | ') f.write(c) f.write('\n') f.write('>> ') for (key, value) in result.items(): f.write((((str(key) + ' : ') + str(value)) + ' | ')) f.write('\n') f.write('\n') return result if (model_args.attn_learning_rate is not None): all_parameters = set(model.parameters()) attn_params = [] for (name, param) in model.named_parameters(): if ((name == 'encoder.attn_W_up') or (name == 'encoder.attn_W_down') or (name == 'encoder.layer_norm')): attn_params += list(param) attn_params = set(attn_params) non_attn_params = (all_parameters - attn_params) non_attn_params = list(non_attn_params) attn_params = list(attn_params) optim = AdamW([{'params': non_attn_params}, {'params': attn_params, 'lr': model_args.attn_learning_rate}], lr=training_args.learning_rate) scheduler = get_linear_schedule_with_warmup(optim, num_warmup_steps=training_args.warmup_steps, num_training_steps=((len(train_dataset) * training_args.num_train_epochs) // (training_args.gradient_accumulation_steps * training_args.per_device_train_batch_size))) trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn, optimizers=(optim, scheduler)) else: trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn) if training_args.do_eval: print('') print(eval_datasets) print('') if trainer.is_world_process_zero(): os.makedirs(training_args.output_dir, exist_ok=True) save_training_config(sys.argv[1], training_args.output_dir) model_args.save_adapter_weights = False if model_args.save_adapter_weights: params_to_save = {} unfrozen_layers = 0 for (name, param) in trainer.model.named_parameters(): if (param.requires_grad == True): print(name) params_to_save[name] = 0 unfrozen_layers += 1 print(f'number of unfrozen layers (for beginning of training model): {unfrozen_layers}') if training_args.do_train: checkpoint = None if (training_args.resume_from_checkpoint is not None): checkpoint = training_args.resume_from_checkpoint elif (last_checkpoint is not None): checkpoint = last_checkpoint if training_args.compute_time: torch.cuda.synchronize() start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() train_result = trainer.train(resume_from_checkpoint=checkpoint) if training_args.compute_time: end.record() torch.cuda.synchronize() total_time = (start.elapsed_time(end) / (1000 * 60)) performance_metrics.update({'total_time in minutes ': total_time}) if model_args.save_adapter_weights: if (not os.path.exists(os.path.join(training_args.output_dir, 'adapter_params'))): os.mkdir(os.path.join(training_args.output_dir, 'adapter_params')) layer_cnt = 0 for (name, param) in trainer.model.named_parameters(): if (name in params_to_save): save_path = os.path.join(training_args.output_dir, f'adapter_params/{name}.pt') print(name) torch.save(param, save_path) layer_cnt += 1 print(f'finished saving adapters! saved {layer_cnt} number of layers') exit() if model_args.save_prefix_only: for (name, param) in trainer.model.named_parameters(): if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))): shared_params = param torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt')) elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')): shared_params = param if (model_args.shared_attn is True): for i in range(config.num_target): torch.save(shared_params[i], os.path.join(training_args.output_dir, 'prefix_embeddings_{}.pt'.format(i))) else: torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_Wa_weights.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_down.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_up.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_weight.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_bias.pt')) else: trainer.save_model() train_metrics = train_result.metrics max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset)) train_metrics['train_samples'] = min(max_train_samples, len(train_dataset)) trainer.log_metrics('train', train_metrics) trainer.save_metrics('train', train_metrics) if (not model_args.save_prefix_only): trainer.save_state() if (torch.cuda.is_available() and training_args.compute_memory): peak_memory = ((torch.cuda.max_memory_allocated() / (1024 ** 2)) / 1000) print('Memory utilization', peak_memory, 'GB') performance_metrics.update({'peak_memory': peak_memory}) if (training_args.compute_memory or training_args.compute_time): trainer.save_metrics('performance', performance_metrics) if ((model_args.shared_attn is True) and (model_args.ignore_target is False)): learned_embeddings = trainer.model.encoder.prefix_emb.clone().detach() results = {} if training_args.do_eval: logger.info('*** Evaluate ***') if (model_args.shared_attn is True): for (task, eval_dataset) in eval_datasets.items(): metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task) trainer.log_metrics(f'eval_{task}_', metrics) trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics) if training_args.wandb_log: wandb.log({f'eval_{task}_': metrics}) else: for (task, eval_dataset) in eval_datasets.items(): print('') print(task) print('') metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task) trainer.log_metrics(f'eval_{task}_', metrics) trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics) if training_args.wandb_log: wandb.log({f'eval_{task}_': metrics}) if model_args.save_prefix_only: checkpoints = glob.glob(os.path.join(training_args.output_dir, 'checkpoint-*')) for checkpoint_dir in checkpoints: if (not os.path.exists(os.path.join(checkpoint_dir, 'pytorch_model.bin'))): continue checkpoint_model = torch.load(os.path.join(os.path.join(checkpoint_dir, 'pytorch_model.bin'))) new_dir = '{}_prompt_only'.format(checkpoint_dir) os.mkdir(new_dir) for (name, param) in checkpoint_model.items(): if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))): shared_params = param torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt')) elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')): shared_params = param if (model_args.shared_attn is True): for i in range(config.num_target): torch.save(shared_params[i], os.path.join(new_dir, 'prefix_embeddings_{}.pt'.format(i))) else: torch.save(shared_params, os.path.join(new_dir, 'prefix_embeddings.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(new_dir, 'attn_Wa_weights.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_down.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_up.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_weight.pt')) if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)): attn_weights_params = param torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_bias.pt')) try: shutil.rmtree(checkpoint_dir) except OSError as e: print(('Error: %s : %s' % (checkpoint_dir, e.strerror))) if training_args.do_test: logger.info('*** Test ***') if (model_args.shared_attn is True): for (idx, (task, test_dataset)) in enumerate(test_datasets.items()): trainer.model.encoder.prefix_emb[0].data = learned_embeddings[idx] metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task) trainer.log_metrics(f'test_{task}_', metrics) trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics) else: for (task, test_dataset) in test_datasets.items(): metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task) trainer.log_metrics(f'test_{task}_', metrics) trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics) return results
def cfg_base(): task = 'autoencoding' model_base_path = '/mnt/models/' store_representation = True store_prediction = True folders_to_convert = None split_to_convert = None batch_size = 64 n_dataloader_workers = 8 data_dir = '/mnt/data' save_dir = '/mnt/data'
class Conv2DLayer(object): def __init__(self, input_layer, n_filters, filter_size, weights_std, init_bias_value, stride=1, nonlinearity=layers.rectify, dropout=0.0, partial_sum=None, pad=0, untie_biases=False, trainable=True): self.input_layer = input_layer self.input_shape = self.input_layer.get_output_shape() self.n_filters = n_filters n_channels = self.input_shape[0] self.n_channels = n_channels self.filter_size = filter_size self.weights_std = numpy.float32(weights_std) self.init_bias_value = numpy.float32(init_bias_value) self.stride = stride self.nonlinearity = nonlinearity self.dropout = dropout self.partial_sum = partial_sum self.pad = pad self.untie_biases = untie_biases self.mb_size = self.input_layer.mb_size self.filter_shape = (n_channels, filter_size, filter_size, n_filters) self.trainable = trainable self.W = layers.shared_single(4) if self.untie_biases: self.b = layers.shared_single(3) else: self.b = layers.shared_single(1) self.params = [self.W, self.b] self.bias_params = [self.b] self.data_order = layers.data_order.type2 assert (len(self.input_layer.get_output_shape()) == 4), 'Input must have 4 dimensions.' assert (self.input_layer.data_order == self.data_order), "Input data order does not match this layer's data order." self.reset_params() self.filter_acts_op = FilterActs(stride=self.stride, partial_sum=self.partial_sum, pad=self.pad) def reset_params(self): self.W.set_value((numpy.random.randn(*self.filter_shape).astype(numpy.float32) * self.weights_std)) if self.untie_biases: self.b.set_value((numpy.ones(self.get_output_shape()[:3]).astype(numpy.float32) * self.init_bias_value)) else: self.b.set_value((numpy.ones(self.n_filters).astype(numpy.float32) * self.init_bias_value)) def get_output_shape(self): output_width = int(numpy.ceil((((((self.input_shape[1] + (2 * self.pad)) - self.filter_size) + self.stride) * 1.0) / self.stride))) output_height = int(numpy.ceil((((((self.input_shape[2] + (2 * self.pad)) - self.filter_size) + self.stride) * 1.0) / self.stride))) output_shape = (self.n_filters, output_width, output_height, self.mb_size) return output_shape def output(self, input=None, dropout_active=True, *args, **kwargs): if (input is None): input = self.input_layer.output(*args, dropout_active=dropout_active, **kwargs) if (dropout_active and (self.dropout > 0.0)): retain_prob = (1 - self.dropout) mask = layers.srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32') input = ((input / retain_prob) * mask) contiguous_input = gpu_contiguous(input) contiguous_filters = gpu_contiguous(self.W) conved = self.filter_acts_op(contiguous_input, contiguous_filters) if self.untie_biases: conved += self.b.dimshuffle(0, 1, 2, 'x') else: conved += self.b.dimshuffle(0, 'x', 'x', 'x') return self.nonlinearity(conved)
class InvertedResidual(nn.Module): def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, conv_kwargs=None, drop_connect_rate=0.0): super(InvertedResidual, self).__init__() norm_kwargs = (norm_kwargs or {}) conv_kwargs = (conv_kwargs or {}) mid_chs: int = make_divisible((in_chs * exp_ratio)) self.has_residual = (((in_chs == out_chs) and (stride == 1)) and (not noskip)) self.drop_connect_rate = drop_connect_rate self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) self.bn1 = norm_layer(mid_chs, **norm_kwargs) self.act1 = act_layer(inplace=True) self.conv_dw = select_conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True, **conv_kwargs) self.bn2 = norm_layer(mid_chs, **norm_kwargs) self.act2 = act_layer(inplace=True) if ((se_ratio is not None) and (se_ratio > 0.0)): se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) else: self.se = nn.Identity() self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) self.bn3 = norm_layer(out_chs, **norm_kwargs) def forward(self, x): residual = x x = self.conv_pw(x) x = self.bn1(x) x = self.act1(x) x = self.conv_dw(x) x = self.bn2(x) x = self.act2(x) x = self.se(x) x = self.conv_pwl(x) x = self.bn3(x) if self.has_residual: if (self.drop_connect_rate > 0.0): x = drop_connect(x, self.training, self.drop_connect_rate) x += residual return x
class BasicTokenizer(object): def __init__(self, do_lower_case=False, never_split=None, tokenize_chinese_chars=True): if (never_split is None): never_split = [] self.do_lower_case = do_lower_case self.never_split = never_split self.tokenize_chinese_chars = tokenize_chinese_chars def tokenize(self, text, never_split=None): never_split = (self.never_split + (never_split if (never_split is not None) else [])) text = self._clean_text(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if (token not in never_split): token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if (cat == 'Mn'): continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if ((never_split is not None) and (text in never_split)): return [text] chars = list(text) i = 0 start_new_word = True output = [] while (i < len(chars)): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[(- 1)].append(char) i += 1 return [''.join(x) for x in output] def _clean_text(self, text): output = [] for char in text: cp = ord(char) if ((cp == 0) or (cp == 65533) or _is_control(char)): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output)
def process_checkpoint(in_file, out_file): checkpoint = torch.load(in_file, map_location='cpu') if ('optimizer' in checkpoint): del checkpoint['optimizer'] torch.save(checkpoint, out_file) sha = calculate_file_sha256(out_file) final_file = (out_file.rstrip('.pth') + f'-{sha[:8]}.pth') os.rename(out_file, final_file) final_file_name = osp.split(final_file)[1] final_file_name = osp.splitext(final_file_name)[0] return final_file_name
class AbstractActionSpace(abc.ABC): def step(self, state, action): pass def reset(self, state): pass def random_action(self): pass def action_spec(self): pass
def STFT(fl): (f, t, Zxx) = signal.stft(fl, nperseg=64) img = (np.abs(Zxx) / len(Zxx)) return img
class Segment(): def __init__(self, model: str, class_idx: Optional[int]=None, threshold_direction: str='less'): import slideflow.segment if (threshold_direction not in ['less', 'greater']): raise ValueError('Invalid threshold_direction: {}. Expected one of: less, greater'.format(threshold_direction)) self.model_path = model self.class_idx = class_idx (self.model, self.cfg) = sf.segment.load_model_and_config(model) self.threshold_direction = threshold_direction def __repr__(self): return 'Segment(model={!r})'.format(self.model_path) def generate_rois(self, wsi: 'sf.WSI', apply: bool=True) -> List[np.ndarray]: try: from cellpose.utils import outlines_list except ImportError: raise ImportError("Cellpose must be installed for generating ROIs from a segmentation model. Cellpose can be installed via 'pip install cellpose'.") preds = self(wsi, threshold=None) labels = None if (self.cfg.mode == 'binary'): (labeled, n_rois) = label((preds > 0)) outlines = outlines_list(labeled) outlines = [o for o in outlines if o.shape[0]] elif (self.cfg.mode == 'multiclass'): pred_max = preds.argmax(axis=0) outlines = [] labels = [] for i in range(preds.shape[0]): if (i == 0): continue (labeled, n_rois) = label((pred_max == i)) _outlined = outlines_list(labeled) outlines += _outlined if (self.cfg.labels and (len(self.cfg.labels) >= i)): lbl = self.cfg.labels[(i - 1)] else: lbl = i labels += ([lbl] * len(_outlined)) labels = [labels[l] for l in range(len(labels)) if outlines[l].shape[0]] outlines = [o for o in outlines if o.shape[0]] elif (self.cfg.mode == 'multilabel'): outlines = [] labels = [] for i in range(preds.shape[0]): (labeled, n_rois) = label((preds[i] > 0)) _outlined = outlines_list(labeled) outlines += _outlined if (self.cfg.labels and (len(self.cfg.labels) > i)): lbl = self.cfg.labels[i] else: lbl = i labels += ([lbl] * len(_outlined)) labels = [labels[l] for l in range(len(labels)) if outlines[l].shape[0]] outlines = [o for o in outlines if o.shape[0]] else: raise ValueError('Invalid loss mode: {}. Expected one of: binary, multiclass, multilabel'.format(self.cfg.mode)) outlines = [(o * (self.cfg.mpp / wsi.mpp)) for o in outlines] if (labels is not None): assert (len(outlines) == len(labels)), 'Number of outlines and labels must match.' if apply: for (o, outline) in enumerate(outlines): wsi.load_roi_array(outline, process=False, label=(None if (labels is None) else labels[o])) wsi.process_rois() return outlines def __call__(self, wsi: Union[('sf.WSI', np.ndarray)], threshold: Optional[float]=0) -> np.ndarray: if isinstance(wsi, sf.WSI): preds = self.model.run_slide_inference(wsi) else: preds = self.model.run_tiled_inference(wsi) if ((threshold is None) or (threshold is False)): return preds if (self.cfg.mode == 'binary'): if (self.threshold_direction == 'less'): return (preds < threshold) else: return (preds > threshold) elif (self.cfg.mode == 'multiclass'): if (self.class_idx is not None): return (preds.argmax(axis=0) != self.class_idx) else: return (preds.argmax(axis=0) == 0) elif (self.cfg.mode == 'multilabel'): if ((self.class_idx is not None) and (self.threshold_direction == 'less')): return (preds[self.class_idx] < threshold) elif ((self.class_idx is not None) and (self.threshold_direction == 'greater')): return (preds[self.class_idx] > threshold) elif (self.threshold_direction == 'less'): return np.all((preds < threshold), axis=0) else: return np.all((preds > threshold), axis=0)
class Timer(object): def __init__(self): self.total = 0 def start(self): self.start_time = time.time() def finish(self): self.total += (time.time() - self.start_time)
class FairseqLanguageModel(BaseFairseqModel): def __init__(self, decoder): super().__init__() self.decoder = decoder def forward(self, src_tokens, **kwargs): return self.decoder(src_tokens, **kwargs) def max_positions(self): return self.decoder.max_positions() def supported_targets(self): return {'future'} def remove_head(self): raise NotImplementedError()
class DLA(nn.Module): def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, cardinality=1, base_width=64, block=DlaBottle2neck, residual_root=False, drop_rate=0.0, global_pool='avg'): super(DLA, self).__init__() self.channels = channels self.num_classes = num_classes self.cardinality = cardinality self.base_width = base_width self.drop_rate = drop_rate assert (output_stride == 32) self.base_layer = nn.Sequential(nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), nn.BatchNorm2d(channels[0]), nn.ReLU(inplace=True)) self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) cargs = dict(cardinality=cardinality, base_width=base_width, root_residual=residual_root) self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) self.feature_info = [dict(num_chs=channels[0], reduction=1, module='level0'), dict(num_chs=channels[1], reduction=2, module='level1'), dict(num_chs=channels[2], reduction=4, module='level2'), dict(num_chs=channels[3], reduction=8, module='level3'), dict(num_chs=channels[4], reduction=16, module='level4'), dict(num_chs=channels[5], reduction=32, module='level5')] self.num_features = channels[(- 1)] (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels) m.weight.data.normal_(0, math.sqrt((2.0 / n))) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([nn.Conv2d(inplanes, planes, kernel_size=3, stride=(stride if (i == 0) else 1), padding=dilation, bias=False, dilation=dilation), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) def forward_features(self, x): x = self.base_layer(x) x = self.level0(x) x = self.level1(x) x = self.level2(x) x = self.level3(x) x = self.level4(x) x = self.level5(x) return x def forward(self, x): x = self.forward_features(x) x = self.global_pool(x) if (self.drop_rate > 0.0): x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.fc(x) if (not self.global_pool.is_identity()): x = x.flatten(1) return x
def test_dummy_parameter_encoder_can_be_instantiated(): model = DummyParameterEncoder((1, 1)) assert (model is not None)
def latent_noise(latent, strength): noise = (torch.randn_like(latent) * strength) return (latent + noise)
def test_compute_calls(snapshot): assert (json.dumps(eia_api_v2.EIASession().compute_facet_options(eia_api_v2.ROUTES)) == snapshot(name='Output from compute_facet_options'))
class colour3(): def __init__(self, nR=0, nG=0, nB=0): self.R = nR self.G = nG self.B = nB
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() last_checkpoint = None if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)): last_checkpoint = get_last_checkpoint(training_args.output_dir) if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)): raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.') elif (last_checkpoint is not None): logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)) logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}')) if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s', training_args) set_seed(training_args.seed) train_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split=data_args.train_split_name) eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test') chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]" def remove_special_characters(batch): batch['text'] = (re.sub(chars_to_ignore_regex, '', batch['sentence']).lower() + ' ') return batch train_dataset = train_dataset.map(remove_special_characters, remove_columns=['sentence']) eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=['sentence']) def extract_all_chars(batch): all_text = ' '.join(batch['text']) vocab = list(set(all_text)) return {'vocab': [vocab], 'all_text': [all_text]} vocab_train = train_dataset.map(extract_all_chars, batched=True, batch_size=(- 1), keep_in_memory=True, remove_columns=train_dataset.column_names) vocab_test = train_dataset.map(extract_all_chars, batched=True, batch_size=(- 1), keep_in_memory=True, remove_columns=eval_dataset.column_names) vocab_list = list((set(vocab_train['vocab'][0]) | set(vocab_test['vocab'][0]))) vocab_dict = {v: k for (k, v) in enumerate(vocab_list)} vocab_dict['|'] = vocab_dict[' '] del vocab_dict[' '] vocab_dict['[UNK]'] = len(vocab_dict) vocab_dict['[PAD]'] = len(vocab_dict) with open('vocab.json', 'w') as vocab_file: json.dump(vocab_dict, vocab_file) tokenizer = Wav2Vec2CTCTokenizer('vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|') feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) model = Wav2Vec2ForCTC.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer)) if (data_args.max_train_samples is not None): train_dataset = train_dataset.select(range(data_args.max_train_samples)) if (data_args.max_val_samples is not None): eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) resampler = torchaudio.transforms.Resample(48000, 16000) def speech_file_to_array_fn(batch): (speech_array, sampling_rate) = torchaudio.load(batch['path']) batch['speech'] = resampler(speech_array).squeeze().numpy() batch['sampling_rate'] = 16000 batch['target_text'] = batch['text'] return batch train_dataset = train_dataset.map(speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers) eval_dataset = eval_dataset.map(speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers) def prepare_dataset(batch): assert (len(set(batch['sampling_rate'])) == 1), f'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.' batch['input_values'] = processor(batch['speech'], sampling_rate=batch['sampling_rate'][0]).input_values with processor.as_target_processor(): batch['labels'] = processor(batch['target_text']).input_ids return batch train_dataset = train_dataset.map(prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers) eval_dataset = eval_dataset.map(prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers) wer_metric = datasets.load_metric('wer') def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=(- 1)) pred.label_ids[(pred.label_ids == (- 100))] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {'wer': wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) trainer = CTCTrainer(model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=processor.feature_extractor) if training_args.do_train: if (last_checkpoint is not None): checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None if is_main_process(training_args.local_rank): processor.save_pretrained(training_args.output_dir) train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset)) metrics['train_samples'] = min(max_train_samples, len(train_dataset)) trainer.log_metrics('train', metrics) trainer.save_metrics('train', metrics) trainer.save_state() results = {} if training_args.do_eval: logger.info('*** Evaluate ***') metrics = trainer.evaluate() max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset)) metrics['eval_samples'] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics('eval', metrics) trainer.save_metrics('eval', metrics) return results
def _bhy_threshold(pvals, reshaping_function=None, fdr=0.1): n_features = len(pvals) pvals_sorted = np.sort(pvals) selected_index = (2 * n_features) if (reshaping_function is None): temp = np.arange(n_features) sum_inverse = np.sum((1 / (temp + 1))) return _bhq_threshold(pvals, (fdr / sum_inverse)) else: for i in range((n_features - 1), (- 1), (- 1)): if (pvals_sorted[i] <= ((fdr * reshaping_function((i + 1))) / n_features)): selected_index = i break if (selected_index <= n_features): return pvals_sorted[selected_index] else: return (- 1.0)
class DreamerLearnerConfig(DreamerConfig): def __init__(self): super().__init__() self.MODEL_LR = 0.0002 self.ACTOR_LR = 0.0005 self.VALUE_LR = 0.0005 self.CAPACITY = 500000 self.MIN_BUFFER_SIZE = 100 self.MODEL_EPOCHS = 1 self.EPOCHS = 1 self.PPO_EPOCHS = 5 self.MODEL_BATCH_SIZE = 40 self.BATCH_SIZE = 40 self.SEQ_LENGTH = 50 self.N_SAMPLES = 1 self.TARGET_UPDATE = 1 self.DEVICE = 'cpu' self.GRAD_CLIP = 100.0 self.HORIZON = 15 self.ENTROPY = 0.001 self.ENTROPY_ANNEALING = 0.99998 self.GRAD_CLIP_POLICY = 100.0 def create_learner(self): return DreamerLearner(self)
def load_model(type, folder, checkpoint, temperature, device, dataset='cifar10', load_temp=False, model_params=None): dataset = dataset.lower() if (dataset == 'cifar10'): dataset_dir = 'Cifar10Models' num_classes = 10 model_family = 'Cifar32' elif (dataset == 'cifar100'): dataset_dir = 'Cifar100Models' num_classes = 100 model_family = 'Cifar32' elif (dataset == 'svhn'): dataset_dir = 'SVHNModels' num_classes = 10 model_family = 'Cifar32' elif (dataset == 'tinyImageNet'): dataset_dir = 'TinyImageNetModels' num_classes = 200 model_family = 'ImageNet224' elif (dataset == 'restrictedimagenet'): dataset_dir = 'RestrictedImageNetModels' num_classes = 9 model_family = 'ImageNet224' elif (dataset == 'imagenet'): dataset_dir = 'ImageNetModels' num_classes = 1000 model_family = 'ImageNet224' elif (dataset == 'imagenet100'): dataset_dir = 'ImageNet100Models' num_classes = 100 model_family = 'ImageNet224' elif (dataset == 'pets'): dataset_dir = 'PetsModels' num_classes = 37 model_family = 'ImageNet224' elif (dataset == 'flowers'): dataset_dir = 'FlowersModels' num_classes = 102 model_family = 'ImageNet224' elif (dataset == 'cars'): dataset_dir = 'CarsModels' num_classes = 196 model_family = 'ImageNet224' elif (dataset == 'food-101'): dataset_dir = 'Food-101Models' num_classes = 101 model_family = 'ImageNet224' elif (dataset == 'lsun_scenes'): dataset_dir = 'LSUNScenesModels' num_classes = 10 model_family = 'ImageNet224' else: raise ValueError('Dataset not supported') if (type in non_native_model): model = load_non_native_model(type, folder, device) if (temperature is not None): model = TemperatureWrapper(model, temperature) return model if ('BiT' in type): model = load_big_transfer_model(type, folder, checkpoint, device, dataset_dir, num_classes, load_temp=load_temp) model = BigTransferWrapper(model) else: if (model_family == 'Cifar32'): model = load_cifar_family_model(type, folder, checkpoint, device, dataset_dir, num_classes, load_temp=load_temp, model_params=model_params) elif (model_family == 'ImageNet224'): model = load_imagenet_family_model(type, folder, checkpoint, device, dataset_dir, num_classes, load_temp=load_temp, model_params=model_params) else: raise ValueError() if (dataset == 'cifar10'): model = Cifar10Wrapper(model) elif (dataset == 'cifar100'): model = Cifar100Wrapper(model) elif (dataset == 'svhn'): model = SVHNWrapper(model) elif (dataset == 'tinyimagenet'): model = Cifar100Wrapper(model) elif (dataset == 'imagenet'): model = ImageNetWrapper(model) elif (dataset == 'restrictedimagenet'): model = RestrictedImageNetWrapper(model) elif (dataset == 'imagenet100'): model = ImageNetWrapper(model) elif (dataset == 'pets'): model = ImageNetWrapper(model) elif (dataset == 'food-101'): model = ImageNetWrapper(model) elif (dataset == 'cars'): model = ImageNetWrapper(model) elif (dataset == 'flowers'): model = ImageNetWrapper(model) elif (dataset == 'lsun_scenes'): model = ImageNetWrapper(model) else: raise ValueError('Dataset not supported') model.to(device) if (temperature is not None): model = TemperatureWrapper(model, temperature) model.eval() return model
def _export_pytorch_model(f, pytorch_model, dummy_input): kwargs = {'do_constant_folding': False, 'export_params': True, 'enable_onnx_checker': False, 'input_names': ['input'], 'output_names': ['output']} try: torch.onnx.export(pytorch_model, dummy_input, f, **kwargs) except TypeError: kwargs.pop('enable_onnx_checker') torch.onnx.export(pytorch_model, dummy_input, f, **kwargs) return f
class Reducer(nn.Module): def __init__(self, dim, exclude_self=True, exists=True): super().__init__() self.dim = dim self.exclude_self = exclude_self self.exists = exists def forward(self, inputs): shape = inputs.size() (inp0, inp1) = (inputs, inputs) if self.exclude_self: mask = exclude_mask(inputs, cnt=self.dim, dim=((- 1) - self.dim)) inp0 = mask_value(inputs, mask, 0.0) inp1 = mask_value(inputs, mask, 1.0) if self.exists: shape = (shape[:(- 2)] + ((shape[(- 1)] * 2),)) exists = torch.max(inp0, dim=(- 2))[0] forall = torch.min(inp1, dim=(- 2))[0] return torch.stack((exists, forall), dim=(- 1)).view(shape) shape = (shape[:(- 2)] + (shape[(- 1)],)) return torch.max(inp0, dim=(- 2))[0].view(shape) def get_output_dim(self, input_dim): if self.exists: return (input_dim * 2) return input_dim
def set_reactivity(line_: str) -> None: line_ = line_.lower().strip() usage = f'Usage: %flow reactivity [{ReactivityMode.BATCH}|{ReactivityMode.INCREMENTAL}]' if (line_ in ('batch', 'incremental')): reactivity = ReactivityMode(line_) else: warn(usage) return flow().mut_settings.reactivity_mode = reactivity
class TestRetryDifferentOnError(): def test_default(self): class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise ValueError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) (x, y, meta) = dataset[1] assert (x['item'] == 1), 'Loading of item (without exception) failed.' assert ('errors' in meta), "Missing 'error' key when logging errors" (x, y, meta) = dataset[2] assert (x['item'] != 2), 'Loading of item (with exception) failed.' def test_exc_single(self): class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise ValueError return ({'item': item}, {}, {'item': str(item)}) (x, y, meta) = TmpData(10)[2] assert (x['item'] != 2), 'Loading of item (with exception) failed.' def test_exc_ignore(self): class TmpData(Dataset, retry_exc=ValueError, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise TypeError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) _ = dataset[1] with pytest.raises(TypeError): _ = dataset[2] def test_exc_multiple(self): class TmpData(Dataset, retry_exc=[ValueError, TypeError], silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise ValueError if ((item % 3) == 0): raise TypeError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) (x, y, meta) = dataset[2] assert (x['item'] != 2), 'Loading of item (with exception) failed.' (x, y, meta) = dataset[3] assert (x['item'] != 2), 'Loading of item (with exception) failed.' def test_exc_none(self): class TmpData(Dataset, retry_exc=None, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise ValueError if ((item % 3) == 0): raise TypeError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) _ = dataset[5] with pytest.raises(ValueError): _ = dataset[2] with pytest.raises(TypeError): _ = dataset[3] def test_silent(self): class TmpData(Dataset, retry_exc=Exception, silent=True, max_retries=None, use_blacklist=False): def getitem(self, item): if ((item % 2) == 0): raise ValueError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) (x, y, meta) = dataset[2] assert (x['item'] != 2), 'Loading of item (with exception) failed.' assert ('errors' not in meta), 'Error when disabling exception catching.' def test_max_retries(self): class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): raise ValueError with pytest.raises(RecursionError): _ = TmpData(10)[0] class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=5, use_blacklist=False): def getitem(self, item): raise ValueError with pytest.raises(RuntimeError): _ = TmpData(10)[0] def test_blacklist(self): class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=True): def getitem(self, item): if (item != 0): raise ValueError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) _ = [dataset[i] for i in range(10)] for i in range(10): (x, y, meta) = dataset[i] assert (x['item'] == 0) for j in range(10): if (i != j): assert (str(j) not in meta['errors']), 'Error including item in blacklist.' def test_blacklist_none(self): class TmpData(Dataset, retry_exc=Exception, silent=False, max_retries=None, use_blacklist=False): def getitem(self, item): if (item != 0): raise ValueError return ({'item': item}, {}, {'item': str(item)}) dataset = TmpData(10) num_errors = [] for i in range(10): (x, y, meta) = dataset[i] assert (x['item'] == 0), 'Error loading correct item.' num_errors.append(meta['errors'].count('ValueError')) assert (max(num_errors) > 1), 'Error when repeating exception items.'
def rescale(img): (w, h) = img.size min_len = min(w, h) (new_w, new_h) = (min_len, min_len) scale_w = ((w - new_w) // 2) scale_h = ((h - new_h) // 2) box = (scale_w, scale_h, (scale_w + new_w), (scale_h + new_h)) img = img.crop(box) return img
def train_topmine_ngrammer(documents, threshhold=1, max_ngramm_len=3, min_word_len=2, regexp='[.,!?;: ]', stopwords=None): splitted_docs = [] for doc in documents: if isinstance(doc, str): splitted_docs.append(split_document_by_delimeters(doc, regexp, min_word_len=min_word_len, stopwords=stopwords)) elif isinstance(doc, list): splitted_docs.append(doc) else: print('Wrong document format') ng = None try: ng = NGrammer(regexp=regexp) ng.frequentPhraseMining(splitted_docs, threshhold=threshhold, max_ngramm_len=max_ngramm_len) except Exception: print('Exception occurred while training ngrammer for abstracts') return ng
def main(): parser = argparse.ArgumentParser() parser.add_argument('--apply', dest='apply', action='store_true', default=False, help='Apply style to files in-place.') parser.add_argument('--no_parallel', dest='no_parallel', action='store_true', default=False, help='Disable parallel execution.') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='If true, prints file names while formatting.') args = parser.parse_args() (clang_format_bin, clang_format_version) = _find_clang_format() print(f'Using clang-format {clang_format_bin} ({clang_format_bin})') print(f'Using yapf {yapf.__version__} ({yapf.__file__})') print(f'Using nbformat {nbformat.__version__} ({nbformat.__file__})') pwd = Path(__file__).resolve().parent python_style_config = str((pwd.parent / '.style.yapf')) cpp_ignored_files = ['cpp/open3d/visualization/shader/Shader.h'] cpp_files = _glob_files(CPP_FORMAT_DIRS, ['h', 'cpp', 'cuh', 'cu', 'isph', 'ispc', 'h.in']) cpp_files = _filter_files(cpp_files, cpp_ignored_files) cpp_formatter = CppFormatter(cpp_files, clang_format_bin=clang_format_bin) python_formatter = PythonFormatter(_glob_files(PYTHON_FORMAT_DIRS, ['py']), style_config=python_style_config) jupyter_formatter = JupyterFormatter(_glob_files(JUPYTER_FORMAT_DIRS, ['ipynb']), style_config=python_style_config) changed_files = [] wrong_header_files = [] (changed_files_cpp, wrong_header_files_cpp) = cpp_formatter.run(apply=args.apply, no_parallel=args.no_parallel, verbose=args.verbose) changed_files.extend(changed_files_cpp) wrong_header_files.extend(wrong_header_files_cpp) (changed_files_python, wrong_header_files_python) = python_formatter.run(apply=args.apply, no_parallel=args.no_parallel, verbose=args.verbose) changed_files.extend(changed_files_python) wrong_header_files.extend(wrong_header_files_python) changed_files.extend(jupyter_formatter.run(apply=args.apply, no_parallel=args.no_parallel, verbose=args.verbose)) if ((len(changed_files) == 0) and (len(wrong_header_files) == 0)): print('All files passed style check') exit(0) if args.apply: if (len(changed_files) != 0): print('Style applied to the following files:') print('\n'.join(changed_files)) if (len(wrong_header_files) != 0): print('Please correct license header *manually* in the following files (see util/check_style.py for the standard header):') print('\n'.join(wrong_header_files)) exit(1) else: error_files_no_duplicates = list(set((changed_files + wrong_header_files))) if (len(error_files_no_duplicates) != 0): print('Style error found in the following files:') print('\n'.join(error_files_no_duplicates)) exit(1)
class LitResnet(pl.LightningModule): def __init__(self, lr=0.1, dataset_size=50000): super().__init__() self.rng = torch.Generator().manual_seed(40) self.lr = lr self.n_classes = 10 self.dims = (3, 32, 32) self.datasize = dataset_size self.model = modified_resnet() self.test_error = (1 - torchmetrics.Accuracy()) self.train_error = (1 - torchmetrics.Accuracy()) self.soft_error = SoftError() self.train_step_num = 0 def forward(self, x): out = self.model(x) return F.log_softmax(out, dim=1) def training_step(self, batch, batch_idx): (x, y) = batch logits = self(x) loss = F.nll_loss(logits, y) preds = torch.argmax(logits, dim=1) self.train_error.update(preds, y) self.log('TrainLoss', loss) self.train_step_num += 1 return loss def training_epoch_end(self, outs): pass def validation_step(self, batch, batch_idx): (x, y) = batch logits = self(x) preds = torch.argmax(logits, dim=1) self.test_error.update(preds, y) self.soft_error.update(torch.exp(logits), y) self.log('TestError', self.test_error, prog_bar=True) self.log('SoftError', self.soft_error) if (self.train_step_num > 0): self.log('TrainError', self.train_error) def predict_step(self, batch, batch_idx, dataloader_idx=0): (x, y) = batch y_hat = self.model(x) return y_hat def configure_optimizers(self): optimizer = torch.optim.SGD(self.parameters(), lr=self.lr, momentum=0.9) max_epochs = ((MAX_STEPS / self.datasize) * BATCH_SZ) scheduler_dict = {'scheduler': torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=max_epochs), 'interval': 'epoch'} return {'optimizer': optimizer, 'lr_scheduler': scheduler_dict} def prepare_data(self): CIFAR10('cifar10/', download=True) def setup(self, stage): train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor()]) self.traindata = CIFAR10('cifar10/', train=True, download=True, transform=train_transforms) self.testdata = CIFAR10('cifar10/', train=False, download=True, transform=transforms.ToTensor()) def train_dataloader(self): (small_trainset, _) = random_split(self.traindata, [self.datasize, (len(self.traindata) - self.datasize)]) return DataLoader(small_trainset, batch_size=BATCH_SZ, num_workers=2, shuffle=True) def val_dataloader(self): return DataLoader(self.testdata, batch_size=BATCH_SZ, num_workers=2)
def get_latest_checkpoint_number(base_directory): glob = os.path.join(base_directory, 'sentinel_checkpoint_complete.*') def extract_iteration(x): return int(x[(x.rfind('.') + 1):]) try: checkpoint_files = tf.gfile.Glob(glob) except tf.errors.NotFoundError: return (- 1) try: latest_iteration = max((extract_iteration(x) for x in checkpoint_files)) return latest_iteration except ValueError: return (- 1)
class AverageMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.lists = [] def update(self, val, n=1): self.val = val self.sum += (val * n) self.count += n self.avg = (self.sum / self.count) for _ in range(0, n): self.lists.append(val) def std(self): std_sum = 0.0 for i in range(0, len(self.lists)): std_sum = (std_sum + pow((self.lists[i] - self.avg), 2)) std_sum = pow((std_sum / self.count), 0.5) return std_sum
class Tee(): def __init__(self, fname, mode='a'): self.stdout = sys.stdout self.file = open(fname, mode) def write(self, message): self.stdout.write(message) self.file.write(message) self.flush() def flush(self): self.stdout.flush() self.file.flush()
class CIFARSEResNet(nn.Module): def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARSEResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for (i, channels_per_stage) in enumerate(channels): stage = nn.Sequential() for (j, out_channels) in enumerate(channels_per_stage): stride = (2 if ((j == 0) and (i != 0)) else 1) stage.add_module('unit{}'.format((j + 1)), SEResUnit(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module('stage{}'.format((i + 1)), stage) self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=8, stride=1)) self.output = nn.Linear(in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for (name, module) in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if (module.bias is not None): init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), (- 1)) x = self.output(x) return x
class LukeForEntitySpanClassification(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ['tf'] def __init__(self, *args, **kwargs): requires_backends(self, ['tf'])
def init_settings(args): args.methods = [x.lower() for x in args.methods] os.makedirs('results', exist_ok=True) if (args.dataset == 'kitti'): if (not args.model): args.model = 'yolov3' if args.tasks: globals.TASKS = args.tasks else: globals.TASKS = config.KITTI_TASKS args.num_severities = max([len(args.fog_severities), len(args.rain_severities), len(args.snow_severities)]) globals.KITTI_SEVERITIES['fog'] = args.fog_severities globals.KITTI_SEVERITIES['rain'] = args.rain_severities globals.KITTI_SEVERITIES['snow'] = args.snow_severities def get_yolo_hyp(): return config.YOLO_HYP.copy() config.YOLO_HYP['lr0'] = args.lr args.yolo_hyp = get_yolo_hyp args.world_size = 1 args.global_rank = (- 1) args.img_size.extend(([args.img_size[(- 1)]] * (2 - len(args.img_size)))) args.total_batch_size = args.batch_size args.nc = 8 args.names = ['Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc'] else: if args.tasks: globals.TASKS = args.tasks else: globals.TASKS = config.ROBUSTNESS_TASKS if (args.dataset in ['imagenet', 'imagenet-mini']): from utils.datasets import ImgNet ImgNet.initial_dir = args.dataset args.num_severities = len(args.robustness_severities) args.severity = None config.ROBUSTNESS_SEVERITIES = args.robustness_severities if ((args.dataset == 'cifar10') and (not args.model)): args.model = 'wrn' elif ((args.dataset == 'cifar10') and (args.model == 'res26')): args.model = 'res26' elif ((args.dataset in ['imagenet', 'imagenet-mini']) and (not args.model)): args.model = 'res18'
def dmcp_resnet18(num_classes=1000, input_size=224, width=None, prob_type='exp'): if (width is None): width = [0.1, 1.0, 0.1] return DMCPResNet(DMCPBasicBlock, [2, 2, 2, 2], num_classes, input_size, width, prob_type)
def log1mexp(x: tf.Tensor, split_point: float=_log1mexp_switch, exp_zero_eps: float=1e-07) -> tf.Tensor: logexpm1_switch = (x > split_point) Z = tf.zeros_like(x) logexpm1 = tf.math.log(tf.clip_by_value((- tf.math.expm1(x[logexpm1_switch])), clip_value_min=1e-323, clip_value_max=float('inf'))) logexpm1_bw = tf.math.log(((- tf.math.expm1(x[logexpm1_switch])) + exp_zero_eps)) Z[logexpm1_switch] = (logexpm1.stop_gradient() + (logexpm1_bw - logexpm1_bw.stop_gradient())) Z[(~ logexpm1_switch)] = tf.math.log1p((- tf.math.exp(x[(~ logexpm1_switch)]))) return Z
('eval', timer=False) def evaluate(dataset, model): with hlog.task('train', timer=False): visualize(make_batch([dataset.sample_comp_train()], dataset.vocab, staged=True), dataset.vocab, model) print()
def train(): model.train() total_loss = 0 start_time = time.time() ntokens = len(corpus.dictionary) hidden = model.init_hidden(args.batch_size) for (batch, i) in enumerate(range(0, (train_data.size(0) - 1), args.bptt)): (data, targets) = get_batch(train_data, i) hidden = repackage_hidden(hidden) model.zero_grad() (output, hidden) = model(data, hidden) loss = criterion(output.view((- 1), ntokens), targets) loss.backward() torch.nn.utils.clip_grad_norm(model.parameters(), args.clip) for p in model.parameters(): p.data.add_((- lr), p.grad.data) total_loss += loss.data if (((batch % args.log_interval) == 0) and (batch > 0)): cur_loss = (total_loss[0] / args.log_interval) elapsed = (time.time() - start_time) print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(epoch, batch, (len(train_data) // args.bptt), lr, ((elapsed * 1000) / args.log_interval), cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time()
class MultiprocessingPdb(pdb.Pdb): _stdin_fd = sys.stdin.fileno() _stdin = None _stdin_lock = multiprocessing.Lock() def __init__(self): pdb.Pdb.__init__(self, nosigint=True) def _cmdloop(self): stdin_bak = sys.stdin with self._stdin_lock: try: if (not self._stdin): self._stdin = os.fdopen(self._stdin_fd) sys.stdin = self._stdin self.cmdloop() finally: sys.stdin = stdin_bak
class RemBertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs): super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): pieces = self.sp_model.EncodeAsPieces(text) return pieces def _convert_token_to_id(self, token): return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): out_string = self.sp_model.decode_pieces(tokens) return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if (token_ids_1 is None): return ((cls + token_ids_0) + sep) return ((((cls + token_ids_0) + sep) + token_ids_1) + sep) def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: if (token_ids_1 is not None): raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.') return [(1 if (x in [self.sep_token_id, self.cls_token_id]) else 0) for x in token_ids_0] if (token_ids_1 is not None): return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1]) return (([1] + ([0] * len(token_ids_0))) + [1]) def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if (token_ids_1 is None): return (len(((cls + token_ids_0) + sep)) * [0]) return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1])) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if (not os.path.isdir(save_directory)): logger.error('Vocabulary path ({}) should be a directory'.format(save_directory)) return out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])) if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
def metric(pred, true): mae = MAE(pred, true) mse = MSE(pred, true) rmse = RMSE(pred, true) mape = MAPE(pred, true) mspe = MSPE(pred, true) return (mae, mse, rmse, mape, mspe)
class GridSamplerMine3dBackwardFunction(Function): def forward(ctx, input, grid, grad_output): ctx.save_for_backward(input, grid, grad_output) return GridSamplerMine.backward(input, grid, grad_output, 0, 1) def backward(ctx, grad_output_input, grad_output_grid): (input, grid, grad_output) = ctx.saved_tensors (o0, o1, o2) = GridSamplerMine.dbackward(grad_output_input, grad_output_grid, input, grid, grad_output, 0, 1) return (o0, o1, o2)
class PKT(nn.Module): 'Probabilistic Knowledge Transfer for deep representation learning\n Code from author: def __init__(self): super(PKT, self).__init__() def forward(self, f_s, f_t): return self.cosine_similarity_loss(f_s, f_t) def cosine_similarity_loss(output_net, target_net, eps=1e-07): output_net_norm = torch.sqrt(torch.sum((output_net ** 2), dim=1, keepdim=True)) output_net = (output_net / (output_net_norm + eps)) output_net[(output_net != output_net)] = 0 target_net_norm = torch.sqrt(torch.sum((target_net ** 2), dim=1, keepdim=True)) target_net = (target_net / (target_net_norm + eps)) target_net[(target_net != target_net)] = 0 model_similarity = torch.mm(output_net, output_net.transpose(0, 1)) target_similarity = torch.mm(target_net, target_net.transpose(0, 1)) model_similarity = ((model_similarity + 1.0) / 2.0) target_similarity = ((target_similarity + 1.0) / 2.0) model_similarity = (model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)) target_similarity = (target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)) loss = torch.mean((target_similarity * torch.log(((target_similarity + eps) / (model_similarity + eps))))) return loss
def train_model(epoch, model, dloader, dloader_val, optim, sched): model.train() print('[epoch {:03d}] training ...'.format(epoch)) print('[epoch {:03d}] # batches = {}'.format(epoch, len(dloader))) st = time.time() for (batch_idx, batch_samples) in enumerate(dloader): model.zero_grad() batch_enc_inp = batch_samples['enc_input'].permute(2, 0, 1).to(device) batch_dec_inp = batch_samples['dec_input'].permute(1, 0).to(device) batch_dec_tgt = batch_samples['dec_target'].permute(1, 0).to(device) batch_inp_bar_pos = batch_samples['bar_pos'].to(device) batch_inp_lens = batch_samples['length'] batch_padding_mask = batch_samples['enc_padding_mask'].to(device) batch_rfreq_cls = batch_samples['rhymfreq_cls'].permute(1, 0).to(device) batch_polyph_cls = batch_samples['polyph_cls'].permute(1, 0).to(device) global trained_steps trained_steps += 1 (mu, logvar, dec_logits) = model(batch_enc_inp, batch_dec_inp, batch_inp_bar_pos, batch_rfreq_cls, batch_polyph_cls, padding_mask=batch_padding_mask) if (not constant_kl): kl_beta = beta_cyclical_sched(trained_steps) else: kl_beta = kl_max_beta losses = model.compute_loss(mu, logvar, kl_beta, free_bit_lambda, dec_logits, batch_dec_tgt) if (trained_steps < lr_warmup_steps): curr_lr = ((max_lr * trained_steps) / lr_warmup_steps) optim.param_groups[0]['lr'] = curr_lr else: sched.step((trained_steps - lr_warmup_steps)) losses['total_loss'].backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) optim.step() global recons_loss_ema, kl_loss_ema, kl_raw_ema recons_loss_ema = compute_loss_ema(recons_loss_ema, losses['recons_loss'].item()) kl_loss_ema = compute_loss_ema(kl_loss_ema, losses['kldiv_loss'].item()) kl_raw_ema = compute_loss_ema(kl_raw_ema, losses['kldiv_raw'].item()) print(' -- epoch {:03d} | batch {:03d}: len: {}\n\t * loss = (RC: {:.4f} | KL: {:.4f} | KL_raw: {:.4f}), step = {}, beta: {:.4f} time_elapsed = {:.2f} secs'.format(epoch, batch_idx, batch_inp_lens, recons_loss_ema, kl_loss_ema, kl_raw_ema, trained_steps, kl_beta, (time.time() - st))) if (not (trained_steps % log_interval)): log_data = {'ep': epoch, 'steps': trained_steps, 'recons_loss': recons_loss_ema, 'kldiv_loss': kl_loss_ema, 'kldiv_raw': kl_raw_ema, 'time': (time.time() - st)} log_epoch(os.path.join(ckpt_dir, 'log.txt'), log_data, is_init=(not os.path.exists(os.path.join(ckpt_dir, 'log.txt')))) if (not (trained_steps % val_interval)): vallosses = validate(model, dloader_val) with open(os.path.join(ckpt_dir, 'valloss.txt'), 'a') as f: f.write('[step {}] RC: {:.4f} | KL: {:.4f} | [val] | RC: {:.4f} | KL: {:.4f}\n'.format(trained_steps, recons_loss_ema, kl_raw_ema, np.mean(vallosses[0]), np.mean(vallosses[1]))) model.train() if (not (trained_steps % ckpt_interval)): torch.save(model.state_dict(), os.path.join(params_dir, 'step_{:d}-RC_{:.3f}-KL_{:.3f}-model.pt'.format(trained_steps, recons_loss_ema, kl_raw_ema))) torch.save(optim.state_dict(), os.path.join(optim_dir, 'step_{:d}-RC_{:.3f}-KL_{:.3f}-optim.pt'.format(trained_steps, recons_loss_ema, kl_raw_ema))) print('[epoch {:03d}] training completed\n -- loss = (RC: {:.4f} | KL: {:.4f} | KL_raw: {:.4f})\n -- time elapsed = {:.2f} secs.'.format(epoch, recons_loss_ema, kl_loss_ema, kl_raw_ema, (time.time() - st))) log_data = {'ep': epoch, 'steps': trained_steps, 'recons_loss': recons_loss_ema, 'kldiv_loss': kl_loss_ema, 'kldiv_raw': kl_raw_ema, 'time': (time.time() - st)} log_epoch(os.path.join(ckpt_dir, 'log.txt'), log_data, is_init=(not os.path.exists(os.path.join(ckpt_dir, 'log.txt'))))
def main(): parser = argparse.ArgumentParser(description='Create an AWS instance to run Ithemal') parser.add_argument('identity', help='Key identity to create with') parser.add_argument('-n', '--name', help='Name to start the container with', default=None) parser.add_argument('-t', '--type', help='Instance type to start (default: t2.large)', default='t2.large') parser.add_argument('-f', '--force', help='Make a new instance without worrying about old instances', default=False, action='store_true') parser.add_argument('-nc', '--no-connect', help="Don't connect to the instance after it is started", default=False, action='store_true') parser.add_argument('-q', '--queue', metavar='QUEUE_NAME', help='Perform actions consumed from given queue') spot_group = parser.add_mutually_exclusive_group() spot_group.add_argument('--spot-reserved', '-sr', help='Start a spot instance, reserved for a specific duration (between 1 and 6 hours)', type=int, dest='spot', metavar='DURATION') spot_group.add_argument('--spot-preempt', '-sp', help='Start a spot instance, preemptable', action='store_const', const=(- 1), dest='spot') group = parser.add_mutually_exclusive_group() group.add_argument('--prod-ro-db', help='Use the read-only prod database (default)', action='store_true') group.add_argument('--prod-db', help='Use the writeable prod database', action='store_true') group.add_argument('--dev-db', help='Use the development database', action='store_true') args = parser.parse_args() if args.prod_db: db = 'prod' elif args.dev_db: db = 'dev' else: db = 'prod-ro' if (args.spot not in (None, (- 1), 1, 2, 3, 4, 5, 6)): print('Spot duration must be between 1 and 6 hours') return instance_maker = InstanceMaker(args.identity, args.name, args.type, db, args.force, args.no_connect, args.spot, args.queue) instance_maker.start_instance()