code
stringlengths
101
5.91M
class EncodeBlock(nn.Module):
    """Encoder stage: same-padded conv, then optional normalization and activation."""

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 normalization=None, activation=None):
        super().__init__()
        self.c_in = in_channels
        self.c_out = out_channels
        # Assemble the stage as a single sequential pipeline.
        stages = [Conv2dSame(self.c_in, self.c_out, kernel_size, stride)]
        if normalization:
            stages.append(get_norm(normalization, self.c_out))
        if activation:
            stages.append(get_activation(activation))
        self.encode = nn.Sequential(*stages)

    def forward(self, x):
        return self.encode(x)
def refer_expression(captions, n_ground=1, prefix='refer expressions:', sort=True):
    """Build a (source, target) text pair for referring-expression grounding.

    Randomly picks `n_ground` box indices out of len(captions); the source text
    carries the captions (with <extra_id_j> slot markers when n_ground > 1) and
    the target text carries the matching <vis_extra_id_idx> tokens.
    """
    n_boxes = len(captions)
    picked = torch.randperm(n_boxes)[:n_ground]
    if sort:
        # Keep the grounded boxes in ascending index order.
        picked = picked.sort().values
    chosen = picked.tolist()

    src_parts = [prefix]
    tgt_parts = []
    if n_ground == 1:
        only = chosen[0]
        src_parts.append(f'{captions[only]}')
        tgt_parts.append(f'<vis_extra_id_{only}>')
    else:
        for slot, box_idx in enumerate(chosen):
            src_parts.append(f'<extra_id_{slot}>')
            src_parts.append(f'{captions[box_idx]}')
            tgt_parts.append(f'<vis_extra_id_{box_idx}>')
    return ' '.join(src_parts), ' '.join(tgt_parts)
def build_dataloader(dataset, collate_fn, is_train, batch_size,
                     n_workers=None, worker_init_fn=None, use_sampler=True):
    """Create a (optionally distributed) DataLoader with a per-rank batch size.

    `batch_size` is the global batch size; it is divided by the world size.
    Training uses the padding DistributedSampler, eval the no-padding variant.
    """
    per_rank_batch = batch_size // dist.get_world_size()
    shared = dict(batch_size=per_rank_batch, num_workers=n_workers,
                  pin_memory=True, collate_fn=collate_fn,
                  drop_last=is_train, worker_init_fn=worker_init_fn)
    if use_sampler:
        sampler = DistributedSampler(dataset) if is_train else DistributedSampler_wopadding(dataset)
        return DataLoader(dataset, sampler=sampler, **shared)
    return DataLoader(dataset, **shared)
def make_dataloaders(cfg, mode='train', distributed=False, num_replicas=None,
                     rank=None, expose_sampler=False):
    """Build one dataloader per entry in cfg.DATASET.

    Each dataset gets a deep-copied config with its own DATASET node and the
    matching per-dataset batch sizes for train/val/test.
    """
    loaders = []
    for idx, dataset_cfg in enumerate(cfg.DATASET):
        sub_cfg = deepcopy(cfg)
        sub_cfg.DATASET = dataset_cfg
        # Select this dataset's batch sizes from the parallel lists.
        sub_cfg.TRAIN.BATCH_IMAGES = cfg.TRAIN.BATCH_IMAGES[idx]
        sub_cfg.VAL.BATCH_IMAGES = cfg.VAL.BATCH_IMAGES[idx]
        sub_cfg.TEST.BATCH_IMAGES = cfg.TEST.BATCH_IMAGES[idx]
        loaders.append(make_dataloader(sub_cfg, mode=mode, distributed=distributed,
                                       num_replicas=num_replicas, rank=rank,
                                       expose_sampler=expose_sampler))
    return loaders
class TestPytorchEstimator(TestCase):
    """End-to-end sanity tests for the Orca PyTorch Estimator (ray backend)."""

    def setUp(self):
        # Fresh Orca/ray context per test, attached to the local cluster.
        init_orca_context(runtime='ray', address='localhost:6379')

    def tearDown(self):
        stop_orca_context()

    def _run_sanity_check(self, estimator):
        """Evaluate, train one epoch, re-evaluate; assert loss fell and accuracy rose.

        Extracted helper: the original repeated this exact sequence in every test.
        """
        start_val_stats = estimator.evaluate(val_data_loader, batch_size=32)
        print(start_val_stats)
        train_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
        print(train_stats)
        end_val_stats = estimator.evaluate(val_data_loader, batch_size=32)
        print(end_val_stats)
        assert (0 < end_val_stats['Accuracy'] < 1)
        assert estimator.get_model()
        dloss = (end_val_stats['val_loss'] - start_val_stats['val_loss'])
        dacc = (end_val_stats['Accuracy'] - start_val_stats['Accuracy'])
        print(f'dLoss: {dloss}, dAcc: {dacc}')
        assert (dloss < 0 < dacc), 'training sanity check failed. loss increased!'

    def test_train(self):
        estimator = Estimator.from_torch(model=get_model, optimizer=get_optimizer,
                                         loss=nn.BCELoss(), metrics=Accuracy(),
                                         config={'lr': 0.01}, workers_per_node=2,
                                         backend='ray', sync_stats=True)
        self._run_sanity_check(estimator)

    def test_singlelist_input(self):
        estimator = Estimator.from_torch(model=get_model, optimizer=get_optimizer,
                                         loss=nn.BCELoss(), metrics=Accuracy(),
                                         config={'lr': 0.01,
                                                 'model': 'SingleListInputModel',
                                                 'dataset': 'SingleListDataset',
                                                 'nested_input': True},
                                         workers_per_node=2, backend='ray',
                                         sync_stats=True)
        self._run_sanity_check(estimator)
def test_multi_input(self):
    """Sanity-check training with MultiInputModel on SingleListDataset (flat inputs)."""
    estimator = Estimator.from_torch(
        model=get_model, optimizer=get_optimizer, loss=nn.BCELoss(),
        metrics=Accuracy(),
        config={'lr': 0.01, 'model': 'MultiInputModel',
                'dataset': 'SingleListDataset', 'nested_input': False},
        workers_per_node=2, backend='ray', sync_stats=True)
    before = estimator.evaluate(val_data_loader, batch_size=32)
    print(before)
    fit_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
    print(fit_stats)
    after = estimator.evaluate(val_data_loader, batch_size=32)
    print(after)
    assert 0 < after['Accuracy'] < 1
    assert estimator.get_model()
    dloss = after['val_loss'] - before['val_loss']
    dacc = after['Accuracy'] - before['Accuracy']
    print(f'dLoss: {dloss}, dAcc: {dacc}')
    assert dloss < 0 < dacc, 'training sanity check failed. loss increased!'

def test_dict_input(self):
    """Sanity-check training with DictInputNet on LinearDataset (nested inputs)."""
    estimator = Estimator.from_torch(
        model=get_model, optimizer=get_optimizer, loss=nn.BCELoss(),
        metrics=Accuracy(),
        config={'lr': 0.01, 'model': 'DictInputNet',
                'dataset': 'LinearDataset', 'nested_input': True},
        workers_per_node=2, backend='ray', sync_stats=True)
    before = estimator.evaluate(val_data_loader, batch_size=32)
    print(before)
    fit_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
    print(fit_stats)
    after = estimator.evaluate(val_data_loader, batch_size=32)
    print(after)
    assert 0 < after['Accuracy'] < 1
    assert estimator.get_model()
    dloss = after['val_loss'] - before['val_loss']
    dacc = after['Accuracy'] - before['Accuracy']
    print(f'dLoss: {dloss}, dAcc: {dacc}')
    assert dloss < 0 < dacc, 'training sanity check failed. loss increased!'
def test_complicated_input(self):
    """Sanity-check training with ComplicatedInputModel/ComplicatedInputDataset."""
    estimator = Estimator.from_torch(
        model=get_model, optimizer=get_optimizer, loss=nn.BCELoss(),
        metrics=Accuracy(),
        config={'lr': 0.01, 'model': 'ComplicatedInputModel',
                'dataset': 'ComplicatedInputDataset'},
        workers_per_node=2, backend='ray', sync_stats=True)
    before = estimator.evaluate(val_data_loader, batch_size=32)
    print(before)
    fit_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
    print(fit_stats)
    after = estimator.evaluate(val_data_loader, batch_size=32)
    print(after)
    assert 0 < after['Accuracy'] < 1
    assert estimator.get_model()
    dloss = after['val_loss'] - before['val_loss']
    dacc = after['Accuracy'] - before['Accuracy']
    print(f'dLoss: {dloss}, dAcc: {dacc}')
    assert dloss < 0 < dacc, 'training sanity check failed. loss increased!'

def test_complicated_output(self):
    """Sanity-check training with multi-output model, custom loss and metric."""
    estimator = Estimator.from_torch(
        model=get_model, optimizer=get_optimizer,
        loss=(lambda _: MultiInputLoss()), metrics=CustomAccuracy(),
        config={'lr': 0.01, 'model': 'MultiOutputModel',
                'dataset': 'MultiTargetDataset', 'nested_input': False},
        workers_per_node=2, backend='ray', sync_stats=True)
    before = estimator.evaluate(val_data_loader, batch_size=32)
    print(before)
    fit_stats = estimator.fit(train_data_loader, epochs=1, batch_size=32)
    print(fit_stats)
    after = estimator.evaluate(val_data_loader, batch_size=32)
    print(after)
    assert 0 < after['Accuracy'] < 1
    assert estimator.get_model()
    dloss = after['val_loss'] - before['val_loss']
    dacc = after['Accuracy'] - before['Accuracy']
    print(f'dLoss: {dloss}, dAcc: {dacc}')
    assert dloss < 0 < dacc, 'training sanity check failed. loss increased!'
def classifier_regularize(whichclass, batch):
    """One adversarial-regularization step on the autoencoder.

    Encodes `batch`, asks the classifier for scores, and backprops a BCE loss
    toward the *flipped* class label so the encoder hides class information.
    Returns the regularization loss tensor.
    """
    autoencoder.train()
    autoencoder.zero_grad()
    (source, target, lengths) = batch
    source = to_gpu(args.cuda, Variable(source))
    target = to_gpu(args.cuda, Variable(target))
    # Map class 1 -> 1 and class 3 -> 1? No: abs(2 - c) flips 1<->3 style labels
    # around 2 (e.g. 1 -> 1, 3 -> 1 only if classes are 1 and 3).
    # NOTE(review): assumes whichclass in {1, 3} so flippedclass in {1} — confirm caller.
    flippedclass = abs((2 - whichclass))
    labels = to_gpu(args.cuda, Variable(torch.zeros(source.size(0)).fill_(flippedclass)))
    # Encode only; gradient hook scales the gradient flowing into the encoder.
    code = autoencoder(0, source, lengths, noise=False, encode_only=True)
    code.register_hook(grad_hook_cla)
    scores = classifier(code)
    classify_reg_loss = F.binary_cross_entropy(scores.squeeze(1), labels)
    classify_reg_loss.backward()
    # FIX: clip_grad_norm was deprecated (and later removed); use the in-place
    # clip_grad_norm_ which has identical semantics.
    torch.nn.utils.clip_grad_norm_(autoencoder.parameters(), args.clip)
    optimizer_ae.step()
    return classify_reg_loss
@torch.no_grad()  # NOTE(review): source had truncated "_grad()" residue — presumably @torch.no_grad(); confirm
def validate(model, val_dataloader):
    """Run referring-expression evaluation; return (metrics dict, predictions).

    Accuracy counts a prediction correct when the predicted box has IoU > 0.5
    with the target box. Aggregates counts across distributed workers.
    """
    LOGGER.info('start running evaluation.')  # was an f-string with no placeholders
    model.eval()
    tot_score = 0
    n_ex = 0
    st = time()
    predictions = {}
    for (i, batch) in enumerate(val_dataloader):
        (*batch_inputs, tgt_box_list, obj_boxes_list, sent_ids) = batch
        scores = model(*batch_inputs, targets=None, compute_loss=False)
        ixs = torch.argmax(scores, 1).cpu().detach().numpy()
        for (ix, obj_boxes, tgt_box, sent_id) in zip(ixs, obj_boxes_list, tgt_box_list, sent_ids):
            pred_box = obj_boxes[ix]
            # BUG FIX: was predictions['sent_id'] = ..., which stored every
            # result under the same literal key; key by the actual sent_id.
            predictions[sent_id] = {'pred_box': pred_box.tolist(),
                                    'tgt_box': tgt_box.tolist()}
            if (val_dataloader.loader.dataset.computeIoU(pred_box, tgt_box) > 0.5):
                tot_score += 1
            n_ex += 1
    tot_time = (time() - st)
    # Sum correct counts and example counts across all distributed workers.
    tot_score = sum(all_gather_list(tot_score))
    n_ex = sum(all_gather_list(n_ex))
    val_acc = (tot_score / n_ex)
    val_log = {'valid/acc': val_acc, 'valid/ex_per_s': (n_ex / tot_time)}
    model.train()
    LOGGER.info(f'validation ({n_ex} sents) finished in {int(tot_time)} seconds, accuracy: {(val_acc * 100):.2f}%')
    return (val_log, predictions)
class ConvexSortFunction(Function):
    """Autograd wrapper around the convex_ext.convex_sort CUDA/C++ op.

    The returned indices are integral and marked non-differentiable.
    """

    @staticmethod  # modern torch.autograd.Function requires static forward/backward
    def forward(ctx, pts, masks, circular):
        idx = convex_ext.convex_sort(pts, masks, circular)
        ctx.mark_non_differentiable(idx)
        return idx

    @staticmethod
    def backward(ctx, grad_output):
        # FIX: backward must return one gradient per forward input
        # (pts, masks, circular); the original returned an empty tuple.
        return None, None, None
def plot_spectrogram_to_numpy(spectrogram):
    """Render a spectrogram to an RGB numpy image (H, W, 3) via matplotlib Agg.

    Lazily switches matplotlib to the headless 'Agg' backend on first call and
    silences its logger (guarded by the module-level MATPLOTLIB_FLAG).
    """
    global MATPLOTLIB_FLAG
    if not MATPLOTLIB_FLAG:
        import matplotlib
        matplotlib.use('Agg')
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np
    (fig, ax) = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect='auto', origin='lower', interpolation='none')
    plt.colorbar(im, ax=ax)
    plt.xlabel('Frames')
    plt.ylabel('Channels')
    plt.tight_layout()
    fig.canvas.draw()
    # FIX: np.fromstring on binary buffers is deprecated (removed in newer
    # numpy); np.frombuffer reads the rendered canvas identically.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data
def max_change(model, max_param_change=2.0, max_change_scale=1.0, scale=1.0):
    # Kaldi-style "max-change" gradient control: first cap each parameter's
    # gradient norm individually, then cap the overall update magnitude at
    # max_param_change. Gradients are rescaled *in place*; nothing is returned.
    # NOTE(review): expects every parameter to have a populated .grad.
    scale_factors = []
    num_components_updated = 0
    for (i, p) in enumerate(model.parameters()):
        if (i == 0):
            # Lazily promote the scalar arguments to tensors on the model's device.
            device = p.device
            max_param_change = torch.tensor(max_param_change, device=device, requires_grad=False)
            max_change_scale = torch.tensor(max_change_scale, device=device, requires_grad=False)
            parameter_delta_sq = torch.tensor(0.0, device=device, requires_grad=False)
            scale = torch.tensor(scale, device=device, requires_grad=False)
        dp2 = (p.grad.data * p.grad.data).sum()  # squared L2 norm of this grad
        dp = dp2.pow(0.5)
        # Per-parameter cap: tensors with a 2344-sized dimension get a larger cap.
        # NOTE(review): 2344 presumably identifies the output layer — confirm.
        if ((len(p.shape) == 2) and (p.shape[1] == 2344)):
            max_change = torch.tensor(1.5, device=device, requires_grad=False)
        elif ((len(p.shape) == 1) and (p.shape[0] == 2344)):
            max_change = torch.tensor(1.5, device=device, requires_grad=False)
        else:
            max_change = torch.tensor(0.75, device=device, requires_grad=False)
        if ((dp * scale) > (max_change * max_change_scale)):
            # Gradient exceeds its cap: shrink it to exactly the cap.
            sf = ((max_change * max_change_scale) / (dp * scale))
            scale_factors.append(sf)
            num_components_updated += 1
        else:
            scale_factors.append(torch.tensor(1.0, device=device))
        # Accumulate the (already per-parameter-scaled) squared update size.
        parameter_delta_sq += (scale_factors[(- 1)].pow(2.0) * dp2)
    # Global cap on the total update magnitude across all parameters.
    parameter_delta = (torch.sqrt(parameter_delta_sq) * torch.abs(scale))
    print('Parameter delta', parameter_delta)
    assert (not torch.isnan(parameter_delta))
    assert (not torch.isinf(parameter_delta))
    if (parameter_delta > (max_param_change * max_change_scale)):
        scale.mul_(((max_param_change * max_change_scale) / parameter_delta))
    print('Scale ', scale, num_components_updated)
    # Fold the global scale into every per-parameter factor, then apply in place.
    for sf in scale_factors:
        sf.mul_(scale)
    print('Factors ', scale_factors)
    for (sf, p) in zip(scale_factors, model.parameters()):
        p.grad.data.mul_(sf)
@dataclass  # NOTE(review): decorator was stripped in the source dump; required for field() defaults to take effect
class MyTrainingArguments(TrainingArguments):
    """Training arguments for STAR passage training, with project defaults."""

    output_dir: str = field(default='./data/passage/star_train/models')
    logging_dir: str = field(default='./data/passage/star_train/log')
    padding: bool = field(default=False)
    optimizer_str: str = field(default='lamb')
    overwrite_output_dir: bool = field(default=False)
    per_device_train_batch_size: int = field(default=256, metadata={'help': 'Batch size per GPU/TPU core/CPU for training.'})
    gradient_accumulation_steps: int = field(default=1, metadata={'help': 'Number of updates steps to accumulate before performing a backward/update pass.'})
    learning_rate: float = field(default=0.0001, metadata={'help': 'The initial learning rate for Adam.'})
    weight_decay: float = field(default=0.01, metadata={'help': 'Weight decay if we apply some.'})
    adam_beta1: float = field(default=0.9, metadata={'help': 'Beta1 for Adam optimizer'})
    adam_beta2: float = field(default=0.999, metadata={'help': 'Beta2 for Adam optimizer'})
    adam_epsilon: float = field(default=1e-08, metadata={'help': 'Epsilon for Adam optimizer.'})
    max_grad_norm: float = field(default=1.0, metadata={'help': 'Max gradient norm.'})
    num_train_epochs: float = field(default=100.0, metadata={'help': 'Total number of training epochs to perform.'})
    # FIX: help string was split by a stray newline in the source.
    max_steps: int = field(default=-1, metadata={'help': 'If > 0: set total number of training steps to perform. Override num_train_epochs.'})
    warmup_steps: int = field(default=1000, metadata={'help': 'Linear warmup over warmup_steps.'})
    logging_first_step: bool = field(default=False, metadata={'help': 'Log and eval the first global_step'})
    logging_steps: int = field(default=50, metadata={'help': 'Log every X updates steps.'})
    # FIX: default value was missing (syntax error: "default=,"); 500 is the
    # upstream TrainingArguments default — TODO confirm intended value.
    save_steps: int = field(default=500, metadata={'help': 'Save checkpoint every X updates steps.'})
    no_cuda: bool = field(default=False, metadata={'help': 'Do not use CUDA even when it is available'})
    seed: int = field(default=42, metadata={'help': 'random seed for initialization'})
    fp16: bool = field(default=False, metadata={'help': 'Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit'})
    local_rank: int = field(default=(- 1), metadata={'help': 'For distributed training: local_rank'})
class GCN(nn.Module):
    """Two-layer graph convolutional network (Kipf & Welling)."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout  # dropout probability between the two layers

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        # FIX: self.dropout was stored but never applied; the standard GCN
        # applies dropout between layers (no-op in eval mode).
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return x
def valid_raw_data() -> Dict[str, Dict[str, Any]]:
    """Load and return the mock-data JSON fixture used by the tests."""
    with open('tests/mock_data_test.json') as handle:
        return json.load(handle)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """Build a torchvision VGG-16; load pretrained weights unless pretrained is None."""
    model = models.vgg16(pretrained=False)
    if pretrained is None:
        return model
    settings = pretrained_settings['vgg16'][pretrained]
    return load_pretrained(model, num_classes, settings)
class TestPSAMask(object):
    """Regression tests for mmcv's PSAMask op against saved binary fixtures."""

    def _run_psa_case(self, mode, expected_path):
        """Run one PSAMask mode on CPU then CUDA; compare to the saved output.

        Extracted helper: the two original tests were identical except for the
        mask mode and the expected-output file.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import PSAMask
        test_loss = Loss()
        input = np.fromfile('tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        expected = np.fromfile(expected_path, dtype=np.float32)
        input = input.reshape((4, 16, 8, 8))
        expected = expected.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))
        input = torch.FloatTensor(input)
        input.requires_grad = True
        psamask = PSAMask(mode, (4, 4))
        # CPU forward/backward.
        test_output = psamask(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, expected)
        assert test_output.shape == expected.shape
        # CUDA forward/backward must match the same fixture.
        psamask.cuda()
        input = input.cuda()
        label = label.cuda()
        test_output = psamask(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, expected)
        assert test_output.shape == expected.shape

    def test_psa_mask_collect(self):
        self._run_psa_case('collect', 'tests/data/for_psa_mask/psa_output_collect.bin')

    def test_psa_mask_distribute(self):
        self._run_psa_case('distribute', 'tests/data/for_psa_mask/psa_output_distribute.bin')
def pytorch_call(device):
    """Decorator factory: move arguments to `device` tensors, call, return numpy.

    Usage: @pytorch_call(device) on a function that expects tensors.
    """
    def wrap(function):
        def call(*args, **kwargs):
            tensor_args = to_tensor(args, device)
            tensor_kwargs = to_tensor(kwargs, device)
            return to_numpy(function(*tensor_args, **tensor_kwargs))
        return call
    return wrap
def _convert_responses_to_elastic_constants(response_all: Array) -> Array:
    # Convert a vector of measured deformation responses into the rank-4 elastic
    # constant tensor C: shape (2,2,2,2) for 6 responses (2-D) or (3,3,3,3) for
    # 21 responses (3-D). Variables c<ab><cd> hold the C_{abcd} component.
    # NOTE(review): the linear combinations assume a specific ordering of the
    # applied deformations in response_all — confirm against the caller.
    if (response_all.shape[0] == 6):
        # 2-D: axial responses, then shear and mixed-mode responses.
        cxxxx = response_all[0]
        cyyyy = response_all[1]
        cxyxy = (0.25 * response_all[2])
        cxxyy = (0.5 * ((response_all[3] - cxxxx) - cyyyy))
        cxxxy = (0.25 * ((response_all[4] - (4 * cxyxy)) - cxxxx))
        cyyxy = (0.25 * ((response_all[5] - (4 * cxyxy)) - cyyyy))
        # Assemble the symmetric 2x2x2x2 tensor.
        C = jnp.array([[[[cxxxx, cxxxy], [cxxxy, cxxyy]],
                        [[cxxxy, cxyxy], [cxyxy, cyyxy]]],
                       [[[cxxxy, cxyxy], [cxyxy, cyyxy]],
                        [[cxxyy, cyyxy], [cyyxy, cyyyy]]]])
    elif (response_all.shape[0] == 21):
        # 3-D: 3 axial, 3 shear, then 15 mixed-mode responses.
        cxxxx = response_all[0]
        cyyyy = response_all[1]
        czzzz = response_all[2]
        cyzyz = (response_all[3] / 4.0)
        cxzxz = (response_all[4] / 4.0)
        cxyxy = (response_all[5] / 4.0)
        cyyzz = (((response_all[6] - cyyyy) - czzzz) / 2.0)
        cxxzz = (((response_all[7] - cxxxx) - czzzz) / 2.0)
        cxxyy = (((response_all[8] - cxxxx) - cyyyy) / 2.0)
        cxxyz = (((response_all[9] - cxxxx) - (4.0 * cyzyz)) / 4.0)
        cxxxz = (((response_all[10] - cxxxx) - (4.0 * cxzxz)) / 4.0)
        cxxxy = (((response_all[11] - cxxxx) - (4.0 * cxyxy)) / 4.0)
        cyyyz = (((response_all[12] - cyyyy) - (4.0 * cyzyz)) / 4.0)
        cyyxz = (((response_all[13] - cyyyy) - (4.0 * cxzxz)) / 4.0)
        cyyxy = (((response_all[14] - cyyyy) - (4.0 * cxyxy)) / 4.0)
        czzyz = (((response_all[15] - czzzz) - (4.0 * cyzyz)) / 4.0)
        czzxz = (((response_all[16] - czzzz) - (4.0 * cxzxz)) / 4.0)
        czzxy = (((response_all[17] - czzzz) - (4.0 * cxyxy)) / 4.0)
        cyzxz = (((response_all[18] - (4.0 * cyzyz)) - (4.0 * cxzxz)) / 8.0)
        cyzxy = (((response_all[19] - (4.0 * cyzyz)) - (4.0 * cxyxy)) / 8.0)
        cxzxy = (((response_all[20] - (4.0 * cxzxz)) - (4.0 * cxyxy)) / 8.0)
        # Assemble the full 3x3x3x3 tensor using the minor/major symmetries.
        C = jnp.array([[[[cxxxx, cxxxy, cxxxz], [cxxxy, cxxyy, cxxyz], [cxxxz, cxxyz, cxxzz]],
                        [[cxxxy, cxyxy, cxzxy], [cxyxy, cyyxy, cyzxy], [cxzxy, cyzxy, czzxy]],
                        [[cxxxz, cxzxy, cxzxz], [cxzxy, cyyxz, cyzxz], [cxzxz, cyzxz, czzxz]]],
                       [[[cxxxy, cxyxy, cxzxy], [cxyxy, cyyxy, cyzxy], [cxzxy, cyzxy, czzxy]],
                        [[cxxyy, cyyxy, cyyxz], [cyyxy, cyyyy, cyyyz], [cyyxz, cyyyz, cyyzz]],
                        [[cxxyz, cyzxy, cyzxz], [cyzxy, cyyyz, cyzyz], [cyzxz, cyzyz, czzyz]]],
                       [[[cxxxz, cxzxy, cxzxz], [cxzxy, cyyxz, cyzxz], [cxzxz, cyzxz, czzxz]],
                        [[cxxyz, cyzxy, cyzxz], [cyzxy, cyyyz, cyzyz], [cyzxz, cyzyz, czzyz]],
                        [[cxxzz, czzxy, czzxz], [czzxy, cyyzz, czzyz], [czzxz, czzyz, czzzz]]]])
    else:
        raise AssertionError('response_all has incorrect shape')
    return C
def ms_ssim(X, Y, data_range=255, size_average=True, win_size=11, win_sigma=1.5, win=None, weights=None, K=(0.01, 0.03)):
    # Multi-scale structural similarity (MS-SSIM) between two 4-D image batches.
    # X, Y        : (N, C, H, W) tensors of matching shape and dtype
    # data_range  : dynamic range of pixel values (255 for uint8-style images)
    # size_average: True -> scalar mean; False -> per-image mean over channels
    # win / win_size / win_sigma : explicit 1-D Gaussian window or its parameters
    # weights     : per-scale weights; defaults to the 5-scale values from the
    #               original MS-SSIM paper (Wang et al. 2003)
    # K           : SSIM stability constants (K1, K2)
    if (len(X.shape) != 4):
        raise ValueError('Input images should be 4-d tensors.')
    if (not (X.type() == Y.type())):
        raise ValueError('Input images should have the same dtype.')
    if (not (X.shape == Y.shape)):
        raise ValueError('Input images should have the same dimensions.')
    if (win is not None):
        # An explicit window overrides the win_size argument.
        win_size = win.shape[(- 1)]
    if (not ((win_size % 2) == 1)):
        raise ValueError('Window size should be odd.')
    # Image must survive 4 halvings and still be larger than the window.
    smaller_side = min(X.shape[(- 2):])
    assert (smaller_side > ((win_size - 1) * (2 ** 4))), ('Image size should be larger than %d due to the 4 downsamplings in ms-ssim' % ((win_size - 1) * (2 ** 4)))
    if (weights is None):
        weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
    weights = torch.FloatTensor(weights).to(X.device, dtype=X.dtype)
    if (win is None):
        win = _fspecial_gauss_1d(win_size, win_sigma)
        win = win.repeat(X.shape[1], 1, 1, 1)  # one window per channel
    levels = weights.shape[0]
    mcs = []
    for i in range(levels):
        (ssim_per_channel, cs) = _ssim(X, Y, win=win, data_range=data_range, size_average=False, K=K)
        if (i < (levels - 1)):
            # Keep the contrast-structure term for this scale, then halve
            # resolution for the next one (pad odd dimensions first).
            mcs.append(torch.relu(cs))
            padding = ((X.shape[2] % 2), (X.shape[3] % 2))
            X = F.avg_pool2d(X, kernel_size=2, padding=padding)
            Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
    # Combine: product over scales of (cs_i ^ w_i), with full SSIM at the last scale.
    ssim_per_channel = torch.relu(ssim_per_channel)
    mcs_and_ssim = torch.stack((mcs + [ssim_per_channel]), dim=0)
    ms_ssim_val = torch.prod((mcs_and_ssim ** weights.view((- 1), 1, 1)), dim=0)
    if size_average:
        return ms_ssim_val.mean()
    else:
        return ms_ssim_val.mean(1)
def make_logger(log_dir: Path = None, mode: str = 'train') -> str:
    """Configure the root logger (file + stream handlers) once; return version.

    Idempotent: if the root logger already has handlers, nothing is added.
    The file handler is only attached when `log_dir` is an existing directory.
    """
    logger = logging.getLogger('')
    version = pkg_resources.require('joeynmt')[0].version
    if logger.handlers:
        return version
    logger.setLevel(level=logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
    if log_dir is not None and log_dir.is_dir():
        log_file = log_dir / f'{mode}.log'
        file_handler = logging.FileHandler(log_file.as_posix(), encoding='utf-8')
        file_handler.setLevel(level=logging.DEBUG)
        logger.addHandler(file_handler)
        file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    logger.info('Hello! This is Joey-NMT (version %s).', version)
    return version
class RawDatasetSwbdSre(data.Dataset):
    """Dataset of per-utterance features stored in a single HDF5 file.

    `list_file` holds one utterance id per line; each id is a key in `raw_file`.
    """

    def __init__(self, raw_file, list_file):
        self.raw_file = raw_file
        with open(list_file) as handle:
            self.utts = [line.strip() for line in handle]

    def __len__(self):
        return len(self.utts)

    def __getitem__(self, index):
        utt_id = self.utts[index]
        # FIX: the HDF5 handle was opened but never closed (one leaked handle
        # per item); the [:] slice copies the data, so closing is safe.
        with h5py.File(self.raw_file, 'r') as h5f:
            return h5f[utt_id][:]
_module()  # NOTE(review): truncated decorator residue — likely "@MODELS.register_module()"; confirm upstream
class CrossKDRetinaNet(CrossKDSingleStageDetector):
    # CrossKD distillation for RetinaNet: intermediate student head features are
    # pushed through the (frozen) teacher head so the KD losses compare
    # predictions produced in the teacher's output space.

    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Union[(dict, list)]:
        # Teacher forward: keep both final predictions and the held
        # intermediate features at reused_teacher_head_idx.
        tea_x = self.teacher.extract_feat(batch_inputs)
        (tea_cls_scores, tea_bbox_preds, tea_cls_hold, tea_reg_hold) = multi_apply(
            self.forward_crosskd_single, tea_x, module=self.teacher)
        # Student forward with the same feature-holding scheme.
        stu_x = self.extract_feat(batch_inputs)
        (stu_cls_scores, stu_bbox_preds, stu_cls_hold, stu_reg_hold) = multi_apply(
            self.forward_crosskd_single, stu_x, module=self)
        # Re-run the tail of the teacher head on (scale-aligned) student features.
        (reused_cls_scores, reused_bbox_preds) = multi_apply(
            self.reuse_teacher_head, tea_cls_hold, tea_reg_hold, stu_cls_hold, stu_reg_hold)
        outputs = unpack_gt_instances(batch_data_samples)
        (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs
        losses = self.loss_by_feat(tea_cls_scores, tea_bbox_preds, tea_x,
                                   stu_cls_scores, stu_bbox_preds, stu_x,
                                   reused_cls_scores, reused_bbox_preds,
                                   batch_gt_instances, batch_img_metas,
                                   batch_gt_instances_ignore)
        return losses

    def forward_crosskd_single(self, x, module):
        # Run one FPN level through `module`'s head, capturing the pre-activation
        # feature after conv #reused_teacher_head_idx for both branches.
        (cls_feat, reg_feat) = (x, x)
        (cls_feat_hold, reg_feat_hold) = (x, x)
        for (i, cls_conv) in enumerate(module.bbox_head.cls_convs):
            cls_feat = cls_conv(cls_feat, activate=False)
            if ((i + 1) == self.reused_teacher_head_idx):
                cls_feat_hold = cls_feat  # held *before* activation
            cls_feat = cls_conv.activate(cls_feat)
        for (i, reg_conv) in enumerate(module.bbox_head.reg_convs):
            reg_feat = reg_conv(reg_feat, activate=False)
            if ((i + 1) == self.reused_teacher_head_idx):
                reg_feat_hold = reg_feat
            reg_feat = reg_conv.activate(reg_feat)
        cls_score = module.bbox_head.retina_cls(cls_feat)
        bbox_pred = module.bbox_head.retina_reg(reg_feat)
        return (cls_score, bbox_pred, cls_feat_hold, reg_feat_hold)

    def reuse_teacher_head(self, tea_cls_feat, tea_reg_feat, stu_cls_feat, stu_reg_feat):
        # Align student feature statistics to the teacher's, then finish the
        # teacher head from the reuse point onward.
        reused_cls_feat = self.align_scale(stu_cls_feat, tea_cls_feat)
        reused_reg_feat = self.align_scale(stu_reg_feat, tea_reg_feat)
        if (self.reused_teacher_head_idx != 0):
            # Held features are pre-activation; re-apply the activation here.
            reused_cls_feat = F.relu(reused_cls_feat)
            reused_reg_feat = F.relu(reused_reg_feat)
        module = self.teacher.bbox_head
        for i in range(self.reused_teacher_head_idx, module.stacked_convs):
            reused_cls_feat = module.cls_convs[i](reused_cls_feat)
            reused_reg_feat = module.reg_convs[i](reused_reg_feat)
        reused_cls_score = module.retina_cls(reused_cls_feat)
        reused_bbox_pred = module.retina_reg(reused_reg_feat)
        return (reused_cls_score, reused_bbox_pred)

    def align_scale(self, stu_feat, tea_feat):
        # Per-channel whitening of the student feature, re-colored with the
        # teacher's per-channel mean/std (statistics over batch and space).
        (N, C, H, W) = stu_feat.size()
        stu_feat = stu_feat.permute(1, 0, 2, 3).reshape(C, (- 1))
        stu_mean = stu_feat.mean(dim=(- 1), keepdim=True)
        stu_std = stu_feat.std(dim=(- 1), keepdim=True)
        stu_feat = ((stu_feat - stu_mean) / (stu_std + 1e-06))
        tea_feat = tea_feat.permute(1, 0, 2, 3).reshape(C, (- 1))
        tea_mean = tea_feat.mean(dim=(- 1), keepdim=True)
        tea_std = tea_feat.std(dim=(- 1), keepdim=True)
        stu_feat = ((stu_feat * tea_std) + tea_mean)
        return stu_feat.reshape(C, N, H, W).permute(1, 0, 2, 3)

    def loss_by_feat(self, tea_cls_scores: List[Tensor], tea_bbox_preds: List[Tensor],
                     tea_feats: List[Tensor], cls_scores: List[Tensor],
                     bbox_preds: List[Tensor], feats: List[Tensor],
                     reused_cls_scores: List[Tensor], reused_bbox_preds: List[Tensor],
                     batch_gt_instances: InstanceList, batch_img_metas: List[dict],
                     batch_gt_instances_ignore: OptInstanceList=None) -> dict:
        # Standard detection losses on the student, plus prediction-mimicking KD
        # losses and optional feature-distillation losses.
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.bbox_head.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.bbox_head.get_anchors(featmap_sizes, batch_img_metas, device=device)
        cls_reg_targets = self.bbox_head.get_targets(anchor_list, valid_flag_list,
                                                     batch_gt_instances, batch_img_metas,
                                                     batch_gt_instances_ignore=batch_gt_instances_ignore)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(cat_boxes(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors)
        # Supervised detection loss on student predictions.
        (losses_cls, losses_bbox) = multi_apply(self.bbox_head.loss_by_feat_single,
                                                cls_scores, bbox_preds, all_anchor_list,
                                                labels_list, label_weights_list,
                                                bbox_targets_list, bbox_weights_list,
                                                avg_factor=avg_factor)
        losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # KD: make the reused (student-through-teacher-head) predictions match
        # the teacher's own predictions.
        (losses_cls_kd, losses_reg_kd) = multi_apply(self.pred_mimicking_loss_single,
                                                     tea_cls_scores, tea_bbox_preds,
                                                     reused_cls_scores, reused_bbox_preds,
                                                     all_anchor_list, label_weights_list,
                                                     avg_factor=avg_factor)
        losses.update(dict(loss_cls_kd=losses_cls_kd, loss_reg_kd=losses_reg_kd))
        if self.with_feat_distill:
            losses_feat_kd = [self.loss_feat_kd(feat, tea_feat)
                              for (feat, tea_feat) in zip(feats, tea_feats)]
            losses.update(loss_feat_kd=losses_feat_kd)
        return losses

    def pred_mimicking_loss_single(self, tea_cls_score, tea_bbox_pred,
                                   reused_cls_score, reused_bbox_pred,
                                   anchors, label_weights, avg_factor):
        # Classification KD on flattened per-anchor scores.
        tea_cls_score = tea_cls_score.permute(0, 2, 3, 1).reshape((- 1), self.bbox_head.cls_out_channels)
        reused_cls_score = reused_cls_score.permute(0, 2, 3, 1).reshape((- 1), self.bbox_head.cls_out_channels)
        label_weights = label_weights.reshape((- 1))
        loss_cls_kd = self.loss_cls_kd(reused_cls_score, tea_cls_score, label_weights, avg_factor=avg_factor)
        # Regression KD in decoded box space, weighted by teacher confidence.
        bbox_coder = self.bbox_head.bbox_coder
        tea_bbox_pred = tea_bbox_pred.permute(0, 2, 3, 1).reshape((- 1), bbox_coder.encode_size)
        reused_bbox_pred = reused_bbox_pred.permute(0, 2, 3, 1).reshape((- 1), bbox_coder.encode_size)
        anchors = anchors.reshape((- 1), anchors.size((- 1)))
        tea_bbox_pred = bbox_coder.decode(anchors, tea_bbox_pred)
        reused_bbox_pred = bbox_coder.decode(anchors, reused_bbox_pred)
        reg_weights = tea_cls_score.max(dim=1)[0].sigmoid()
        reg_weights[(label_weights == 0)] = 0  # ignore anchors with zero label weight
        loss_reg_kd = self.loss_reg_kd(reused_bbox_pred, tea_bbox_pred, reg_weights, avg_factor=avg_factor)
        return (loss_cls_kd, loss_reg_kd)
def _num_samples(x): if hasattr(x, 'fit'): raise TypeError(('Expected sequence or array-like, got estimator %s' % x)) if ((not hasattr(x, '__len__')) and (not hasattr(x, 'shape'))): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError(('Expected sequence or array-like, got %s' % type(x))) if hasattr(x, 'shape'): if (len(x.shape) == 0): raise TypeError(('Singleton array %r cannot be considered a valid collection.' % x)) return x.shape[0] else: return len(x)
class UnitArrayUniformRange(UniformRange, Range[np.ndarray]):
    """Uniform range whose values are single-element numpy arrays."""

    def values(self) -> List[np.ndarray]:
        grid = np.arange(self.start, self.end, self.step, dtype=self.dtype)
        return [np.array([point]) for point in grid]
def constant(duration: int, amp: complex, name: str=None) -> SamplePulse:
    """Return a SamplePulse holding the complex amplitude `amp` for `duration` samples.

    Thin wrapper around this module's _sampled_constant_pulse generator.
    """
    return _sampled_constant_pulse(duration, amp, name=name)
def move_double_solution_cursor(idx, vrblvl=0):
    """Move PHCpack's double-precision solution cursor to position `idx`.

    Dispatches PHCpack job 454 through the shared-library entry point and
    returns the (possibly updated) index written back by the C side.
    vrblvl > 0 enables diagnostic printing.
    """
    if vrblvl > 0:
        print('in move_double_solution_cursor, idx :', idx)
    phc = get_phcfun()
    idx_par = pointer(c_int32(idx))
    int_par = pointer(c_int32(0))
    dbl_par = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> move_double_solution_cursor calls phc', end='')
    retval = phc(454, idx_par, int_par, dbl_par, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return idx_par[0]
def test_modal_datamodule_audio_param_dataset_train(fs, mocker):
    """Train loader of AudioWithParametersDataset yields (audio, params) batches."""
    dm = kick_modal_datamodule(fs, mocker, batch_size=8,
                               dataset_class=AudioWithParametersDataset,
                               dataset_kwargs={'parameter_key': 'features'})
    dm.setup('fit')
    loader = dm.train_dataloader()
    assert isinstance(loader, DataLoader)
    # Stub out file loading: fixed-shape audio and parameter tensors.
    audio_mock = mocker.patch(f'{TESTED_MODULE}.torchaudio.load',
                              return_value=(torch.rand(1, dm.num_samples), dm.sample_rate))
    param_mock = mocker.patch(f'{TESTED_MODULE}.torch.load',
                              return_value=torch.rand(3, 4, 10))
    audio_batch, parameters = next(iter(loader))
    assert audio_batch.shape == (dm.batch_size, 1, dm.num_samples)
    assert parameters.shape == (dm.batch_size, 3, 4, 10)
    # One load call per item in the batch.
    assert audio_mock.call_count == dm.batch_size
    assert param_mock.call_count == dm.batch_size
def updateVocab(words, vocab):
    """Lower-case each word and bump its count in `vocab` (mutated in place, returned)."""
    for token in (w.lower() for w in words):
        vocab[token] = vocab.get(token, 0) + 1
    return vocab
class Sigma_mu_Net(nn.Module):
    """MLP over channels implemented as a stack of 1x1 convolutions with ReLUs."""

    def __init__(self, in_ch, out_ch, mid_ch, layers, kernel_size, bias):
        super(Sigma_mu_Net, self).__init__()
        self.layers = layers
        self.relu = nn.ReLU(inplace=True)
        # Input projection, (layers - 2) hidden blocks, output projection.
        self.lyr = [nn.Conv2d(in_ch, mid_ch, kernel_size=1, bias=bias),
                    nn.ReLU(inplace=True)]
        for _ in range(layers - 2):
            self.lyr.append(nn.Conv2d(mid_ch, mid_ch, kernel_size=1, bias=bias))
            self.lyr.append(nn.ReLU(inplace=True))
        self.lyr.append(nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=bias))
        self.conv = nn.Sequential(*self.lyr)
        init_weights(self.conv)

    def forward(self, x):
        return self.conv(x)
def ReadFileGS(x_axis, tthread, batchInterval, NUM_ITEMS, NUM_ACCESS, key_skewness,
               overlap_ratio, abort_ratio, isCyclic, complexity):
    """Read throughput numbers for three schemes across the complexities in x_axis.

    Returns y = [OPGSA_throughputs, TStream_throughputs, PAT_throughputs], one
    float per complexity value, parsed from the first line of each result file.
    Note: the `complexity` parameter is shadowed by the loop, as in the original.
    """
    # FIX: one loop per scheme instead of three copy-pasted loops, and files
    # are now closed via `with` (they previously leaked).
    y = [[] for _ in range(3)]
    for row, scheme in enumerate(('OPGSA', 'TStream', 'PAT')):
        for complexity in x_axis:
            inputEvents = tthread * batchInterval
            path = getPathGS(scheme, inputEvents, tthread, NUM_ITEMS, NUM_ACCESS,
                             key_skewness, overlap_ratio, abort_ratio, isCyclic,
                             complexity)
            with open(path) as result_file:
                first_line = result_file.readline()
            # First line has the form "<label>: <throughput>".
            y[row].append(float(first_line.split(': ')[1]))
    print(y)
    return y
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-05):
    """2-D convolution with Weight Standardization: each output filter is
    normalized to zero mean and (approximately) unit std before convolving."""
    out_channels = weight.size(0)
    flat = weight.view(out_channels, -1)
    mu = flat.mean(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    sigma = flat.std(dim=1, keepdim=True).view(out_channels, 1, 1, 1)
    standardized = (weight - mu) / (sigma + eps)  # eps guards zero-std filters
    return F.conv2d(input, standardized, bias, stride, padding, dilation, groups)
def sequence_palette():
    """Return the fixed RGB -> index palette (22 colors) used for sequence masks."""
    colors = [
        (0, 0, 0), (0, 255, 0), (255, 0, 0), (0, 0, 255), (255, 0, 255),
        (0, 255, 255), (255, 128, 0), (102, 0, 102), (51, 153, 255),
        (153, 153, 255), (153, 153, 0), (178, 102, 255), (204, 0, 204),
        (0, 102, 0), (102, 0, 0), (51, 0, 0), (0, 64, 0), (128, 64, 0),
        (0, 192, 0), (128, 192, 0), (0, 64, 128), (224, 224, 192),
    ]
    return {rgb: idx for idx, rgb in enumerate(colors)}
def safe_subprocess_main_with_flags(flags, func, *args, **kwargs):
    """Initialize a worker process from *flags*, then run *func*.

    When flags.gui is set, matplotlib is switched to the TkAgg backend before
    the worker does any plotting. Returns whatever *func* returns.
    """
    if flags.gui:
        import matplotlib.pyplot as plt
        plt.switch_backend('TkAgg')
    init_worker_process_flags(flags)
    return func(*args, **kwargs)
def standard_complex_sweep(pols, sols, nvar, pars, start, target):
    """Run a complex-parameter sweep on a polynomial system in standard
    double precision via the phcpy C bindings.

    Stores *pols* (with *nvar* variables) and start solutions *sols* into
    phcpy's containers, declares the parameters in *pars* symbolically,
    sweeps from *start* to *target*, and returns the solutions loaded back
    from the container.
    """
    from phcpy.interface import store_standard_solutions as storesols
    from phcpy.interface import store_standard_system as storesys
    # The C layer works on its own containers, so store system + solutions first.
    storesys(pols, nbvar=nvar)
    storesols(nvar, sols)
    from phcpy.interface import load_standard_solutions as loadsols
    from phcpy.phcpy2c3 import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c3 import py2c_sweep_set_standard_start as set_start
    from phcpy.phcpy2c3 import py2c_sweep_set_standard_target as set_target
    from phcpy.phcpy2c3 import py2c_sweep_standard_complex_run as run
    (nbq, nbp) = (len(pols), len(pars))
    # Parameters are passed to the C side as one space-separated string.
    parnames = ' '.join(pars)
    nbc = len(parnames)
    define(nbq, nvar, nbp, nbc, parnames)
    print('setting the start and the target ...')
    set_start(nbp, str(start))
    set_target(nbp, str(target))
    print('calling run in standard double precision ...')
    run(0, 0.0, 0.0)
    result = loadsols()
    return result
def make_scorer(args):
    """Build a Scorer (dot-product scorer + trajectory encoder) from *args*.

    Feature/action-embedding sizes depend on args.useObjLabelOrVis
    ('none' | 'vis' | 'label' | 'both'); optionally loads pretrained scorer
    or trajectory-encoder weights. Uses module-level hidden_size and
    dropout_ratio settings.
    """
    bidirectional = args.bidirectional
    enc_hidden_size = hidden_size // 2 if bidirectional else hidden_size
    if args.useObjLabelOrVis == 'none':
        feature_size, action_embedding_size = 2048 + 128, 2048 + 128
    elif args.useObjLabelOrVis == 'vis':
        feature_size = 2048 + 128 + args.objVisFeatDim
        action_embedding_size = 2048 + 128 + args.objVisFeatDim
    elif args.useObjLabelOrVis == 'label':
        feature_size = 2048 + 128 + args.objLanFeatDim
        action_embedding_size = 2048 + 128 + args.objLanFeatDim
    elif args.useObjLabelOrVis == 'both':
        feature_size = 2048 + 128 + args.objVisFeatDim + args.objLanFeatDim
        action_embedding_size = 2048 + args.objVisFeatDim + args.objLanFeatDim + 128
    traj_encoder = try_cuda(SpeakerEncoderLSTM(action_embedding_size, feature_size,
                                               enc_hidden_size, dropout_ratio,
                                               bidirectional=args.bidirectional))
    scorer_module = try_cuda(DotScorer(enc_hidden_size, enc_hidden_size))
    scorer = Scorer(scorer_module, traj_encoder)
    # BUG FIX: the original used `is not ''`, which compares identity, not
    # equality (and is a SyntaxWarning on Python >= 3.8); use `!=`.
    if args.load_scorer != '':
        scorer.load(args.load_scorer)
        print(colorize('load scorer traj ' + args.load_scorer))
    elif args.load_traj_encoder != '':
        scorer.load_traj_encoder(args.load_traj_encoder)
        print(colorize('load traj encoder ' + args.load_traj_encoder))
    return scorer
def enable_falcon_pos_shift_attention(model):
    """Recursively rebind the forward of every Falcon `self_attention` module
    to the position-shift attention variant."""
    for name, child in reversed(model._modules.items()):
        if list(child.children()):
            # Containers first: recurse into submodules.
            enable_falcon_pos_shift_attention(child)
        if name.endswith('self_attention'):
            model._modules[name].forward = types.MethodType(
                falcon_pos_shift_attention_forward, model._modules[name])
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model / generation / pipeline test suite for CTRL.

    NOTE(review): the original source had a stray `_torch` token before the
    class (a NameError as written) — almost certainly a mangled
    `@require_torch` decorator; restored here, confirm against upstream.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # Zero-shot pipeline tests are skipped for this model.
        return pipeline_test_casse_name == 'ZeroShotClassificationPipelineTests'

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # Release GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
class LoopThread(StoppableThread):
    """Daemon thread that calls *func* repeatedly until stopped.

    When *pausable*, a lock gates each iteration: pause() acquires and holds
    the lock so the loop blocks at the top of its next iteration; resume()
    releases it.
    """

    def __init__(self, func, pausable=True):
        super(LoopThread, self).__init__()
        self._func = func          # callable invoked once per iteration
        self._pausable = pausable
        if pausable:
            self._lock = threading.Lock()
        self.daemon = True         # never keep the process alive

    def run(self):
        while (not self.stopped()):
            if self._pausable:
                # Blocks here while pause() holds the lock; released right
                # away so pause() can re-acquire before the next iteration.
                self._lock.acquire()
                self._lock.release()
            self._func()

    def pause(self):
        # Only valid for pausable threads (no lock exists otherwise).
        assert self._pausable
        self._lock.acquire()

    def resume(self):
        assert self._pausable
        self._lock.release()
def get_dag_params(obj: FlowSpec):
    """Describe a flow's parameters as [{'name': ..., 'type': 'file' | 'parameter'}]."""
    described = []
    for p in obj._get_parameters():
        kind = 'file' if isinstance(p[1], includefile.IncludeFile) else 'parameter'
        described.append({'name': p[0], 'type': kind})
    return described
class Instance():
    """Base descriptor for one dataset subject: source/destination paths,
    preprocessing hooks, rotation sweeps for rendering, and OBJ/MTL fix-ups.

    Paths are transparently prefixed with the cluster mount point when that
    mount exists on the local machine.
    """

    def __init__(self):
        self.mount = '/home/wzielonka/Cluster/lustre'  # cluster filesystem root
        self.dst = 'empty'
        self.src = 'empty'
        self.device = 'cuda:0'
        self.actors = []
        # Prefix paths with the mount only when it is present locally.
        self.use_mount = os.path.exists(self.mount)

    def get_dst(self):
        return (self.dst if (not self.use_mount) else (self.mount + self.dst))

    def get_src(self):
        return (self.src if (not self.use_mount) else (self.mount + self.src))

    def get_min_det_score(self):
        # Minimum detection score; subclasses may raise this threshold.
        return 0

    def preprocess(self):
        # Hook for subclasses.
        pass

    def get_images(self):
        return {}

    def get_flame_params(self):
        return {}

    def get_registrations(self):
        return {}

    def get_meshes(self):
        return {}

    def transform_mesh(self, path):
        return None

    def transform_image(self, img):
        return [img]

    def transform_path(self, file):
        return Path(file).name

    def get_rotations(self):
        """Return {axis: [(RotateAxisAngle, angle_degrees), ...]} sweeps for X and Y.

        NOTE(review): `degree` is reassigned to 8.0 before the loop, so the X
        sweep also uses 8-degree steps (not the 2.5 used to size its range) —
        confirm this is intended.
        """
        rots = {}
        degree = 2.5
        step = int(((15 / degree) / 2))
        X = range((- step), (step + 1))
        degree = 8.0
        step = int(((144 / degree) / 2))
        Y = range((- step), (step + 1))
        for (a, angles) in [('X', X), ('Y', Y)]:
            r = []
            for i in angles:
                r.append((RotateAxisAngle(float((degree * i)), axis=a, device=self.device), float((degree * i))))
            rots[a] = r
        return rots

    def update_obj(self, path, fix_mtl=False):
        """Rewrite an OBJ so each `mtllib` line is followed by a matching
        `usemtl`, dropping pre-existing usemtl/newmtl lines; when *fix_mtl*,
        rename the material in the companion .mtl file to the mesh name."""
        mesh = Path(path).stem
        with open(path, 'r') as file:
            filedata = file.readlines()
        input = []
        for line in filedata:
            # Strip existing material references; re-added below per mtllib.
            if (('usemtl' in line) or ('newmtl' in line)):
                continue
            input.append(line)
        output = []
        for line in input:
            if ('mtllib' in line):
                # Material name = mtl filename without extension.
                mtl = line.split(' ')[(- 1)].split('.')[0]
                line += f'''usemtl {mtl}
'''
            output.append(line)
        with open(path, 'w') as file:
            file_lines = ''.join(output)
            file.write(file_lines)
        if (not fix_mtl):
            return
        with open(path.replace('obj', 'mtl'), 'r') as file:
            filedata = file.readlines()
        output = []
        for line in filedata:
            if ('newmtl' in line):
                line = (('newmtl ' + mesh) + '\n')
            output.append(line)
        with open(path.replace('obj', 'mtl'), 'w') as file:
            file_lines = ''.join(output)
            file.write(file_lines)
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Round *value* to the nearest multiple of *divisor*, floored at
    *min_value* (defaults to *divisor*); if rounding dropped the result below
    min_ratio * value, bump it up one divisor step."""
    floor = divisor if min_value is None else min_value
    rounded = int(value + divisor / 2) // divisor * divisor
    result = max(floor, rounded)
    if result < min_ratio * value:
        result += divisor  # avoid shrinking by more than (1 - min_ratio)
    return result
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Map quantized integer levels back to floats in [min_val, max_val].

    Each level i maps to its bin centre:
    (i + 0.5) * (max_val - min_val) / levels + min_val.

    Raises ValueError for a non-integer or <=1 *levels*, or min_val >= max_val.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError('levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError('min_val ({}) must be smaller than max_val ({})'.format(min_val, max_val))
    return (arr + 0.5).astype(dtype) * (max_val - min_val) / levels + min_val
class UpSampling1D(Layer):
    """Keras-style 1-D upsampling layer wrapper.

    # Arguments
    length: integer upsampling factor (passed through to the backend layer).
    bigdl_type: numeric type of the backend parameters, default 'float'.
    """

    def __init__(self, length, bigdl_type='float'):
        super(UpSampling1D, self).__init__(None, bigdl_type, length)
def get_visited_q_values(q, visits, state):
    """Return Q-values for *state* with never-visited actions pushed to -inf,
    so greedy selection never picks an untried action's raw value."""
    q_values = q[state]
    # Subtracting +inf sends unvisited entries to -inf, visited stay unchanged.
    penalty = np.where(visits[state] == 0, np.inf, 0.0)
    return q_values - penalty
def parse_args():
    """Parse CLI options for the FASPell data-creation script."""
    usage = ('\n1. create wrong.txt, correct.txt and mask_probability.sav by:\n'
             'python create_data.py -f /path/to/train.txt\n\n\n'
             '2. specify output dir by:\n'
             'python create_data.py -f /path/to/train.txt -o /path/to/dir/\n\n')
    parser = argparse.ArgumentParser(
        description='A module for FASPell - Fast, Adaptable, Simple, Powerful Chinese Spell Checker',
        usage=usage,
    )
    parser.add_argument('--file', '-f', type=str, default=None,
                        help='original training data.')
    parser.add_argument('--output', '-o', type=str, default='',
                        help='output a file a dir; default is current directory.')
    return parser.parse_args()
def print_df_stats(df, df_train, df_val):
    """Print a table of image and patient counts (total / AD / CN) for the
    full, train and validation DataFrames."""

    def _row(label, frame):
        # Counts of rows and unique patients, overall and per diagnosis.
        ad = frame[frame['DX'] == 'Dementia']
        cn = frame[frame['DX'] == 'CN']
        return [label, len(frame), len(ad), len(cn),
                len(frame['PTID'].unique()), len(ad['PTID'].unique()),
                len(cn['PTID'].unique())]

    headers = ['Images', '-> AD', '-> CN', 'Patients', '-> AD', '-> CN']
    rows = [_row('All', df), _row('Train', df_train), _row('Val', df_val)]
    print(tabulate(rows, headers=headers))
    print()
def prof(args):
    """Benchmark linear vs qpth forward/backward across problem sizes and
    print a markdown-style table plus raw (mean, std) lists."""
    print('| \\# Vars | \\# Batch | Linear f/b | qpth f/b |')
    n_batch = 128
    linear_fwd, qpth_fwd = [], []
    linear_bwd, qpth_bwd = [], []
    row_fmt = ('| {:5d} ' * 2) + ('| ${:.3e} \\pm {:.3e}$ s ' * 4) + '|'
    for nz in (10, 50, 100, 500):
        lf, qf, lb, qb = prof_instance(nz, n_batch, args.nTrials)
        linear_fwd.append((lf.mean(), lf.std()))
        qpth_fwd.append((qf.mean(), qf.std()))
        linear_bwd.append((lb.mean(), lb.std()))
        qpth_bwd.append((qb.mean(), qb.std()))
        print(row_fmt.format(nz, n_batch, lf.mean(), lf.std(), lb.mean(), lb.std(),
                             qf.mean(), qf.std(), qb.mean(), qb.std()))
    print('linearf = ', linear_fwd)
    print('qpthf = ', qpth_fwd)
    print('linearb = ', linear_bwd)
    print('qpthb = ', qpth_bwd)
def _worker_run_map(all_args):
    """Pool worker entry: unpack (runner, args) and call runner on the pool's
    shared globals, wrapping any failure so its traceback text survives
    being pickled back to the parent process."""
    try:
        (runner, args) = all_args
        return runner(singleton_pool.G, *args)
    except Exception:
        # Child tracebacks are lost across process boundaries; embed the
        # formatted traceback in the exception message instead.
        raise Exception(''.join(traceback.format_exception(*sys.exc_info())))
def unique_filename(prefix: str = '', suffix: str = '', n_digits: int = 2, count_start: int = 0) -> str:
    """Return the first '<prefix>_<NN><suffix>' path that does not exist yet,
    counting upward from *count_start* with *n_digits* zero-padding.

    A '_' separator is inserted unless *prefix* is empty or ends with a path
    separator.
    """
    if prefix and prefix[-1] not in {'/', '\\'}:
        prefix += '_'
    counter = count_start
    while True:
        candidate = f'{prefix}{counter:0{n_digits}d}{suffix}'
        if not os.path.exists(candidate):
            return candidate
        counter += 1
def main():
    # Parse CLI args + config, then build and visualize the network.
    # The returned (rank, model) pair is not used further here.
    (args, config) = parse_args()
    (rank, model) = vis_net(args, config, args.save_dir)
def main():
    """Precompute per-image meta information for a dataset split and dump it
    to a .pkl file next to the annotation file."""
    args = parse_args()
    assert args.out.endswith('pkl'), 'The output file name must be pkl suffix'
    cfg = Config.fromfile(args.config)
    dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
    dataset_cfg = dataloader_cfg.dataset
    ann_file = osp.join(dataset_cfg.data_root, dataset_cfg.ann_file)
    img_prefix = osp.join(dataset_cfg.data_root, dataset_cfg.data_prefix['img'])

    print(f"{'-' * 5} Start Processing {'-' * 5}")
    # Annotation parsing depends on the file format.
    if ann_file.endswith('csv'):
        data_infos = get_metas_from_csv_style_ann_file(ann_file)
    elif ann_file.endswith('txt'):
        data_infos = get_metas_from_txt_style_ann_file(ann_file)
    else:
        shuffix = ann_file.split('.')[-1]
        raise NotImplementedError(f'File name must be csv or txt suffix but get {shuffix}')
    print(f'Successfully load annotation file from {ann_file}')
    print(f'Processing {len(data_infos)} images...')

    # Fan the meta extraction out over a worker pool.
    pool = Pool(args.nproc)
    image_metas = pool.starmap(get_image_metas,
                               zip(data_infos, [img_prefix] * len(data_infos)))
    pool.close()

    root_path = dataset_cfg.ann_file.rsplit('/', 1)[0]
    save_path = osp.join(root_path, args.out)
    dump(image_metas, save_path, protocol=4)
    print(f'Image meta file save to: {save_path}')
def process_bpe_symbol(sentence: str, bpe_symbol: str):
    """Undo subword segmentation on a detokenized model output string.

    Supported markers:
    - 'sentencepiece': drop ordinary spaces, then turn the U+2581 word
      boundary marker into spaces.
    - '_EOW': drop spaces, then turn explicit end-of-word markers into spaces.
    - any other non-None string (e.g. '@@ '): strip the continuation marker.
    - None: return the sentence unchanged.
    """
    if bpe_symbol == 'sentencepiece':
        # BUG FIX: the original replaced the EMPTY string (which inserts a
        # space between every character); the sentencepiece word-boundary
        # marker is U+2581 (lower one-eighth block).
        sentence = sentence.replace(' ', '').replace('\u2581', ' ').strip()
    elif bpe_symbol == '_EOW':
        sentence = sentence.replace(' ', '').replace('_EOW', ' ').strip()
    elif bpe_symbol is not None:
        sentence = (sentence + ' ').replace(bpe_symbol, '').rstrip()
    return sentence
def get_bytes(buffer: Union[(Dict, np.ndarray)]) -> int:
    """Total byte size of an ndarray, or recursively of all values in a dict.

    Raises ValueError for any other type.
    """
    if isinstance(buffer, np.ndarray):
        return buffer.nbytes
    if isinstance(buffer, dict):
        return sum(get_bytes(value) for value in buffer.values())
    raise ValueError('Unsupported type passed to `get_bytes`.')
class RandomSearch(AbstractSearch):
    """Search strategy that grows a single random path from the root by
    repeatedly sampling actions from *policies*."""

    def __init__(self, policies, instantiate=True):
        # policies: provides .sample(node) and optional ._initialize(child).
        self.policies = policies
        self.instantiate = instantiate

    def __call__(self, root, *args, **kwargs):
        # Walk downward from root, appending one sampled child per step.
        # Returns (elapsed_seconds, path_of_visited_nodes).
        start_time = timeit.default_timer()
        node = root
        path = []
        while True:
            path.append(node)
            (action, condition) = self.policies.sample(node)
            if (action is not None):
                node.children.append(Node(action=action))
                child = node.children[(- 1)]
                if self.instantiate:
                    node.instantiate(child)
                if self.policies._initialize:
                    self.policies._initialize(child)
                node = child
            # NOTE(review): no break is visible in this loop — as written it
            # can only terminate via an exception from policies.sample();
            # confirm against the original (an `else: break` may have been
            # lost in transcription).
        elapsed = (timeit.default_timer() - start_time)
        return (elapsed, path)
class nnUNetTrainerV2_NoNormalization_lr1en3(nnUNetTrainerV2_NoNormalization):
    """No-normalization nnU-Net trainer variant with initial LR 1e-3
    (the 'lr1en3' suffix)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)
        self.initial_lr = 0.001  # overrides the parent's default learning rate
def generate(generations, population, all_possible_genes, dataset):
    """Evolve a genome population for *generations* rounds, then log the top 5."""
    logging.info('***generate(generations, population, all_possible_genes, dataset)***')
    evolver = Evolver(all_possible_genes)
    genomes = evolver.create_population(population)
    for gen in range(generations):
        logging.info('***Now in generation %d of %d***' % (gen + 1, generations))
        print_genomes(genomes)
        train_genomes(genomes, dataset)
        avg_accuracy = get_average_accuracy(genomes)
        logging.info('Generation average: %.2f%%' % (avg_accuracy * 100))
        logging.info('-' * 80)
        # The last generation has been scored; no point evolving it further.
        if gen != generations - 1:
            genomes = evolver.evolve(genomes)
    genomes = sorted(genomes, key=lambda g: g.accuracy, reverse=True)
    print_genomes(genomes[:5])
class ModelBuilder(nn.Module):
    """Assembles backbone (+ optional adjust neck) and keypoint head for a
    tracking model; provides template/track for inference and forward for
    training with focal-loss heatmaps plus smooth-L1 size/offset regression."""

    def __init__(self):
        super(ModelBuilder, self).__init__()
        self.backbone = get_backbone(cfg.BACKBONE.TYPE, **cfg.BACKBONE.KWARGS)
        if cfg.ADJUST.ADJUST:
            self.neck = get_neck(cfg.ADJUST.TYPE, **cfg.ADJUST.KWARGS)
        self.kpn_head = get_kpn_head(cfg.KPN.TYPE, **cfg.KPN.KWARGS)

    def template(self, z):
        # Cache exemplar features for later track() calls.
        zf = self.backbone(z)
        if cfg.ADJUST.ADJUST:
            zf = self.neck(zf)
        self.zf = zf
        return self.zf

    def extract(self, x):
        # Backbone + neck features in one call (assumes the neck exists).
        return self.neck(self.backbone(x))

    def track(self, z, x):
        # z: precomputed template features; x: raw search image.
        xf = self.backbone(x)
        xf = self.neck(xf)
        (zfe, xfe) = (z, xf)
        if cfg.TRAIN.OFFSETS:
            (heatmap, objsize, offsets) = self.kpn_head(zfe, xfe)
            return {'zfeature': zfe, 'xfeature': xfe, 'heatmap': heatmap, 'offsets': offsets, 'objsize': objsize}
        else:
            (heatmap, objsize) = self.kpn_head(zfe, xfe)
            return {'zfeature': zfe, 'xfeature': xfe, 'heatmap': heatmap, 'objsize': objsize}

    def forward(self, data):
        """Training step: compute weighted heatmap/size(/offset) losses."""
        template = data['template'].cuda()
        search = data['search'].cuda()
        label_heatmap = [hm.cuda() for hm in data['label_heatmap']]
        label_objsize = data['label_objsize'].cuda()
        zf = self.backbone(template)
        xf = self.backbone(search)
        if cfg.ADJUST.ADJUST:
            zf = self.neck(zf)
            xf = self.neck(xf)
        if cfg.TRAIN.OFFSETS:
            label_offsets = data['label_offsets'].cuda()
            (heatmap, objsize, offsets) = self.kpn_head(zf, xf)
        else:
            (heatmap, objsize) = self.kpn_head(zf, xf)
        if cfg.TRAIN.INTER_SUPER:
            # Intermediate supervision: average losses over all stacks.
            heatmap = [torch.sigmoid(hm) for hm in heatmap]
            heatmap_loss = [focal_loss(heatmap[i], label_heatmap[i]) for i in range(cfg.TRAIN.STACK)]
            heatmap_loss = (sum(heatmap_loss) / cfg.TRAIN.STACK)
            pos_inds = label_heatmap[0].gt(0)
            # Duplicate the positive mask across the two regression channels.
            indx = torch.cat((pos_inds, pos_inds), 1)
            if cfg.TRAIN.OFFSETS:
                if cfg.TRAIN.SAMEOFF:
                    offsets_loss = [F.smooth_l1_loss(offsets[i][indx], label_offsets[indx]) for i in range(cfg.TRAIN.STACK)]
                else:
                    # Weight offset regression by heatmap confidence at positives.
                    weight_offsets = label_heatmap[0][pos_inds]
                    weight_offsets = torch.cat((weight_offsets, weight_offsets), 0)
                    offsets_loss = [F.smooth_l1_loss((weight_offsets * offsets[i][indx]), (weight_offsets * label_offsets[indx])) for i in range(cfg.TRAIN.STACK)]
                offsets_loss = (sum(offsets_loss) / cfg.TRAIN.STACK)
            objsize_loss = [F.smooth_l1_loss(objsize[i][indx], label_objsize[indx]) for i in range(cfg.TRAIN.STACK)]
            objsize_loss = (sum(objsize_loss) / cfg.TRAIN.STACK)
        else:
            # Supervise only the last stack's outputs.
            heatmap = torch.sigmoid(heatmap[(- 1)])
            heatmap_loss = focal_loss(heatmap, label_heatmap[0])
            pos_inds = label_heatmap[0].gt(0)
            indx = torch.cat((pos_inds, pos_inds), 1)
            if cfg.TRAIN.OFFSETS:
                if cfg.TRAIN.SAMEOFF:
                    offsets_loss = F.smooth_l1_loss(offsets[(- 1)][indx], label_offsets[indx])
                else:
                    weight_offsets = label_heatmap[0][pos_inds]
                    weight_offsets = torch.cat((weight_offsets, weight_offsets), 0)
                    offsets_loss = F.smooth_l1_loss((weight_offsets * offsets[(- 1)][indx]), (weight_offsets * label_offsets[indx]))
            objsize_loss = F.smooth_l1_loss(objsize[(- 1)][indx], label_objsize[indx])
        outputs = {}
        if cfg.TRAIN.OFFSETS:
            outputs['total_loss'] = (((cfg.TRAIN.HM_WEIGHT * heatmap_loss) + (cfg.TRAIN.OF_WEIGHT * offsets_loss)) + (cfg.TRAIN.WH_WEIGHT * objsize_loss))
            outputs['offsets_loss'] = offsets_loss
        else:
            outputs['total_loss'] = ((cfg.TRAIN.HM_WEIGHT * heatmap_loss) + (cfg.TRAIN.WH_WEIGHT * objsize_loss))
        outputs['heatmap_loss'] = heatmap_loss
        outputs['objsize_loss'] = objsize_loss
        return outputs
def warn(msg, *args):
    # Emit a yellow 'WARN: <msg % args>' warning when the module log level
    # allows warnings.
    if (MIN_LEVEL <= WARN):
        warnings.warn(colorize(('%s: %s' % ('WARN', (msg % args))), 'yellow'))
class Net():
    """Data-module wrapper tracking a global sample position and iteration
    count, with every reported counter scaled by `_num_sample_factors`."""

    def __init__(self, data_module, _num_sample_factors=None):
        if (_num_sample_factors is None):
            self._num_sample_factors = 1
        else:
            self._num_sample_factors = _num_sample_factors
        self._num_samples = data_module.num_samples()
        self.data_module = data_module
        # Absolute position = samples finished in the current epoch plus all
        # samples of completed epochs.
        self._total_pos = (data_module.num_samples_finished() + (self._num_samples * data_module.epoch()))
        self._total_iter = data_module.iter()
        link_with_instance(self, self.data_module)

    def __call__(self, *args, **kwargs):
        return self.next_batch(*args, **kwargs)

    def next_batch(self, batch_size):
        # Advance the counters, then delegate the actual batch fetch.
        self._total_iter += 1
        self._total_pos += batch_size
        data = self.data_module(batch_size)
        return data

    def num_samples_finished(self):
        # NOTE(review): the factor scales the remainder, i.e.
        # (pos % num_samples) * factor — confirm that is the intended
        # scaling (vs. scaling inside the modulo).
        return ((self._total_pos % self._num_samples) * self._num_sample_factors)

    def epoch(self):
        return ((self._total_pos // self._num_samples) * self._num_sample_factors)

    def iter(self):
        return (self._total_iter * self._num_sample_factors)

    def pos(self):
        return (self._total_pos * self._num_sample_factors)

    def set_iter(self, new_iter):
        # NOTE(review): multiplies on the way in while iter() also multiplies
        # on the way out — set_iter(iter()) is not a round-trip when the
        # factor != 1; confirm intended.
        self._total_iter = (new_iter * self._num_sample_factors)

    def set_pos(self, new_pos):
        self._total_pos = (new_pos * self._num_sample_factors)

    def reset(self):
        # Reset raw counters (not scaled).
        self._total_pos = 0
        self._total_iter = 0

    def num_samples(self):
        return (self._num_samples * self._num_sample_factors)
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO detection dataset returning BoxList targets (boxes, labels,
    masks, optional keypoints) plus a per-instance source-domain flag for
    domain-adaptation training."""

    def __init__(self, ann_file, root, remove_images_without_annotations, transforms=None, is_source=True):
        super(COCODataset, self).__init__(root, ann_file)
        # Sort ids for deterministic ordering across runs.
        self.ids = sorted(self.ids)
        if remove_images_without_annotations:
            ids = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    ids.append(img_id)
            self.ids = ids
        # COCO category ids are sparse; remap to contiguous 1..N (0 = background).
        self.json_category_id_to_contiguous_id = {v: (i + 1) for (i, v) in enumerate(self.coco.getCatIds())}
        self.contiguous_category_id_to_json_id = {v: k for (k, v) in self.json_category_id_to_contiguous_id.items()}
        self.id_to_img_map = {k: v for (k, v) in enumerate(self.ids)}
        self._transforms = transforms
        self.is_source = is_source  # True for source-domain images

    def __getitem__(self, idx):
        (img, anno) = super(COCODataset, self).__getitem__(idx)
        # Drop crowd annotations.
        anno = [obj for obj in anno if (obj['iscrowd'] == 0)]
        boxes = [obj['bbox'] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape((- 1), 4)
        # COCO stores xywh; convert to xyxy for downstream code.
        target = BoxList(boxes, img.size, mode='xywh').convert('xyxy')
        classes = [obj['category_id'] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field('labels', classes)
        masks = [obj['segmentation'] for obj in anno]
        masks = SegmentationMask(masks, img.size)
        target.add_field('masks', masks)
        # One domain flag per instance: ones for source, zeros for target domain.
        domain_labels = (torch.ones_like(classes, dtype=torch.bool) if self.is_source else torch.zeros_like(classes, dtype=torch.bool))
        target.add_field('is_source', domain_labels)
        if (anno and ('keypoints' in anno[0])):
            keypoints = [obj['keypoints'] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field('keypoints', keypoints)
        target = target.clip_to_image(remove_empty=True)
        if (self._transforms is not None):
            (img, target) = self._transforms(img, target)
        return (img, target, idx)

    def get_img_info(self, index):
        # Lightweight metadata lookup (no image decoding).
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data
class Algorithm(object):
    """Base class for iterative algorithms with per-iteration recording,
    optional live display, and a relative-difference stopping criterion.

    Subclasses implement _initialize / _iterate / _terminate, and optionally
    _extract_current_iterate_matrix to enable the relative-difference stop.
    """

    def __init__(self, parameters=None, **kargs):
        # Defaults are merged with user parameters by fill_parameters() in run().
        self._default_keyword_parameters = {'maximum_iteration': 100, 'verbose': False, 'recording_functions': {}, 'display_time': 0.5, 'display_function': self.__no_display}
        self._default_keyword_parameters.setdefault('relative_difference_tolerance', 0)
        self._iteration = 0
        self._maximum_iteration = 0
        self._data = {}
        self._data['recording'] = {}
        self._parameters = {'verbose': 0}
        self.add_parameters(parameters, kargs)
        self.__recording_functions = {}
        self._current_iterate = None

    def __no_display(self, data):
        # Sentinel display hook: its presence disables all display logic.
        pass

    def add_parameters(self, parameters, kargs):
        """Merge a parameter dict and/or keyword overrides into the current set."""
        if (parameters is not None):
            self._parameters.update(parameters)
        self._parameters.update(kargs)

    def _initialize(self):
        raise NotImplementedError

    def _terminate(self):
        raise NotImplementedError

    def _iterate(self):
        raise NotImplementedError

    def name(self):
        """Name used for parameter filling and the display window title."""
        return self.__class__.__name__

    def reset_parameters(self):
        self._parameters = {}

    def _extract_current_iterate_matrix(self):
        # Optional: return the current iterate as an array for the
        # relative-difference stopping test.
        raise NotImplementedError

    def run(self, parameters=None, **kargs):
        """Run the algorithm and return self._data (including recordings)."""
        self.add_parameters(parameters, kargs)
        self._parameters = fill_parameters(self._parameters, self._default_keyword_parameters, self.name())
        display = 0
        # BUG FIX: the original compared function names with `is not`, which
        # tests identity rather than equality; use `!=`.
        if (self._parameters['display_function'].__name__ != '__no_display'):
            self._parameters['verbose'] = 1
            display = 1
            pyplot.figure((self.__class__.__name__ + ' display'))
        self._maximum_iteration = self._parameters['maximum_iteration']
        self._initialize()
        rel_tol = self._parameters['relative_difference_tolerance']
        if (rel_tol > 0):
            try:
                self._current_iterate = self._extract_current_iterate_matrix().copy()
            except NotImplementedError:
                # Subclass does not expose its iterate: silently disable the test.
                rel_tol = 0
                if (self._parameters['verbose'] != 0):
                    print('Relative difference tolerance stopping criteria\ndesactivated (_extract_current_iterate_matrix not implemented).')
        last_display_time = time.time()
        beginning_time = last_display_time
        for self._iteration in range(0, self._maximum_iteration):
            self._iterate()
            self.__recording()
            if display:
                # Refresh at most every display_time seconds (always on the last iteration).
                if (((time.time() - last_display_time) > self._parameters['display_time']) or (self._maximum_iteration == (self._iteration + 1))):
                    pyplot.clf()
                    collect()
                    self._parameters['display_function'](self._data)
                    QApplication.processEvents()
                    last_display_time = time.time()
            if self._parameters['verbose']:
                print(('Iteration %s/%s.' % ((self._iteration + 1), self._maximum_iteration)))
            if (rel_tol > 0):
                new_iterate = self._extract_current_iterate_matrix().copy()
                if ((linalg.norm((new_iterate - self._current_iterate)) / (linalg.norm(self._current_iterate) + finfo(float).eps)) < rel_tol):
                    if self._parameters['verbose']:
                        print('Stopping (relative difference is too small)')
                    break
                else:
                    self._current_iterate = new_iterate
        self._terminate()
        if self._parameters['verbose']:
            print(('Total computation time: %s.' % (time.time() - beginning_time)))
        return self._data

    def __recording(self):
        """Evaluate each recording function, allocating NaN arrays on the
        first iteration and storing per-iteration values thereafter."""
        for k in self._parameters['recording_functions'].keys():
            temp = self._parameters['recording_functions'][k](self._data)
            if (self._iteration == 0):
                if (isscalar(temp) or (((type(temp) == list) or (type(temp) == ndarray)) and (len(temp) == 1))):
                    self._data['recording'][k] = (NaN * ones(self._maximum_iteration))
                    if self._parameters['verbose']:
                        print(('Adding recorded feature: %s.' % k))
                elif (type(temp) == dict):
                    string = 'Adding recorded features: '
                    for i in temp.keys():
                        string = ((string + i) + ', ')
                        self._data['recording'][i] = (NaN * ones(self._maximum_iteration))
                    if (self._parameters['verbose'] == 1):
                        print((string[0:(len(string) - 2)] + '.'))
                else:
                    # BUG FIX: the original raised a plain string, which is a
                    # TypeError in Python 3; raise a proper exception instead.
                    raise TypeError('Recording function is neither scalar nor dictionary')
            if (isscalar(temp) or (((type(temp) == list) or (type(temp) == ndarray)) and (len(temp) == 1))):
                self._data['recording'][k][self._iteration] = temp
            elif (type(temp) == dict):
                for i in temp.keys():
                    self._data['recording'][i][self._iteration] = temp[i]
def load_BART_or_PEGASUS(mname):
    """Load a BART or PEGASUS seq2seq model and its tokenizer by model name.

    Dispatches on the case-insensitive substring 'bart' or 'pegasus' in
    *mname* and returns (model, tokenizer).

    Raises:
        NotImplementedError: if the name matches neither family.
    """
    lowered = mname.lower()
    if 'bart' in lowered:
        from transformers import BartTokenizer, BartForConditionalGeneration
        model = BartForConditionalGeneration.from_pretrained(mname)
        tokenizer = BartTokenizer.from_pretrained(mname)
    elif 'pegasus' in lowered:
        from transformers import PegasusTokenizer, PegasusForConditionalGeneration
        model = PegasusForConditionalGeneration.from_pretrained(mname)
        tokenizer = PegasusTokenizer.from_pretrained(mname)
    else:
        # Typo fix in the error message: 'UNKOWN' -> 'UNKNOWN'.
        raise NotImplementedError('UNKNOWN model name.')
    return (model, tokenizer)
def getlocaltime():
    """Return the local (date, time) as ('yy-mm-dd', 'HH:MM:SS') strings.

    BUG FIX: the original computed both strings but fell off the end of the
    function, always returning None; the values are now returned. A single
    localtime() snapshot is used so date and time are consistent.
    """
    now = time.localtime()
    date = time.strftime('%y-%m-%d', now)
    current_time = time.strftime('%H:%M:%S', now)
    return (date, current_time)
def to_list(obj):
    """Wrap *obj* in a list unless it already is one (returned unchanged)."""
    return obj if isinstance(obj, list) else [obj]
class XnliProcessor(DataProcessor):
    """Loads XNLI examples: train from the machine-translated MultiNLI TSVs,
    test from the official xnli.test.tsv filtered to one language."""

    def __init__(self, language, train_language=None):
        self.language = language
        # Optional distinct training language (cross-lingual transfer setup).
        self.train_language = train_language

    def get_train_examples(self, data_dir):
        """Read training examples for the train (or main) language."""
        lg = (self.language if (self.train_language is None) else self.train_language)
        lines = self._read_tsv(os.path.join(data_dir, 'XNLI-MT-1.0/multinli/multinli.train.{}.tsv'.format(lg)))
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue  # skip header row
            guid = ('%s-%s' % ('train', i))
            text_a = line[0]
            text_b = line[1]
            # The MT files use 'contradictory' where XNLI uses 'contradiction'.
            label = ('contradiction' if (line[2] == 'contradictory') else line[2])
            assert isinstance(text_a, str), f'Training input {text_a} is not a string'
            assert isinstance(text_b, str), f'Training input {text_b} is not a string'
            assert isinstance(label, str), f'Training label {label} is not a string'
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """Read test examples, keeping only rows for self.language."""
        lines = self._read_tsv(os.path.join(data_dir, 'XNLI-1.0/xnli.test.tsv'))
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue  # skip header row
            language = line[0]
            if (language != self.language):
                continue
            guid = ('%s-%s' % ('test', i))
            # Columns 6/7 hold premise/hypothesis; column 1 the gold label.
            text_a = line[6]
            text_b = line[7]
            label = line[1]
            assert isinstance(text_a, str), f'Training input {text_a} is not a string'
            assert isinstance(text_b, str), f'Training input {text_b} is not a string'
            assert isinstance(label, str), f'Training label {label} is not a string'
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """The three NLI class labels."""
        return ['contradiction', 'entailment', 'neutral']
def generate_training_labels(data_folder: Path, resume):
    """Create per-city ETA training labels under <data_folder>/train/<city>/labels."""
    print(f'Processing label data in {data_folder}')
    train_folder = data_folder / 'train'
    for city in ('london', 'madrid', 'melbourne'):
        labels_folder = train_folder / city / 'labels'
        labels_folder.mkdir(exist_ok=True, parents=True)
        generate_eta_labels(
            city,
            in_folder=data_folder / 'speed_classes',
            out_folder=train_folder,
            road_graph_folder=data_folder / 'road_graph',
            resume=resume,
        )
class TrainSetTransform():
    """Training-time point-cloud augmentation: a small rotation about the Z
    axis followed by random axis flips. Only aug_mode == 1 is defined."""

    def __init__(self, aug_mode):
        self.aug_mode = aug_mode
        if self.aug_mode == 1:
            steps = [
                RandomRotation(max_theta=5, axis=np.array([0, 0, 1])),
                RandomFlip([0.25, 0.25, 0.0]),
            ]
        else:
            raise NotImplementedError('Unknown aug_mode: {}'.format(self.aug_mode))
        self.transform = transforms.Compose(steps)

    def __call__(self, e):
        if self.transform is not None:
            e = self.transform(e)
        return e
class HSmooth(HBox):
    """HBox variant whose ReLU uses the smoothed formulation (creluSmooth)."""

    def __init__(self, *args, **kargs):
        super(HSmooth, self).__init__(*args, **kargs)

    def customRelu(self):
        # Dispatch point: HBox's generic machinery calls customRelu().
        return self.creluSmooth()

    def copy(hbox):
        # NOTE(review): used as a static factory (first arg is a source hbox,
        # not `self`); this works in Python 3 because plain functions are
        # callable via the class, but an @staticmethod would be clearer.
        return HSmooth(hbox.head, hbox.beta, hbox.errors)

    def box(*args, **kargs):
        # Build via HBox's factory, then convert the result to an HSmooth.
        return HSmooth.copy(HBox.box(*args, **kargs))

    def line(*args, **kargs):
        return HSmooth.copy(HBox.line(*args, **kargs))
def process_time(result_text: str, doc) -> dict:
    """Extract time expressions from a spaCy *doc*.

    DATE entities containing a digit are collected either as single 'time'
    mentions or, when the utterance contains 'to', as 'period' endpoints.
    Returns {'time': [...], 'period': [...]}; if the period endpoints do not
    pair up evenly, they are all folded back into 'time'.
    """
    mentioned_time = {'time': [], 'period': []}
    for ent in doc.ents:
        if (ent.label_ == 'DATE'):
            # Only keep entities that actually contain a digit.
            if bool(re.search('\\d', str(ent))):
                if ('to' in result_text):
                    if ('to' in ent.text):
                        # 'X to Y' inside one entity: both ends are endpoints.
                        cur_periods = ent.text.split(' to ')
                        mentioned_time['period'].extend(cur_periods)
                    elif ((len(mentioned_time['period']) > 0) and (mentioned_time['period'][(- 1)] == ent.text)):
                        # Duplicate of the previous endpoint: drop it.
                        mentioned_time['period'].pop()
                    else:
                        mentioned_time['period'].append(ent.text)
                else:
                    mentioned_time['time'].append(ent.text)
    # An odd number of endpoints cannot form ranges; demote them to plain
    # times (deduplicated; order is not preserved by set()).
    if ((len(mentioned_time['period']) % 2) != 0):
        mentioned_time['time'] = list(set((mentioned_time['time'] + mentioned_time['period'])))
        mentioned_time['period'] = []
    return mentioned_time
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSP): pool the input at several scales, run
    each through a pooling block, and bilinearly upsample every result back
    to the input's spatial size.

    Args:
        pool_scales: target output sizes of the adaptive pooling stages.
        in_channels: channels of the incoming feature map.
        channels: channels produced by each pooling block.
        align_corners: forwarded to the bilinear interpolation.
    """

    def __init__(self, pool_scales: Tuple[(int, ...)], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for idx, scale in enumerate(pool_scales):
            stage = UperNetPyramidPoolingBlock(pool_scale=scale, in_channels=in_channels, channels=channels)
            self.blocks.append(stage)
            # Register under the index so nn.Module tracks the parameters.
            self.add_module(str(idx), stage)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        outputs = []
        for stage in self.blocks:
            pooled = stage(x)
            restored = nn.functional.interpolate(
                pooled, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            outputs.append(restored)
        return outputs
class TFXLMRobertaForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error
    unless the TensorFlow backend is installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (distributed) MoCo-style training.

    Sets up the process group, builds the MoCo model (optionally loading a
    teacher encoder for distillation), restores a checkpoint if resuming, and
    runs the training epoch loop. Only DistributedDataParallel is supported;
    the single-GPU and CPU paths raise NotImplementedError.
    """
    args.gpu = gpu
    # Silence printing on all but the master process.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Select the encoder backbone: custom efficientnet or a torchvision arch.
    if (args.arch == 'efficientnet-b0'):
        print("=> EFFIF: creating model '{}'".format(args.arch))
        base_model = efficientnet_b0
    else:
        print("=> ELSE: creating model '{}'".format(args.arch))
        base_model = models.__dict__[args.arch]
    model = moco.my_builder.MoCo(base_model, args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp, args.arch, args.teacher_path)
    print(model)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split batch size and (ceil-divided) workers across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        # NOTE(review): the two statements below are dead code — the branch
        # unconditionally raises right after them.
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    # criterion[0]: contrastive cross-entropy; criterion[1]: summed MSE
    # (presumably for teacher distillation — confirm against train()).
    criterion = [nn.CrossEntropyLoss().cuda(args.gpu), nn.MSELoss(reduction='sum').cuda(args.gpu)]
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map tensors directly onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Load teacher weights, remapping 'encoder' keys to 'teacher_encoder'.
    if args.teacher_path:
        if os.path.isfile(args.teacher_path):
            print('loading teacher from {}'.format(args.teacher_path))
            if (args.gpu is None):
                teacher_checkpoint = torch.load(args.teacher_path)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                teacher_checkpoint = torch.load(args.teacher_path, map_location=loc)
            param_dict = teacher_checkpoint['state_dict']
            new_param_dict = {}
            for (k, v) in param_dict.items():
                new_param_dict[k.replace('encoder', 'teacher_encoder')] = v
            # strict=False: only the teacher branch is expected to match.
            model.load_state_dict(new_param_dict, strict=False)
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # MoCo v2-style augmentation (blur + stronger jitter) vs v1-style.
    if args.aug_plus:
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    else:
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    # Each sample yields two augmented views (query/key).
    train_dataset = datasets.ImageFolder(traindir, moco.loader.TwoCropsTransform(transforms.Compose(augmentation)))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        # Only the (per-node) master rank writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename='./ckpt/checkpoint_{:04d}.pth.tar'.format(epoch))
def load_pickle(path):
    """Deserialize and return the object stored in the pickle file at *path*."""
    print('load', path)
    with open(path, mode='rb') as handle:
        return pickle.load(handle)
def test2():
    """Regression test: single-source shortest paths on a line graph that
    contains one negative-weight edge (D->E)."""
    edges = {('A', 'B'): 1, ('B', 'C'): 2, ('C', 'D'): 3, ('D', 'E'): (- 1), ('E', 'F'): 4}
    expected = {'A': 0, 'B': 1, 'C': 3, 'D': 6, 'E': 5, 'F': 9}
    assert (shortest_paths('A', edges) == expected)
def train(params: Params):
    """Train the detector end-to-end using the settings in *params*.

    Builds the dataloaders, instantiates the model on GPU/CPU, sets up Adam
    with a single LR drop at 75% of the epochs, and delegates the epoch loop
    to ``train_model``.
    """
    if (not os.path.exists(MODEL_FOLDER)):
        os.mkdir(MODEL_FOLDER)
    # NOTE(review): assert is stripped under `python -O`; it only double-checks the mkdir above.
    assert os.path.exists(MODEL_FOLDER), ' Cannot create folder to save trained model: {}'.format(MODEL_FOLDER)
    dataloaders = make_dataloaders(params)
    print('Training set: Dataset size: {}'.format(len(dataloaders['train'].dataset)))
    if ('val' in dataloaders):
        print('Validation set: Dataset size: {}'.format(len(dataloaders['val'].dataset)))
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    model = footandball.model_factory(params.model, 'train')
    model.print_summary(show_architecture=True)
    model = model.to(device)
    # Timestamped run name used for checkpoints/logs.
    model_name = ('model_' + time.strftime('%Y%m%d_%H%M'))
    print('Model name: {}'.format(model_name))
    optimizer = optim.Adam(model.parameters(), lr=params.lr)
    # One LR decay (x0.1) at 75% of the training schedule.
    scheduler_milestones = [int((params.epochs * 0.75))]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, scheduler_milestones, gamma=0.1)
    train_model(model, optimizer, scheduler, params.epochs, dataloaders, device, model_name)
def _weight_align(model, ref_model, tp_model):
    # Synchronize weights across parallelism layouts in two steps:
    # 1) align the tensor-parallel model with the reference model,
    # 2) then align the (DeepSpeed) pipeline model with the tensor-parallel one.
    _tp_weigth_align(tp_model, ref_model)
    _ds_pipe_weight_align(model, tp_model)
class TFModelTesterMixin():
    """Common test suite mixed into per-model TensorFlow test cases.

    FIX: the extracted source began with a stray ``_tf`` token before the
    ``class`` statement (almost certainly the tail of a lost decorator such as
    ``@require_tf``) which made the file a SyntaxError; it has been removed —
    confirm against the upstream file.

    Subclasses must provide ``model_tester`` (configs/inputs factory) and
    ``all_model_classes`` (the TF model classes to exercise).
    """

    model_tester = None        # per-model helper supplying configs and inputs
    all_model_classes = ()     # TF model classes exercised by every test
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    is_encoder_decoder = False

    def test_initialization(self):
        """Placeholder: weight-initialization checks are model specific."""
        pass

    def test_save_load(self):
        """A save_pretrained/from_pretrained round trip must preserve outputs."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs_dict)
                out_1 = after_outputs[0].numpy()
                out_2 = outputs[0].numpy()
                # Drop NaN positions before comparing.
                out_1 = out_1[(~ np.isnan(out_1))]
                out_2 = out_2[(~ np.isnan(out_2))]
                max_diff = np.amax(np.abs((out_1 - out_2)))
                self.assertLessEqual(max_diff, 1e-05)

    def test_pt_tf_model_equivalence(self):
        """TF and PyTorch implementations must agree after cross-loading weights."""
        if (not is_torch_available()):
            return
        import torch
        import transformers
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Derive the PyTorch class name by stripping the leading "TF".
            pt_model_class_name = model_class.__name__[2:]
            pt_model_class = getattr(transformers, pt_model_class_name)
            config.output_hidden_states = True
            tf_model = model_class(config)
            pt_model = pt_model_class(config)
            # In-memory weight round trip: PT -> TF -> PT.
            tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=inputs_dict)
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
            pt_model.eval()
            pt_inputs_dict = dict(((name, torch.from_numpy(key.numpy()).to(torch.long)) for (name, key) in inputs_dict.items()))
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict, training=False)
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()
            # Zero out positions that are NaN on either side so they don't dominate the diff.
            tf_nans = np.copy(np.isnan(tf_hidden_states))
            pt_nans = np.copy(np.isnan(pt_hidden_states))
            pt_hidden_states[tf_nans] = 0
            tf_hidden_states[tf_nans] = 0
            pt_hidden_states[pt_nans] = 0
            tf_hidden_states[pt_nans] = 0
            max_diff = np.amax(np.abs((tf_hidden_states - pt_hidden_states)))
            if (max_diff >= 0.02):
                # Dump the full context before failing, to ease debugging.
                print('===')
                print(model_class)
                print(config)
                print(inputs_dict)
                print(pt_inputs_dict)
            self.assertLessEqual(max_diff, 0.02)
            # Same equivalence check, but through on-disk checkpoints in both formats.
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, 'pt_model.bin')
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
                tf_checkpoint_path = os.path.join(tmpdirname, 'tf_model.h5')
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
            pt_model.eval()
            pt_inputs_dict = dict(((name, torch.from_numpy(key.numpy()).to(torch.long)) for (name, key) in inputs_dict.items()))
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict)
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tf_nans = np.copy(np.isnan(tfo))
            pt_nans = np.copy(np.isnan(pto))
            pto[tf_nans] = 0
            tfo[tf_nans] = 0
            pto[pt_nans] = 0
            tfo[pt_nans] = 0
            max_diff = np.amax(np.abs((tfo - pto)))
            self.assertLessEqual(max_diff, 0.02)

    def test_compile_tf_model(self):
        """Each model must embed and compile inside a larger functional Keras graph."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        if self.is_encoder_decoder:
            input_ids = {'decoder_input_ids': tf.keras.Input(batch_shape=(2, 2000), name='decoder_input_ids', dtype='int32'), 'encoder_input_ids': tf.keras.Input(batch_shape=(2, 2000), name='encoder_input_ids', dtype='int32')}
        else:
            input_ids = tf.keras.Input(batch_shape=(2, 2000), name='input_ids', dtype='int32')
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-05, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                # Build the model (weights are created on first call) before saving.
                outputs = model(inputs_dict)
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]
            # Attach a small classification head and make sure compilation succeeds.
            outputs = tf.keras.layers.Dense(2, activation='softmax', name='outputs')(hidden_states)
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_keyword_and_dict_args(self):
        """Calling with a dict or with keyword arguments must give identical outputs."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            outputs_dict = model(inputs_dict)
            inputs_keywords = copy.deepcopy(inputs_dict)
            input_ids = inputs_keywords.pop(('input_ids' if (not self.is_encoder_decoder) else 'decoder_input_ids'), None)
            outputs_keywords = model(input_ids, **inputs_keywords)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs((output_dict - output_keywords))), 1e-06)

    def test_attention_outputs(self):
        """Attention tensors must be exposed with the expected count and shapes."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        decoder_seq_length = (self.model_tester.decoder_seq_length if hasattr(self.model_tester, 'decoder_seq_length') else self.model_tester.seq_length)
        encoder_seq_length = (self.model_tester.encoder_seq_length if hasattr(self.model_tester, 'encoder_seq_length') else self.model_tester.seq_length)
        decoder_key_length = (self.model_tester.key_length if hasattr(self.model_tester, 'key_length') else decoder_seq_length)
        encoder_key_length = (self.model_tester.key_length if hasattr(self.model_tester, 'key_length') else encoder_seq_length)
        for model_class in self.all_model_classes:
            config.output_attentions = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(inputs_dict)
            attentions = [t.numpy() for t in outputs[(- 1)]]
            self.assertEqual(model.config.output_attentions, True)
            self.assertEqual(model.config.output_hidden_states, False)
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])
            out_len = len(outputs)
            if self.is_encoder_decoder:
                # Encoder/decoder outputs come in matched halves.
                self.assertEqual((out_len % 2), 0)
                decoder_attentions = outputs[((out_len // 2) - 1)]
                self.assertEqual(model.config.output_attentions, True)
                self.assertEqual(model.config.output_hidden_states, False)
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(list(decoder_attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length])
            # Re-run with hidden states enabled: one extra output (two for enc-dec).
            config.output_attentions = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(inputs_dict)
            self.assertEqual((out_len + (2 if self.is_encoder_decoder else 1)), len(outputs))
            self.assertEqual(model.config.output_attentions, True)
            self.assertEqual(model.config.output_hidden_states, True)
            attentions = [t.numpy() for t in outputs[(- 1)]]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])

    def test_hidden_states_output(self):
        """Hidden states: embeddings output plus one tensor per layer, right shape."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            config.output_hidden_states = True
            config.output_attentions = False
            model = model_class(config)
            outputs = model(inputs_dict)
            hidden_states = [t.numpy() for t in outputs[(- 1)]]
            self.assertEqual(model.config.output_attentions, False)
            self.assertEqual(model.config.output_hidden_states, True)
            self.assertEqual(len(hidden_states), (self.model_tester.num_hidden_layers + 1))
            self.assertListEqual(list(hidden_states[0].shape[(- 2):]), [self.model_tester.seq_length, self.model_tester.hidden_size])

    def test_model_common_attributes(self):
        """Input embeddings must be a Keras layer; output embeddings a layer or None."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_embeddings()
            assert ((x is None) or isinstance(x, tf.keras.layers.Layer))

    def test_determinism(self):
        """Two inference calls with training=False must produce identical outputs."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            (first, second) = (model(inputs_dict, training=False)[0], model(inputs_dict, training=False)[0])
            out_1 = first.numpy()
            out_2 = second.numpy()
            out_1 = out_1[(~ np.isnan(out_1))]
            out_2 = out_2[(~ np.isnan(out_2))]
            max_diff = np.amax(np.abs((out_1 - out_2)))
            self.assertLessEqual(max_diff, 1e-05)

    def _get_embeds(self, wte, input_ids):
        """Best-effort embedding lookup.

        Models differ in their embedding-call signature, so probe the known
        variants in turn and fall back to a dummy constant tensor when none
        works.
        """
        try:
            x = wte(input_ids, mode='embedding')
        except Exception:
            try:
                x = wte([input_ids], mode='embedding')
            except Exception:
                try:
                    x = wte([input_ids, None, None, None], mode='embedding')
                except Exception:
                    if hasattr(self.model_tester, 'embedding_size'):
                        x = tf.ones((input_ids.shape + [self.model_tester.embedding_size]), dtype=tf.dtypes.float32)
                    else:
                        x = tf.ones((input_ids.shape + [self.model_tester.hidden_size]), dtype=tf.dtypes.float32)
        return x

    def test_inputs_embeds(self):
        """Models must accept precomputed embeddings in place of input ids."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        if (not self.is_encoder_decoder):
            input_ids = inputs_dict['input_ids']
            del inputs_dict['input_ids']
        else:
            encoder_input_ids = inputs_dict['encoder_input_ids']
            decoder_input_ids = inputs_dict['decoder_input_ids']
            del inputs_dict['encoder_input_ids']
            del inputs_dict['decoder_input_ids']
        for model_class in self.all_model_classes:
            model = model_class(config)
            wte = model.get_input_embeddings()
            if (not self.is_encoder_decoder):
                inputs_dict['inputs_embeds'] = self._get_embeds(wte, input_ids)
            else:
                inputs_dict['encoder_inputs_embeds'] = self._get_embeds(wte, encoder_input_ids)
                inputs_dict['decoder_inputs_embeds'] = self._get_embeds(wte, decoder_input_ids)
            model(inputs_dict)
def add_prefix_each_line(prefix, str):
    """Prepend *prefix* to every line of *str* and rejoin with newlines.

    NOTE: the second parameter shadows the builtin `str`; the name is kept
    unchanged for interface compatibility with existing keyword callers.
    """
    return '\n'.join(f'{prefix}{segment}' for segment in str.split('\n'))
def preprocess_for_sft(df: pd.DataFrame, prompt_dict: dict, tokenizer: transformers.PreTrainedTokenizer, df_postprocessor=None, verbose=True) -> dict[(str, Union[(torch.Tensor, Sequence[torch.Tensor])])]:
    """Tokenize (prompt, response) pairs for supervised fine-tuning.

    Each example is the concatenation ``prompt + target``; the labels copy the
    input ids but mask the prompt span with ``IGNORE_INDEX`` so the loss is
    computed on the response tokens only.

    Returns a dict with ``input_ids``, ``labels``, empty ``metadata``, and the
    tokenizer's ``tokenization_metadata``.
    """
    if (df_postprocessor is not None):
        df = df_postprocessor(df)
    list_dict_data = df.to_dict(orient='records')
    sources = [format_prompt(dict_data, prompt_dict) for dict_data in list_dict_data]
    targets = [format_output(dict_data, eos_token=tokenizer.eos_token) for dict_data in list_dict_data]
    examples = [(s + t) for (s, t) in utils.zip_(sources, targets)]
    # Tokenize full examples and prompts separately so prompt lengths are known.
    (examples_tokenized, sources_tokenized) = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
    input_ids = examples_tokenized['input_ids']
    labels = copy.deepcopy(input_ids)
    # Mask out the prompt tokens; only the response contributes to the loss.
    for (label, source_len) in utils.zip_(labels, sources_tokenized['input_ids_lens']):
        label[:source_len] = constants.IGNORE_INDEX
    packaged_data = dict(input_ids=input_ids, labels=labels, metadata=dict(), tokenization_metadata=examples_tokenized['tokenization_metadata'])
    if verbose:
        logger.warning(f'''Tokenization metadata: {utils.jdumps(packaged_data['tokenization_metadata'])}''')
    return packaged_data
def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, dilation_w, group, device): (_, H_, W_, _) = spatial_shapes points_list = [] (x, y) = torch.meshgrid(torch.linspace((- ((dilation_w * (kernel_w - 1)) // 2)), ((- ((dilation_w * (kernel_w - 1)) // 2)) + ((kernel_w - 1) * dilation_w)), kernel_w, dtype=torch.float32, device=device), torch.linspace((- ((dilation_h * (kernel_h - 1)) // 2)), ((- ((dilation_h * (kernel_h - 1)) // 2)) + ((kernel_h - 1) * dilation_h)), kernel_h, dtype=torch.float32, device=device)) points_list.extend([(x / W_), (y / H_)]) grid = torch.stack(points_list, (- 1)).reshape((- 1), 1, 2).repeat(1, group, 1).permute(1, 0, 2) grid = grid.reshape(1, 1, 1, ((group * kernel_h) * kernel_w), 2) return grid
def train():
    """Run the full adversarial-autoencoder training loop.

    Each epoch interleaves autoencoder steps with GAN discriminator/generator
    steps, then evaluates reconstruction and LM-based perplexities; early
    stopping tracks the best reverse perplexity. Relies on module-level
    globals (args, corpus, autoencoder, test_data, train_ae, train_gan_*,
    logging, save_model, ...).
    """
    logging('Training')
    train_data = batchify(corpus.train, args.batch_size, shuffle=True)
    # Optional schedule: epochs at which the number of GAN loops increases.
    if (args.niters_gan_schedule != ''):
        gan_schedule = [int(x) for x in args.niters_gan_schedule.split('-')]
    else:
        gan_schedule = []
    niter_gan = 1
    # Fixed noise reused each epoch so generator samples are comparable.
    fixed_noise = Variable(torch.ones(args.batch_size, args.z_size).normal_(0, 1).cuda())
    best_rev_ppl = None
    impatience = 0
    for epoch in range(1, (args.epochs + 1)):
        if (epoch in gan_schedule):
            niter_gan += 1
            logging('GAN training loop schedule: {}'.format(niter_gan))
        total_loss_ae = 0
        epoch_start_time = time.time()
        start_time = time.time()
        niter = 0
        niter_g = 1
        while (niter < len(train_data)):
            # --- autoencoder steps on consecutive batches ---
            for i in range(args.niters_ae):
                if (niter >= len(train_data)):
                    break
                (total_loss_ae, start_time) = train_ae(epoch, train_data[niter], total_loss_ae, start_time, niter)
                niter += 1
            # --- GAN steps on randomly drawn batches ---
            for k in range(niter_gan):
                for i in range(args.niters_gan_d):
                    (errD, errD_real, errD_fake) = train_gan_d(train_data[random.randint(0, (len(train_data) - 1))])
                for i in range(args.niters_gan_ae):
                    train_gan_d_into_ae(train_data[random.randint(0, (len(train_data) - 1))])
                for i in range(args.niters_gan_g):
                    errG = train_gan_g()
            niter_g += 1
            # Every 100 GAN passes: anneal AE noise and log current losses.
            if ((niter_g % 100) == 0):
                autoencoder.noise_anneal(args.noise_anneal)
                # NOTE(review): `.data[0]` is pre-0.4 PyTorch; `.item()` on modern torch.
                logging('[{}/{}][{}/{}] Loss_D: {:.8f} (Loss_D_real: {:.8f} Loss_D_fake: {:.8f}) Loss_G: {:.8f}'.format(epoch, args.epochs, niter, len(train_data), errD.data[0], errD_real.data[0], errD_fake.data[0], errG.data[0]))
        # --- end-of-epoch evaluation ---
        (test_loss, accuracy) = evaluate_autoencoder(test_data, epoch)
        logging('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | test ppl {:5.2f} | acc {:3.3f}'.format(epoch, (time.time() - epoch_start_time), test_loss, math.exp(test_loss), accuracy))
        gen_fixed_noise(fixed_noise, os.path.join(args.save, '{:03d}_examplar_gen'.format(epoch)))
        # Train an external LM on generated text to score sample quality.
        (rev_ppl, for_ppl) = train_lm(args.data_path)
        logging('Epoch {:03d}, Reverse perplexity {}'.format(epoch, rev_ppl))
        logging('Epoch {:03d}, Forward perplexity {}'.format(epoch, for_ppl))
        if ((best_rev_ppl is None) or (rev_ppl < best_rev_ppl)):
            # New best model: reset patience and checkpoint.
            impatience = 0
            best_rev_ppl = rev_ppl
            logging('New saving model: epoch {:03d}.'.format(epoch))
            save_model()
        elif ((not args.no_earlystopping) and (epoch >= args.min_epochs)):
            impatience += 1
            if (impatience > args.patience):
                logging('Ending training')
                sys.exit()
def train_cifar_track_acc(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device, num_epochs=200, verbose=False, use_intermediate=False):
    """Train/evaluate on 'train'/'dev' loaders, tracking the best dev accuracy.

    Returns the best dev accuracy (1 - best error) and leaves *model* loaded
    with the best-performing weights and set to eval mode.
    `use_intermediate` adds a 0.4-weighted auxiliary loss on the model's
    intermediate head output.
    """
    best_model_sd = copy.deepcopy(model.state_dict())
    best_error = 1e+100
    # Secondary CE loss used only for the intermediate/auxiliary head.
    criterion2 = torch.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        if verbose:
            print()
        for phase in ['train', 'dev']:
            if (phase == 'train'):
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                (rgb, gt_label) = (data[0], data[1])
                rgb = rgb.to(device)
                gt_label = gt_label.to(device)
                optimizer.zero_grad()
                # Gradients only during the training phase.
                with torch.set_grad_enabled((phase == 'train')):
                    (output, output_i) = model(rgb)
                    if (not use_intermediate):
                        loss = criterion(output, gt_label)
                    else:
                        # Deep-supervision style: intermediate head weighted by 0.4.
                        loss = (criterion(output, gt_label) + (0.4 * criterion2(output_i, gt_label)))
                    (_, preds) = torch.max(output, 1)
                    if (phase == 'train'):
                        # NOTE(review): scheduler.step() runs before optimizer.step(),
                        # which modern PyTorch warns about — confirm the intended
                        # LR schedule before reordering.
                        scheduler.step()
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += (loss.item() * rgb.size(0))
                running_corrects += torch.sum((preds == gt_label.data))
            epoch_error = (1.0 - (running_corrects.double() / dataset_sizes[phase]))
            if (phase == 'dev'):
                # Keep a snapshot of the best dev-phase weights.
                if (epoch_error < best_error):
                    best_error = epoch_error
                    best_model_sd = copy.deepcopy(model.state_dict())
                if verbose:
                    print('Epoch #{} val error: {}'.format(epoch, epoch_error))
    model.load_state_dict(best_model_sd)
    model.train(False)
    if verbose:
        print('Best val error: {}'.format(best_error))
    return (1.0 - best_error)
def export_tracing(torch_model, inputs):
    """Export *torch_model* via torch.jit tracing to TorchScript or ONNX.

    Reads the module-level `args` for the export format and output directory.
    Returns an evaluation wrapper (traced model + detectron2-style
    postprocessing) for TorchScript exports of GeneralizedRCNN/RetinaNet,
    otherwise None.
    """
    assert (TORCH_VERSION >= (1, 8))
    image = inputs[0]['image']
    # Keep only the image field; extra keys don't trace.
    inputs = [{'image': image}]
    if isinstance(torch_model, GeneralizedRCNN):
        def inference(model, inputs):
            # Skip postprocessing: its dynamic resizing logic does not trace well.
            inst = model.inference(inputs, do_postprocess=False)[0]
            return [{'instances': inst}]
    else:
        inference = None
    traceable_model = TracingAdapter(torch_model, inputs, inference)
    if (args.format == 'torchscript'):
        ts_model = torch.jit.trace(traceable_model, (image,))
        with PathManager.open(os.path.join(args.output, 'model.ts'), 'wb') as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
    elif (args.format == 'onnx'):
        with PathManager.open(os.path.join(args.output, 'model.onnx'), 'wb') as f:
            torch.onnx.export(traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION)
    logger.info(('Inputs schema: ' + str(traceable_model.inputs_schema)))
    logger.info(('Outputs schema: ' + str(traceable_model.outputs_schema)))
    # Evaluation wrapper only supported for TorchScript RCNN/RetinaNet exports.
    if (args.format != 'torchscript'):
        return None
    if (not isinstance(torch_model, (GeneralizedRCNN, RetinaNet))):
        return None
    def eval_wrapper(inputs):
        # Run the traced model and re-apply postprocessing for evaluation.
        input = inputs[0]
        instances = traceable_model.outputs_schema(ts_model(input['image']))[0]['instances']
        postprocessed = detector_postprocess(instances, input['height'], input['width'])
        return [{'instances': postprocessed}]
    return eval_wrapper
def cfg():
    """Experiment configuration.

    NOTE(review): looks like a sacred-style ``@ex.config`` function where every
    local variable becomes a config entry — confirm the decorator upstream.
    Variable names must therefore not be changed.
    """
    seed = 2021
    gpu_id = 0
    num_workers = 0
    mode = 'train'
    dataset = 'CHAOST2'
    exclude_label = [1, 2, 3, 4]
    # Supervoxel count differs for the (smaller) CMR dataset.
    if (dataset == 'CMR'):
        n_sv = 1000
    else:
        n_sv = 5000
    min_size = 200
    max_slices = 3
    use_gt = False
    # Cross-validation fold used for evaluation.
    eval_fold = 0
    test_label = [1, 4]
    supp_idx = 0
    n_part = 3
    n_steps = 1000
    batch_size = 1
    # Few-shot episode layout: 1-shot, 1-way, 1 query.
    n_shot = 1
    n_way = 1
    n_query = 1
    lr_step_gamma = 0.95
    bg_wt = 0.1
    t_loss_scaler = 0.0
    ignore_label = 255
    print_interval = 100
    save_snapshot_every = 1000
    max_iters_per_load = 1000
    reload_model_path = None
    optim_type = 'sgd'
    optim = {'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0005}
    # Human-readable experiment tag, e.g. 'train_CHAOST2_cv0'.
    exp_str = '_'.join((([mode] + [dataset]) + [f'cv{eval_fold}']))
    path = {'log_dir': './runs', 'CHAOST2': {'data_dir': './data/CHAOST2'}, 'SABS': {'data_dir': './data/SABS'}, 'CMR': {'data_dir': './data/CMR'}}
def mse_loss_plus_rank_loss(output, target):
    """Return ``[relative_error_loss, ranking_loss]`` for 1-D predictions.

    loss1: mean over elements of |output - target| / (target + 0.001)
           (the sqrt of the per-element squared error equals the absolute
           error; 0.001 guards against division by zero).
    loss2: MarginRankingLoss encouraging each successive prediction to exceed
           its predecessor (output[i+1] > output[i]). With fewer than two
           elements a dummy ordered pair (2, 1) is used, making the loss 0.
    """
    cost = output
    target_cost = target
    if (output.size()[0] > 1):
        earlier = output[:(- 1)]
        later = output[1:]
    else:
        # Not enough elements to rank; dummy pair already satisfies the margin.
        earlier = torch.ones(1)
        later = (2 * torch.ones(1))
    # FIX: ones_like keeps device/dtype consistent with the inputs
    # (plain torch.ones(...) would sit on the CPU and break for CUDA tensors).
    target_rank = torch.ones_like(earlier)
    # FIX: reduction='none' is the modern spelling of the deprecated reduce=False.
    elementwise_mse = nn.MSELoss(reduction='none')
    loss1 = (torch.sqrt(elementwise_mse(cost, target_cost)) / (target_cost + 0.001))
    loss1 = torch.mean(loss1)
    loss_rank = nn.MarginRankingLoss()
    loss2 = loss_rank(later, earlier, target_rank)
    return [loss1, loss2]
def compute_score_with_logits(logits, labels):
    """Return a boolean tensor marking positions where the argmax prediction
    over the last dimension equals the corresponding label."""
    predictions = torch.max(logits, (- 1))[1].data
    return (predictions == labels)
def main(cfg):
    """Train a DRNSeg segmentation model according to the config object *cfg*.

    Sets up (or resumes into) log/checkpoint directories, builds the model,
    optimizer and paired image/label augmentation pipelines, then runs the
    epoch loop with TensorBoard logging, validation, a visualization image,
    and periodic checkpointing (re-writing the config with resume info).
    """
    # Resume reuses the recorded log dir and the checkpoint's own directory;
    # otherwise create fresh timestamped directories.
    if (cfg.training.resume is not None):
        log_dir = cfg.training.log_dir
        checkpoint_dir = os.path.dirname(cfg.training.resume)
    else:
        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f')
        log_dir = os.path.join(cfg.training.logs_dir, '{}_{}'.format(timestamp, cfg.training.experiment_name))
        checkpoint_dir = os.path.join(cfg.training.checkpoints_dir, '{}_{}'.format(timestamp, cfg.training.experiment_name))
        if (not os.path.exists(checkpoint_dir)):
            os.makedirs(checkpoint_dir)
    print('log_dir: {}'.format(log_dir))
    print('checkpoint_dir: {}'.format(checkpoint_dir))
    single_model = models.DRNSeg(cfg.arch, cfg.data.classes, None, pretrained=True)
    model = torch.nn.DataParallel(single_model).cuda()
    cudnn.benchmark = True
    criterion = nn.NLLLoss().cuda()
    # Optimize the un-wrapped model's parameter groups.
    optimizer = torch.optim.SGD(single_model.optim_parameters(), cfg.optimizer.lr, momentum=cfg.optimizer.momentum, weight_decay=cfg.optimizer.weight_decay)
    start_epoch = 0
    if (cfg.training.resume is not None):
        if os.path.isfile(cfg.training.resume):
            print("=> loading checkpoint '{}'".format(cfg.training.resume))
            checkpoint = torch.load(cfg.training.resume)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(cfg.training.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(cfg.training.resume))
    # --- paired augmentation pipelines: images use bilinear interpolation,
    # labels nearest-neighbor, sharing the same random generators so each
    # image/label pair stays geometrically aligned. ---
    crop_transform = transforms.CropTransform(shape=(640, 480))
    zoom_generator = transforms.RandomIntGenerator(480, 540)
    zoom_bilinear_transform = transforms.ZoomTransform(interpolation='bilinear', generator=zoom_generator)
    zoom_nearest_transform = transforms.ZoomTransform(interpolation='nearest', generator=zoom_generator)
    rotate_freq_generator = transforms.RandomFloatGenerator()
    rotate_angle_generator = transforms.RandomFloatGenerator()
    rotate_bilinear_transform = transforms.FrequencyTransform(freq=0.5, transform=transforms.RotateTransform(interpolation='bilinear', generator=rotate_angle_generator), generator=rotate_freq_generator)
    rotate_nearest_transform = transforms.FrequencyTransform(freq=0.5, transform=transforms.RotateTransform(interpolation='nearest', generator=rotate_angle_generator), generator=rotate_freq_generator)
    brightness_generator = transforms.RandomFloatGenerator()
    gamma_transform = transforms.BrightnessTransform(0.5, 1.5, brightness_generator)
    train_image_transforms = (zoom_bilinear_transform, rotate_bilinear_transform, crop_transform, gamma_transform, transforms.ToTensorTransform(torch.FloatTensor))
    label_transforms = (zoom_nearest_transform, rotate_nearest_transform, crop_transform, transforms.ToTensorTransform(torch.LongTensor))
    train_transforms = transforms.ParallelTransform([train_image_transforms, label_transforms])
    val_transforms = transforms.Compose([transforms.ToTensor()])
    if cfg.data.train_all:
        train_dataset = datasets.Dataset(cfg.data.root, cfg.data.ann_file, 'train', train_transforms)
    else:
        train_dataset = datasets.Dataset(cfg.data.root, ('train_' + cfg.data.ann_file), 'train', train_transforms)
    val_dataset = datasets.Dataset(cfg.data.root, ('val_' + cfg.data.ann_file), 'val', val_transforms)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.data.batch_size, shuffle=True, num_workers=cfg.data.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=cfg.data.batch_size, shuffle=True, num_workers=cfg.data.workers, pin_memory=True)
    train_summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
    val_summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'val'))
    visualization_summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'visualization'))
    for epoch in range(start_epoch, cfg.training.epochs):
        lr = adjust_learning_rate(optimizer, epoch)
        train_summary_writer.add_scalar('learning_rate', lr, (epoch + 1))
        (train_batch_time, train_data_time, train_loss) = train(train_loader, model, criterion, optimizer, epoch)
        train_summary_writer.add_scalar('batch_time', train_batch_time, (epoch + 1))
        train_summary_writer.add_scalar('data_time', train_data_time, (epoch + 1))
        train_summary_writer.add_scalar('loss', train_loss, (epoch + 1))
        (val_batch_time, val_data_time, val_loss, val_accuracy, val_ious) = validate(val_loader, model, criterion)
        val_summary_writer.add_scalar('batch_time', val_batch_time, (epoch + 1))
        val_summary_writer.add_scalar('data_time', val_data_time, (epoch + 1))
        val_summary_writer.add_scalar('loss', val_loss, (epoch + 1))
        val_summary_writer.add_scalar('accuracy', val_accuracy, (epoch + 1))
        for (i, iou) in enumerate(val_ious):
            # Skip NaN/zero IoUs (classes absent from the validation set).
            if ((not np.isnan(iou)) and (iou != 0)):
                val_summary_writer.add_scalar('iou_{}'.format(cfg.data.class_names[i]), iou, (epoch + 1))
        # BUG FIX: `iter(val_loader).next()` is Python-2 iterator syntax and raises
        # AttributeError on Python 3 — use the builtin next().
        (first_input_batch, first_target_batch) = next(iter(val_loader))
        rendered = visualize_batch(utils.visualize, model, first_input_batch, first_target_batch)
        visualization_summary_writer.add_image('segmentation', torch.from_numpy(rendered).permute(2, 0, 1), (epoch + 1))
        if (((epoch + 1) % cfg.training.checkpoint_epochs) == 0):
            checkpoint_path = save_checkpoint(checkpoint_dir, {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, (epoch + 1))
            # Persist resume information next to the logs so the run can be continued.
            cfg.training.log_dir = log_dir
            cfg.training.resume = checkpoint_path
            with open(os.path.join(log_dir, 'config.yml'), 'w') as f:
                f.write(cfg.toYAML())
class DataLoader(object):
    """Framework-agnostic data-loader configuration holder.

    FIX: the extracted source had a stray ``(version='2.0')`` before the
    ``class`` statement — almost certainly the tail of a lost registration/
    deprecation decorator — which made the file a SyntaxError; it has been
    removed. Confirm against the upstream file.

    Stores the batching/sampling settings verbatim; iteration behavior is
    presumably implemented elsewhere (subclass or adapter) — not visible here.
    """

    def __init__(self, dataset, batch_size=1, collate_fn=None, last_batch='rollover', sampler=None, batch_sampler=None, num_workers=0, pin_memory=False, shuffle=False, distributed=False):
        # Fail fast on datasets that cannot be iterated or indexed.
        # (assert is stripped under `python -O`; kept to preserve the original
        # exception type callers may rely on.)
        assert (hasattr(dataset, '__iter__') or hasattr(dataset, '__getitem__')), 'dataset must implement __iter__ or __getitem__ magic method!'
        self.dataset = dataset
        self.batch_size = batch_size
        self.collate_fn = collate_fn
        self.last_batch = last_batch
        self.sampler = sampler
        self.batch_sampler = batch_sampler
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.shuffle = shuffle
        self.distributed = distributed
def make_save_dir(yaml, img_shape, scale_num, scale_factor, sr_rate=None):
    """Compose a unique, self-describing output directory name for a run.

    The name encodes the dataset (image file stem), image size, scale count,
    channel width, scale factor (or SR rate), bracketed loss names, optional
    'RGB' flag, and training hyper-parameters, prefixed with the next free
    3-digit run counter found under ./outs.

    Returns ``(full_path, counter)``; the directory itself is not created.
    """
    save_dir = []
    # Dataset name (image file stem) in brackets.
    save_dir.append(f"[{os.path.basename(yaml.DATASET.img_path).split('.')[0]}]")
    save_dir.append('{}x{}'.format(*img_shape))
    save_dir.append(f'S{scale_num}')
    save_dir.append(f'CH{yaml.NET.net_ch}')
    if (sr_rate is None):
        # e.g. 0.793 -> 'SF793' (the leading '0.' is stripped).
        save_dir.append(f'SF{scale_factor:.3f}'.replace('0.', ''))
    else:
        save_dir.append(f'SRx{sr_rate}')
    # Bracket the loss list as '[loss1_loss2_..._lossN]'.
    for (i, loss) in enumerate(yaml.TRAIN.losses):
        if (i == 0):
            loss = ('[' + loss)
        if (i == (len(yaml.TRAIN.losses) - 1)):
            loss = (loss + ']')
        save_dir.append(loss)
    if yaml.TRAIN.interm_rgb:
        save_dir.append('RGB')
    save_dir.append(f'[{str(yaml.TRAIN.iter_per_scale)}|{str(yaml.TRAIN.pixel_shuffle_p)}]')
    # Next run number: one past the max of (dir count, highest existing
    # 3-digit prefix) among previous output folders ending in ']'.
    folders = glob(f'{os.getcwd()}/outs/*]')
    cnt = len(glob(f'{os.getcwd()}/outs/*]'))
    for f in folders:
        try:
            cnt = max(cnt, int(os.path.basename(f)[:3]))
        except Exception:
            # Folder name without a numeric prefix — ignore it.
            continue
    cnt += 1
    return (f"{os.getcwd()}/outs/{cnt:03d}_{'_'.join(save_dir)}_[{'_'.join(yaml.DESC)}]", cnt)
def _load_encoders_parallel(encoder_paths, n_processes=None):
    """Load one expert encoder per path using a process pool, then move each
    loaded encoder onto the GPU. Pool size is capped by the CPU count and by
    *n_processes* (defaults to one worker per path)."""
    if n_processes is None:
        worker_count = len(encoder_paths)
    else:
        worker_count = min(len(encoder_paths), n_processes)
    pool_size = min(multiprocessing.cpu_count(), worker_count)
    pool = multiprocessing.Pool(min(pool_size, worker_count))
    experts = pool.map(_load_encoder, encoder_paths)
    pool.close()
    pool.join()
    return [encoder.cuda() for encoder in experts]
def add_train_args(parser: argparse.ArgumentParser):
    """Register every training-related CLI flag on *parser*.

    Covers data/output paths, logging and evaluation cadence, model size,
    LoRA settings, optimizer hyper-parameters, the LR schedule, and
    checkpointing.  Only ``--data_dir`` is mandatory; all other flags carry
    defaults (``required=False`` is argparse's default and is left implicit).
    """
    add = parser.add_argument
    # Data and output locations.
    add('--data_dir', type=str, required=True)
    add('--out_dir', type=str, default='out')
    # Evaluation / logging cadence.
    add('--eval_interval', type=int, default=2000)
    add('--log_interval', type=int, default=1)
    add('--eval_iters', type=int, default=200)
    add('--eval_only', action='store_true')
    # Batch geometry and schedule length.
    add('--batch_size', type=int, default=32)
    add('--block_size', type=int, default=128)
    add('--epochs', type=int, default=1)
    # Transformer architecture.
    add('--n_layer', type=int, default=6)
    add('--n_head', type=int, default=6)
    add('--n_embd', type=int, default=384)
    add('--dropout', type=float, default=0.0)
    add('--bias', action='store_true')
    # LoRA fine-tuning options.
    add('--lora_rank', type=int, default=4)
    add('--lora_dropout', type=float, default=0.0)
    add('--lora_alpha', type=float, default=1.0)
    add('--lora_targets', type=str, default='wq,wk,wo,wv',
        help='comma separated list of targets to apply lora to')
    # Optimizer (AdamW-style) hyper-parameters.
    add('--learning_rate', type=float, default=0.0006)
    add('--max_iters', type=int, default=2000)
    add('--weight_decay', type=float, default=0.1)
    add('--beta1', type=float, default=0.9)
    add('--beta2', type=float, default=0.95)
    add('--grad_clip', type=float, default=1.0)
    add('--gradient_accumulation_steps', type=int, default=0)
    # Learning-rate decay schedule.
    add('--decay_lr', action='store_true')
    add('--warmup_iters', type=int, default=0)
    add('--lr_decay_iters', type=int, default=10)
    add('--min_lr', type=float, default=6e-05)
    # Runtime / checkpointing.
    add('--compile', type=str, default='False')
    add('--save_memory_interval', type=int, default=20)
    add('--save_storage_interval', type=int, default=200)
    add('--use_native_ckpt', action='store_true')
    add('--save_dir', type=str, default='/tmp/checkpoint/')
class LogAudioCallback(Callback):
    """Log paired target / reconstructed audio for the first batches of a split.

    Targets are captured in the ``on_*_batch_start`` hooks; reconstructions
    are captured by temporarily wrapping the LightningModule's ``forward``.
    Audio is logged either after ``n_batches`` batches of the split or at
    epoch end (``log_on_epoch_end``), to WandB or TensorBoard.
    """

    # Assigned in setup(): the LightningModule whose forward gets wrapped.
    model: pl.LightningModule
    # The module's original bound forward, stashed while the wrapper is installed.
    stored_forward: MethodType

    def __init__(self, on_train: bool, on_val: bool, on_test: bool, save_audio_sr: int=48000, n_batches: int=1, log_on_epoch_end: bool=False, max_audio_samples: int=8):
        """Configure which splits to log, how many batches, and audio limits.

        Args:
            on_train / on_val / on_test: enable capture for that split.
            save_audio_sr: sample rate used when logging / saving audio.
            n_batches: number of leading batches to capture per split.
            log_on_epoch_end: log once at epoch end instead of right after
                the first ``n_batches`` batches.
            max_audio_samples: cap on the number of examples logged.
        """
        self.on_train = on_train
        self.on_val = on_val
        self.on_test = on_test
        self.save_audio_sr = save_audio_sr
        self.n_batches = n_batches
        # Per-split buffers of captured CPU tensors (cleared after each log).
        self.saved_targets: dict[(str, list[Any])] = dict(train=[], val=[], test=[])
        self.saved_reconstructions: dict[(str, list[Any])] = dict(train=[], val=[], test=[])
        self.log_on_epoch_end = log_on_epoch_end
        self.max_audio_samples = max_audio_samples

    def setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: str) -> None:
        # Keep a handle on the module so forward can be wrapped/unwrapped later.
        self.model = pl_module

    def _wrap_forward(self, split: str) -> None:
        """Replace ``model.forward`` with a wrapper that also records its output."""
        self.stored_forward = self.model.forward

        def wrapped_forward(self, *args, callback=None, split=None, **kwargs):
            # Delegate to the original forward, then stash its output as the
            # reconstruction for this split.
            output = callback.stored_forward(*args, **kwargs)
            callback._save_batch(output, split, 'reconstruction')
            return output
        # Bind the wrapper to the model and freeze callback/split via partial.
        wrapped_forward = partial(MethodType(wrapped_forward, self.model), callback=self, split=split)
        self.model.forward = wrapped_forward

    def _unwrap_forward(self) -> None:
        """Restore the original ``model.forward``."""
        self.model.forward = self.stored_forward

    def on_train_batch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule, batch: Any, batch_idx: int) -> None:
        if (self.on_train and (batch_idx < self.n_batches)):
            self._wrap_forward('train')
            # batch[0] is treated as the target audio tensor -- presumably the
            # datamodule yields (audio, ...) tuples; TODO confirm.
            self._save_batch(batch[0], 'train', 'target')

    def on_train_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int) -> None:
        if (self.on_train and (batch_idx < self.n_batches)):
            self._unwrap_forward()
        elif (self.on_train and (batch_idx == self.n_batches) and (not self.log_on_epoch_end)):
            # All n_batches captured; log immediately unless deferring to epoch end.
            self._log_audio('train')
            self._clear_saved_batches('train')

    def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        if (self.on_train and self.log_on_epoch_end):
            self._log_audio('train')
            self._clear_saved_batches('train')

    def on_validation_batch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule, batch: Any, batch_idx: int, dataloader_idx: int=0) -> None:
        if (self.on_val and (batch_idx < self.n_batches)):
            self._wrap_forward('val')
            self._save_batch(batch[0], 'val', 'target')

    def on_validation_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int=0) -> None:
        if (self.on_val and (batch_idx < self.n_batches)):
            self._unwrap_forward()
        elif (self.on_val and (batch_idx == self.n_batches) and (not self.log_on_epoch_end)):
            self._log_audio('val')
            self._clear_saved_batches('val')

    def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        if (self.on_val and self.log_on_epoch_end):
            self._log_audio('val')
            self._clear_saved_batches('val')

    def on_test_batch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        if (self.on_test and (batch_idx < self.n_batches)):
            self._wrap_forward('test')
            self._save_batch(batch[0], 'test', 'target')

    def on_test_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        if (self.on_test and (batch_idx < self.n_batches)):
            self._unwrap_forward()
        elif (self.on_test and (batch_idx == self.n_batches) and (not self.log_on_epoch_end)):
            self._log_audio('test')
            self._clear_saved_batches('test')

    def on_test_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        if (self.on_test and self.log_on_epoch_end):
            self._log_audio('test')
            self._clear_saved_batches('test')

    def _save_batch(self, batch: Any, split: str, type: Literal[('target', 'reconstruction')]) -> None:
        """Detach *batch* to CPU and append it to the buffer named by *type*."""
        batch = batch.detach().cpu()
        if (type == 'target'):
            self.saved_targets[split].append(batch)
        elif (type == 'reconstruction'):
            self.saved_reconstructions[split].append(batch)
        else:
            raise ValueError(f'Unknown type {type}')

    def _clear_saved_batches(self, split: str) -> None:
        """Drop all captured tensors for *split*."""
        self.saved_targets[split] = []
        self.saved_reconstructions[split] = []

    def _log_audio(self, split: str) -> None:
        """Interleave saved targets/reconstructions and log them as one clip."""
        if ((len(self.saved_targets[split]) == 0) or (len(self.saved_reconstructions[split]) == 0)):
            # Nothing captured (or forward never ran) -- skip silently.
            return
        targets = torch.cat(self.saved_targets[split], dim=0)
        reconstructions = torch.cat(self.saved_reconstructions[split], dim=0)
        if (self.max_audio_samples is not None):
            targets = targets[:self.max_audio_samples]
            reconstructions = reconstructions[:self.max_audio_samples]
        # zip yields (target, reconstruction) pairs; reducing with + flattens
        # them into one interleaved tuple: t0, r0, t1, r1, ...
        signals = reduce((lambda x, y: (x + y)), zip(targets, reconstructions))
        audio_signal = torch.hstack(signals).cpu()
        if isinstance(self.model.logger, WandbLogger):
            audio_signal = audio_signal.squeeze().numpy()
            audio = Audio(audio_signal, caption=f'{split}/audio', sample_rate=self.save_audio_sr)
            # Redundant with the isinstance check above, but kept as-is.
            if (self.model.logger is not None):
                self.model.logger.experiment.log({f'{split}/audio': audio})
        elif isinstance(self.model.logger, TensorBoardLogger):
            # TensorBoard branch writes a wav file next to the logs instead.
            outdir = Path(self.model.logger.log_dir).joinpath('audio')
            outdir.mkdir(parents=True, exist_ok=True)
            torchaudio.save(str(outdir.joinpath(f'{split}.wav')), audio_signal, self.save_audio_sr)
class CenterLoss(nn.Module):
    """Center loss (Wen et al., ECCV 2016).

    Maintains one learnable center per class and penalizes the squared
    Euclidean distance between each feature vector and its class center.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): dimensionality of the feature vectors.
        use_gpu (bool): place the centers (and class index tensor) on CUDA.
    """

    def __init__(self, num_classes=10, feat_dim=2, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu
        centers = torch.randn(self.num_classes, self.feat_dim)
        if self.use_gpu:
            centers = centers.cuda()
        self.centers = nn.Parameter(centers)

    def forward(self, x, labels):
        """Compute the mean squared distance of each sample to its class center.

        Args:
            x: feature matrix of shape (batch_size, feat_dim).
            labels: ground-truth class indices of shape (batch_size,).

        Returns:
            Scalar loss tensor.
        """
        batch_size = x.size(0)
        # ||x||^2 + ||c||^2 broadcast to a (batch_size, num_classes) matrix ...
        distmat = (torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes)
                   + torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t())
        # ... minus 2 * x @ centers^T completes the squared distances.
        # Keyword form replaces the deprecated positional addmm_(1, -2, ...).
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        # mask[i, j] is True iff sample i belongs to class j.
        mask = labels.eq(classes.expand(batch_size, self.num_classes))
        dist = []
        for i in range(batch_size):
            value = distmat[i][mask[i]]
            # Clamp for numerical stability.  BUG FIX: the original used
            # max=.0, i.e. min > max, which collapsed every distance to zero
            # and made the loss identically 0; the reference implementation
            # clamps to [1e-12, 1e+12].
            value = value.clamp(min=1e-12, max=1e+12)
            dist.append(value)
        dist = torch.cat(dist)
        loss = dist.mean()
        return loss
class TransformerBlock(nn.Module):
    """Standard transformer encoder block: multi-head self-attention followed
    by a position-wise feed-forward network, each wrapped in a residual
    connection with dropout.

    ``prenorm`` selects pre-LayerNorm (normalize before each sub-layer)
    versus the original post-LayerNorm arrangement.
    """

    def __init__(self, dim, heads=8, dim_head=None, dim_linear_block=1024, dropout=0.1, activation=nn.GELU, mhsa=None, prenorm=False):
        super().__init__()
        # Use the provided attention module, or build the default one.
        if mhsa is None:
            mhsa = MultiHeadSelfAttention(dim=dim, heads=heads, dim_head=dim_head)
        self.mhsa = mhsa
        self.prenorm = prenorm
        self.drop = nn.Dropout(dropout)
        self.norm_1 = nn.LayerNorm(dim)
        self.norm_2 = nn.LayerNorm(dim)
        # Two-layer MLP expanding to dim_linear_block and projecting back.
        self.linear = nn.Sequential(
            nn.Linear(dim, dim_linear_block),
            activation(),
            nn.Dropout(dropout),
            nn.Linear(dim_linear_block, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x, mask=None):
        """Apply attention + MLP with residuals; ``mask`` is passed straight
        through to the attention module."""
        if self.prenorm:
            attn_out = x + self.drop(self.mhsa(self.norm_1(x), mask))
            return attn_out + self.linear(self.norm_2(attn_out))
        attn_out = self.norm_1(x + self.drop(self.mhsa(x, mask)))
        return self.norm_2(attn_out + self.linear(attn_out))
def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Load a checkpoint onto the CPU, optionally overriding its saved args.

    All tensors are remapped to CPU regardless of the device they were saved
    from, and the resulting state dict is migrated to the current format via
    ``_upgrade_state_dict`` before being returned.
    """
    with PathManager.open(path, 'rb') as f:
        state = torch.load(f, map_location=(lambda s, l: default_restore_location(s, 'cpu')))
    args = state['args']
    # Apply any caller-supplied overrides onto the checkpoint's args namespace.
    for name, value in (arg_overrides or {}).items():
        setattr(args, name, value)
    return _upgrade_state_dict(state)
class Sample(BSample):
    """Thin wrapper over :class:`BSample` with a NumPy convenience constructor."""

    def __init__(self, features, labels, bigdl_type='float'):
        super(Sample, self).__init__(features, labels, bigdl_type)

    @classmethod
    def from_ndarray(cls, features, labels, bigdl_type='float'):
        """Build a :class:`Sample` from ndarrays (or lists of ndarrays).

        BUG FIX: the original took ``cls`` but lacked the ``@classmethod``
        decorator, so calling ``Sample.from_ndarray(f, l)`` on an instance
        would pass the instance as ``features``; the decorator restores the
        intended alternate-constructor behavior.

        Args:
            features: ndarray or list of ndarrays.
            labels: ndarray or list of ndarrays.
            bigdl_type: numeric type tag, e.g. 'float'.
        """
        features = to_list_of_numpy(features)
        labels = to_list_of_numpy(labels)
        return cls(features=[JTensor(feature, feature.shape) for feature in features],
                   labels=[JTensor(label, label.shape) for label in labels],
                   bigdl_type=bigdl_type)
def process_article(article):
    """Sentence-tokenize *article* and rebuild it by feeding each sentence
    through ``insert_new``, returning the accumulated result."""
    processed = []
    for sentence in process_article_sent_tokenize(article):
        insert_new(processed, sentence)
    return processed
class SparseConv2d(nn.Module):
    """2-D convolution whose weight is a fixed (non-trainable) sparse tensor.

    The weight is stored alongside its sparsity ``mask`` and a dense
    placeholder parameter that ``SparseConv2dFunction`` uses to materialize
    the dense weight during autograd.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, weight, bias, mask):
        super(SparseConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar geometry arguments to (h, w) pairs.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        # Frozen sparse weight plus its mask.
        self.weight = nn.Parameter(weight.clone(), requires_grad=False)
        self.mask = mask.clone()
        # Placeholder the autograd function fills with the dense weight.
        self.dense_weight_placeholder = nn.Parameter(torch.empty(size=self.weight.size()))
        self.dense_weight_placeholder.is_placeholder = True
        # Cross-references consumed by SparseConv2dFunction.
        self.weight.dense = self.dense_weight_placeholder
        self.weight.mask = self.mask
        self.weight.is_sparse_param = True
        if bias is None:
            # No bias: keep an all-zeros plain tensor (not a Parameter).
            self.bias = torch.zeros(size=(out_channels,))
        else:
            self.bias = nn.Parameter(bias.clone())

    def forward(self, inp):
        return SparseConv2dFunction.apply(inp, self.weight, self.dense_weight_placeholder,
                                          self.kernel_size, self.bias, self.stride, self.padding)

    def __repr__(self):
        # bias=False iff the stored bias is all zeros (the no-bias sentinel).
        has_bias = not torch.equal(self.bias, torch.zeros_like(self.bias))
        return 'SparseConv2d({}, {}, kernel_size={}, stride={}, padding={}, bias={})'.format(
            self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, has_bias)

    def __str__(self):
        return self.__repr__()
def train_epoch_distill(teacher_model, student_model, optimizer, baseline, lr_scheduler, epoch, val_dataset, problem, tb_logger, opts):
    """Run one epoch of AMDKD distillation training for the student model.

    Depending on ``opts``, either all three teachers (uniform/cluster/mixed)
    are (re)loaded, or a single teacher is sampled for the epoch; the student
    is then trained batch-by-batch via ``train_batch_distill``, checkpointed,
    and validated.

    Returns:
        When ``opts.adaptive_prob`` is set, the list of per-distribution
        validation rewards ``[uniform, cluster, mixed]``; otherwise ``None``.
    """
    print('')
    print('Start train AMDKD student model epoch {}, lr={} for run {}'.format(epoch, optimizer.param_groups[0]['lr'], opts.run_name))
    # Global step resumes from the number of optimizer steps in prior epochs.
    step = (epoch * (opts.epoch_size // opts.batch_size))
    start_time = time.time()
    if (not opts.no_tensorboard):
        tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)
    if (opts.multi_teacher and opts.distill_distribution):
        # Multi-teacher mode: load one teacher checkpoint per distribution.
        for i in ['uniform', 'cluster', 'mixed']:
            load_path = opts.load_path_multi[i]
            print(' [*] Loading data from {}'.format(load_path))
            load_data = torch_load_cpu(load_path)
            model_ = get_inner_model(teacher_model[i])
            if opts.is_load_multi:
                # Strip the leading 7 characters of each key -- presumably the
                # 'module.' prefix left by DataParallel checkpoints; confirm.
                state_dict = load_data.get('model', {})
                new_state_dict = OrderedDict()
                for (k, v) in state_dict.items():
                    name = k[7:]
                    new_state_dict[name] = v
                model_.load_state_dict({**model_.state_dict(), **new_state_dict})
            else:
                model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
            teacher_model[i].eval()
            set_decode_type(teacher_model[i], 'sampling')
    elif opts.distill_distribution:
        # Single-teacher mode: sample one distribution for this epoch.
        if (opts.adaptive_prob and (epoch >= opts.start_adaptive_epoch)):
            if (opts.random_adaptive_prob != 0):
                # With probability random_adaptive_prob use the adaptive
                # teacher probabilities; otherwise pick uniformly at random.
                if (np.random.rand() < opts.random_adaptive_prob):
                    class_type = np.random.choice(['uniform', 'cluster', 'mixed'], size=1, p=opts.teacher_prob)
                    load_path = opts.load_path_multi[class_type.item()]
                    print(' [*] Loading data from {}, prob: {} [randomly choose]'.format(load_path, opts.teacher_prob))
                else:
                    class_type = np.random.choice(['uniform', 'cluster', 'mixed'], 1)
                    load_path = opts.load_path_multi[class_type.item()]
                    print(' [*] Loading data from {} [randomly choose]'.format(load_path))
            else:
                class_type = np.random.choice(['uniform', 'cluster', 'mixed'], size=1, p=opts.teacher_prob)
                load_path = opts.load_path_multi[class_type.item()]
                print(' [*] Loading data from {}, prob: {}'.format(load_path, opts.teacher_prob))
        else:
            class_type = np.random.choice(['uniform', 'cluster', 'mixed'], 1)
            load_path = opts.load_path_multi[class_type.item()]
            print(' [*] Loading data from {}'.format(load_path))
        load_data = torch_load_cpu(load_path)
        model_ = get_inner_model(teacher_model[class_type.item()])
        if opts.is_load_multi:
            # Same 'module.'-prefix stripping as in the multi-teacher branch.
            state_dict = load_data.get('model', {})
            new_state_dict = OrderedDict()
            for (k, v) in state_dict.items():
                name = k[7:]
                new_state_dict[name] = v
            model_.load_state_dict({**model_.state_dict(), **new_state_dict})
        else:
            model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
        # Rebind teacher_model to the single chosen teacher for this epoch.
        teacher_model = teacher_model[class_type.item()]
        teacher_model.eval()
        set_decode_type(teacher_model, 'sampling')
    student_model.train()
    set_decode_type(student_model, 'sampling')
    # Fresh training data for this epoch, wrapped by the baseline.
    training_dataset = baseline.wrap_dataset(problem.make_dataset(size=opts.graph_size, num_samples=opts.epoch_size, distribution=opts.data_distribution, n_cluster=opts.n_cluster, mix_data=opts.generate_mix_data))
    training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, num_workers=0)
    for (batch_id, batch) in enumerate(tqdm(training_dataloader, disable=opts.no_progress_bar)):
        train_batch_distill(teacher_model, student_model, optimizer, baseline, step, batch, tb_logger, opts)
        step += 1
    epoch_duration = (time.time() - start_time)
    print('Finished epoch {}, took {} s'.format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
    # Periodic full checkpoint (and always on the last epoch).
    if (((opts.checkpoint_epochs != 0) and ((epoch % opts.checkpoint_epochs) == 0)) or (epoch == (opts.n_epochs - 1))):
        print('Saving model and state...')
        torch.save({'model': get_inner_model(student_model).state_dict(), 'optimizer': optimizer.state_dict(), 'rng_state': torch.get_rng_state(), 'cuda_rng_state': torch.cuda.get_rng_state_all(), 'baseline': baseline.state_dict()}, os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch)))
        # NOTE(review): this branch reads class_type, which is only bound in
        # the single-teacher branch above -- the condition matches that
        # branch, but verify it cannot be reached otherwise.
        if (opts.distill_distribution and (not opts.multi_teacher)):
            model_path = os.path.join(opts.save_dir, 'epoch-{}-{}-model-only.pt'.format(epoch, class_type.item()))
        else:
            model_path = os.path.join(opts.save_dir, 'epoch-{}-model-only.pt'.format(epoch))
        torch.save({'model': get_inner_model(student_model).state_dict()}, model_path)
    if opts.save_best:
        print('Saving best trained_model')
        torch.save({'best': opts.best, 'epoch': epoch, 'model': get_inner_model(student_model).state_dict()}, os.path.join(opts.save_dir, 'epoch-best.pt'))
    if opts.multi_test:
        # Validate on all three distributions separately.
        avg_reward_uniform = validate(student_model, val_dataset[0], opts, print_log='uniform')
        avg_reward_cluster = validate(student_model, val_dataset[1], opts, print_log='cluster')
        avg_reward_mixed = validate(student_model, val_dataset[2], opts, print_log='mixed')
        if (not opts.no_tensorboard):
            tb_logger.log_value('val_avg_reward_uniform', avg_reward_uniform, step)
            tb_logger.log_value('val_avg_reward_cluster', avg_reward_cluster, step)
            tb_logger.log_value('val_avg_reward_mixed', avg_reward_mixed, step)
    else:
        avg_reward = validate(student_model, val_dataset, opts)
        if (not opts.no_tensorboard):
            tb_logger.log_value('val_avg_reward', avg_reward, step)
    if (not opts.no_tensorboard):
        tb_logger.log_value('epoch_duration', epoch_duration, step)
    baseline.epoch_callback(student_model, epoch)
    lr_scheduler.step()
    # NOTE(review): if adaptive_prob is set while multi_test is not, the
    # avg_reward_* names below are undefined (NameError) -- it looks like
    # adaptive_prob is expected to imply multi_test; confirm upstream.
    if opts.adaptive_prob:
        return [avg_reward_uniform, avg_reward_cluster, avg_reward_mixed]
def get_data():
    """Generate a reproducible random series and a validation tail.

    NumPy is reseeded with 0 on every call, so repeated calls return
    identical data: a 400-point series plus a validation series whose
    length (the forecast horizon) is drawn uniformly from [2, 50).
    """
    np.random.seed(0)
    series_length = 400
    series = np.random.rand(series_length)
    horizon = np.random.randint(2, 50)
    holdout = np.random.rand(horizon)
    return (series, holdout)
# NOTE(review): this bare call looks like a truncated decorator -- the
# argument signature matches functools.lru_cache(maxsize=None), which would
# memoize the compiled kernel per filter_width; confirm against the original
# source before relying on it.
_cache(maxsize=None)
def median_kernel(filter_width: int):
    """Build a Triton kernel computing a sliding median of width *filter_width*.

    The kernel source is generated textually: the uppercase placeholder
    identifiers in the template below are string-replaced with unrolled loads
    and a bubble-sort network before Triton JIT-compiles the result.
    """
    def kernel(y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr):
        # Template only -- the placeholder names below are rewritten as source
        # text; this Python function is never executed directly.
        row_idx = tl.program_id(0)
        offsets = tl.arange(0, BLOCK_SIZE)
        mask = (offsets < y_stride)
        x_ptr = (x + (row_idx * x_stride))
        y_ptr = (y + (row_idx * y_stride))
        LOAD_ALL_ROWS_HERE
        BUBBLESORT_HERE
        tl.store((y_ptr + offsets), MIDDLE_ROW_HERE, mask=mask)
    # Re-wrap the raw function so the edited .src string is what gets compiled.
    kernel = triton.JITFunction(kernel.fn)
    # Unrolled loads: one shifted row per filter tap.
    kernel.src = kernel.src.replace(' LOAD_ALL_ROWS_HERE', '\n'.join([f' row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)' for i in range(filter_width)]))
    # Partial bubble sort: filter_width // 2 + 1 passes are enough to settle
    # the median element into index filter_width // 2.
    kernel.src = kernel.src.replace(' BUBBLESORT_HERE', '\n\n'.join(['\n\n'.join(['\n'.join([f' smaller = tl.where(row{j} < row{(j + 1)}, row{j}, row{(j + 1)})', f' larger = tl.where(row{j} > row{(j + 1)}, row{j}, row{(j + 1)})', f' row{j} = smaller', f' row{(j + 1)} = larger']) for j in range(((filter_width - i) - 1))]) for i in range(((filter_width // 2) + 1))]))
    kernel.src = kernel.src.replace('MIDDLE_ROW_HERE', f'row{(filter_width // 2)}')
    return kernel