code
stringlengths
101
5.91M
def validate_ext(args, device_id):
    """Validate extractive checkpoints: either sweep all saved checkpoints
    (with early stopping on validation loss) or poll the model directory for
    newly written ones."""
    timestep = 0
    if args.test_all:
        # Evaluate every checkpoint oldest-first; stop once the best loss has
        # not improved for more than 10 consecutive checkpoints.
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
        cp_files.sort(key=os.path.getmtime)
        xent_lst = []
        for i, cp in enumerate(cp_files):
            step = int(cp.split('.')[-2].split('_')[-1])
            xent = validate(args, device_id, cp, step)
            xent_lst.append((xent, cp))
            best_idx = xent_lst.index(min(xent_lst))
            if i - best_idx > 10:
                break
        # Test only the three checkpoints with the lowest validation loss.
        xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
        logger.info('PPL %s' % str(xent_lst))
        for xent, cp in xent_lst:
            step = int(cp.split('.')[-2].split('_')[-1])
            test_ext(args, device_id, cp, step)
    else:
        # Polling mode: wait for fresh checkpoints and evaluate each once.
        while True:
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if cp_files:
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if not os.path.getsize(cp) > 0:
                    # The checkpoint file is still being written; retry later.
                    time.sleep(60)
                    continue
                if time_of_cp > timestep:
                    timestep = time_of_cp
                    step = int(cp.split('.')[-2].split('_')[-1])
                    validate(args, device_id, cp, step)
                    test_ext(args, device_id, cp, step)
            # Re-check for an even newer checkpoint before going to sleep.
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if cp_files:
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if time_of_cp > timestep:
                    continue
            else:
                time.sleep(300)
def prepare_data(args):
    """Dispatch dataset preparation to the handler matching ``args.dataset``.

    Raises NotImplementedError for unknown dataset names.
    """
    name = args.dataset
    if name == 'CamCAN':
        CamCANHandler(args)
    elif name == 'BraTS':
        BraTSHandler(args)
    elif name == 'ATLAS':
        ATLASHandler(args)
    elif name == 'DDR':
        DDRHandler(args)
    else:
        # No handler registered for this dataset.
        raise NotImplementedError
def closest_holder(exp_scope, holder_scopes):
    """Return the holder scope whose start offset is nearest to the start
    offset of ``exp_scope`` (a 'start:end' string)."""
    target = int(exp_scope.split(':')[0])
    starts = np.array([scope[0] for scope in holder_scopes])
    nearest = np.argmin(np.abs(starts - target))
    return holder_scopes[nearest]
class BaseWarmUpLR(lr_scheduler._LRScheduler):
    """Base learning-rate scheduler with an optional warm-up phase.

    Derived classes implement ``_get_lr()`` for the post-warm-up schedule.

    Fix: the accessors below must be properties — as plain methods, the
    ``assert self.warmup_type in [...]`` in ``__init__`` compared a bound
    method to a list and always failed.
    """

    def __init__(self, optimizer, warmup_type='NO', warmup_iters=0, warmup_factor=0.1):
        self._warmup_type = warmup_type.upper()
        assert self.warmup_type in ['NO', 'CONST', 'LINEAR', 'EXP']
        self._warmup_iters = warmup_iters
        self._warmup_factor = float(warmup_factor)
        super().__init__(optimizer, last_epoch=-1)

    @property
    def warmup_type(self):
        return self._warmup_type

    @property
    def warmup_iters(self):
        return self._warmup_iters

    @property
    def warmup_factor(self):
        return self._warmup_factor

    def get_warmup_lr(self):
        """Per-group learning rates while still inside the warm-up window."""
        progress = self.last_epoch / self.warmup_iters
        if self.warmup_type == 'NO':
            return self.base_lrs
        if self.warmup_type == 'CONST':
            return [lr * self.warmup_factor for lr in self.base_lrs]
        if self.warmup_type == 'LINEAR':
            # Linearly ramp from warmup_factor * lr up to lr.
            scale = (1 - progress) * (1 - self.warmup_factor)
            return [lr * (1 - scale) for lr in self.base_lrs]
        if self.warmup_type == 'EXP':
            # Exponentially ramp from warmup_factor * lr up to lr.
            scale = self.warmup_factor ** (1 - progress)
            return [lr * scale for lr in self.base_lrs]
        raise ValueError(f'Invalid warm-up type `{self.warmup_type}`!')

    def _get_lr(self):
        raise NotImplementedError(f'Should be implemented in derived classes!')

    def get_lr(self):
        if self.last_epoch < self.warmup_iters:
            return self.get_warmup_lr()
        return self._get_lr()
def plot_single_task_curve(aggregated_data: Dict[str, Any], algorithms: list, colors: Optional[Dict] = None, color_palette: str = 'colorblind', figsize: tuple = (7, 5), xlabel: str = 'Number of Frames (in millions)', ylabel: str = 'Aggregate Human Normalized Score', ax: Optional[Axes] = None, labelsize: str = 'xx-large', ticklabelsize: str = 'xx-large', **kwargs: Any) -> Axes:
    """Plot per-algorithm mean curves with confidence-interval bands.

    NOTE(review): ``aggregated_data.pop('extra')`` mutates the caller's dict —
    confirm callers do not reuse the mapping afterwards.
    """
    extra_info = aggregated_data.pop('extra')
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    if algorithms is None:
        algorithms = list(aggregated_data.keys())
    if colors is None:
        color_palette = sns.color_palette(color_palette, n_colors=len(algorithms))
        colors = dict(zip(algorithms, color_palette))
    for algorithm in algorithms:
        n_points = len(aggregated_data[algorithm]['mean'])
        xs = np.arange(n_points) * extra_info['evaluation_interval']
        means = np.array(aggregated_data[algorithm]['mean'])
        ci = np.array(aggregated_data[algorithm]['ci'])
        lower, upper = means - ci, means + ci
        # 'marker'/'linewidth' are popped so they are not forwarded below.
        ax.plot(xs, means, color=colors[algorithm], marker=kwargs.pop('marker', 'o'), linewidth=kwargs.pop('linewidth', 2), label=algorithm)
        ax.fill_between(xs, y1=lower, y2=upper, color=colors[algorithm], alpha=0.2)
    return _annotate_and_decorate_axis(ax, xlabel=xlabel, ylabel=ylabel, labelsize=labelsize, ticklabelsize=ticklabelsize, **kwargs)
class Fold(torch.nn.Module):
    """Regroup a batch of (n_locs * n_locs) local crops into a single map
    laid out on an (n_locs x n_locs) grid of flattened crop features."""

    def __init__(self, img_size, fold_size):
        super().__init__()
        # Number of (half-overlapping) crop locations along each axis.
        self.n_locs = 2 * (img_size // fold_size) - 1

    def forward(self, x):
        # x: (batch * n_locs^2, C, H, W) -> (batch, C*H*W, n_locs, n_locs).
        dim_c, dim_x, dim_y = x.size()[1:]
        # Fix: the original applied the identical reshape twice; once suffices.
        x = (
            x.reshape(-1, self.n_locs * self.n_locs, dim_c, dim_x * dim_y)
            .permute(0, 2, 3, 1)
            .reshape(-1, dim_c * dim_x * dim_y, self.n_locs, self.n_locs)
            .contiguous()
        )
        return x
# Fix: the decorator was mangled to bare ".skipif(...)" during extraction;
# restore the standard pytest skip marker.
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_part():
    """points_in_boxes_part should return, per point, the index of the box
    containing it (or -1 when outside every box)."""
    # boxes: (B, M, 7) as (x, y, z, dx, dy, dz, yaw).
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]],
         [[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2]],
         [[3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
          [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[0, 0, 0, 0, 0, -1, -1, -1],
         [-1, -1, -1, -1, -1, -1, -1, -1]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([2, 8])
    assert (point_indices == expected_point_indices).all()

    # A yaw-rotated box: only points inside the rotated extent should match.
    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
                         dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
                                          dtype=torch.int32).cuda()
    assert (point_indices == expected_point_indices).all()
class ImageClient():
    """Aggregates one or two camera clients and exposes their frames stacked
    vertically as a single image."""

    def __init__(self, top_only=False, port1=6000, port2=6001, scale_factor=1.0):
        serial_ports = [(CAMERA_SERIALS[0], port1)]
        if not top_only:
            serial_ports.append((CAMERA_SERIALS[1], port2))
        self.cameras = [
            CameraClientWrapper(serial, port, scale_factor=scale_factor)
            for serial, port in serial_ports
        ]
        # Cameras share a width; their heights are stacked vertically.
        total_height = sum(camera.image_height for camera in self.cameras)
        self.image_shape = (total_height, self.cameras[0].image_width)

    def get_image(self):
        """Return the vertically concatenated frames of all cameras."""
        return np.concatenate([camera.get_image() for camera in self.cameras], axis=0)

    def close(self):
        """Close every underlying camera connection."""
        for camera in self.cameras:
            camera.camera_client.close()
# Fix: the decorator was mangled to the bare fragment "_tokenizers" during
# extraction; restore the transformers testing guard.
@require_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """Checks that AutoTokenizer resolves a Japanese BERT checkpoint to the
    dedicated BertJapaneseTokenizer class."""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class SSIMMetric(BaseDistanceMetric):
    """Structural-similarity distance metric (stores negated SSIM, so lower
    values mean more similar images)."""

    def __init__(self, data_range=None, mode='default', **kwargs):
        super(SSIMMetric, self).__init__(name='ssim', **kwargs)
        self.data_range = data_range
        self.mode = mode

    def add(self, es, ta, ma=None):
        """Accumulate per-sample negated SSIM between estimate and target.

        NOTE(review): relies on ``skimage.measure.compare_ssim``, which newer
        scikit-image versions replace with
        ``skimage.metrics.structural_similarity`` — verify the pinned version.
        """
        if es.shape != ta.shape:
            raise Exception('es and ta have to be of shape Nxdim')
        if es.ndim == 3:
            # Promote grayscale (N, H, W) to (N, H, W, 1).
            es = es[..., None]
            ta = ta[..., None]
        if es.ndim != 4 or es.shape[3] not in [1, 3]:
            raise Exception('es and ta have to be of shape bs x height x width x 0, 1, or 3')
        if ma is not None:
            # Apply the mask to both images before comparison.
            es = ma * es
            ta = ma * ta
        for bidx in range(es.shape[0]):
            if self.mode == 'default':
                ssim = skimage.measure.compare_ssim(es[bidx], ta[bidx], multichannel=True, data_range=self.data_range)
            elif self.mode == 'deepvoxels':
                # DeepVoxels evaluation: average SSIM over the three channels.
                ssim = 0
                for c in range(3):
                    ssim += skimage.measure.compare_ssim(es[bidx, ..., c], ta[bidx, ..., c], gaussian_weights=True, sigma=1.5, use_sample_covariance=False, data_range=1.0)
                ssim /= 3
            else:
                raise Exception('invalid mode')
            self.dists.append(-ssim)
def plot_models(data_path, figsize=(12, 4), max_params=128000.0, max_maccs=4500000.0):
    """Scatter-plot model size vs. compute on log axes, shading the region of
    models that fit within the parameter/MACC budget."""
    df = logmel_models(data_path)
    fig, ax = plt.subplots(1, figsize=figsize)
    for column in ('accuracy', 'kparams', 'mmacc'):
        check_missing(df, column)
    df.plot.scatter(x='params', y='macc_s', logx=True, logy=True, ax=ax)
    ax.set_xlabel('Model parameters')
    ax.set_ylabel('MACC / second')
    # Shade the rectangle of feasible models and outline its boundary.
    feasible_x = max_params
    feasible_y = max_maccs
    xs = [0, feasible_x, feasible_x, 0]
    ys = [0, 0, feasible_y, feasible_y]
    ax.fill(xs, ys, color='green', alpha=0.5)
    border = dict(color='black', linewidth=0.5)
    ax.axvline(feasible_x, **border)
    ax.axhline(feasible_y, **border)

    def add_labels(row):
        # Annotate each point with the model name and accuracy percentage.
        xy = (row.params, row.macc_s)
        label = '{} {:.1f}%'.format(row['name'], 100 * row.accuracy)
        ax.annotate(label, xy, xytext=(5, 40), textcoords='offset points', size=12, rotation=25, color='darkslategrey')

    df.apply(add_labels, axis=1)
    fig.tight_layout()
    return fig
def main():
    # Pre-trains an audio-captioning model and evaluates the best checkpoints
    # on the AudioCaps and Clotho benchmarks. Reads settings from a YAML
    # config, with CLI overrides for exp_name, seed and learning rate.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='settings/pretrain.yaml', type=str, help='Setting files')
    parser.add_argument('-n', '--exp_name', default='exp_name', type=str, help='name of this experiment.')
    parser.add_argument('-l', '--lr', default=1e-05, type=float, help='Learning rate.')
    parser.add_argument('-s', '--seed', default=20, type=int, help='Training seed.')
    args = parser.parse_args()
    exp_name = args.exp_name
    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
    # Command-line values take precedence over the YAML settings.
    config['exp_name'] = args.exp_name
    config['seed'] = args.seed
    config['optim_args']['lr'] = args.lr
    seed = config['seed']
    setup_seed(seed)
    device = torch.device(config['device'])
    exp_name = (exp_name + f"lr_{config['optim_args']['lr']}_seed_{seed}")
    wandb.init(project='audio-captioning', name=exp_name, config=config)
    (model_output_dir, log_output_dir) = set_logger(exp_name)
    main_logger = logger.bind(indent=1)
    dataloader = pretrain_dataloader(config, bucket=True, bucket_boundaries=(5, 30, 6), is_distributed=False, num_tasks=1, global_rank=0)
    # The text decoder architecture is selected by substring of its name.
    if ('bart' in config['text_decoder_args']['name']):
        model = BartCaptionModel(config)
    elif ('bert' in config['text_decoder_args']['name']):
        model = BertCaptionModel(config)
    main_logger.info(f"Decoder model:{config['text_decoder_args']['name']}")
    model = model.to(device)
    wandb.watch(model)
    optimizer = get_optimizer(model.parameters(), lr=config['optim_args']['lr'], betas=config['optim_args']['betas'], eps=config['optim_args']['eps'], momentum=config['optim_args']['momentum'], weight_decay=config['optim_args']['weight_decay'], optimizer_name=config['optim_args']['optimizer_name'])
    scheduler = cosine_lr(optimizer, base_lr=config['optim_args']['lr'], warmup_length=(config['optim_args']['warmup_epochs'] * len(dataloader)), steps=(len(dataloader) * config['training']['epochs']))
    start_epoch = 1
    max_epoch = config['training']['epochs']
    printer = PrettyPrinter()
    main_logger.info(f'''Training setting: 
{printer.pformat(config)}''')
    main_logger.info(f'Total numer of parameters: {sum([i.numel() for i in model.parameters()])}')
    main_logger.info(f'Size of training set: {len(dataloader.dataset)}, size of batches: {len(dataloader)}')
    ac_datamodule = AudioCaptionDataModule(config, 'AudioCaps')
    clotho_datamodule = AudioCaptionDataModule(config, 'Clotho')
    ac_val_loader = ac_datamodule.val_dataloader()
    clotho_val_loader = clotho_datamodule.val_dataloader()
    loss_stats = []
    ac_spiders = []
    clotho_spiders = []
    for epoch in range(start_epoch, (max_epoch + 1)):
        main_logger.info(f'Training for epoch [{epoch}]')
        train_statics = train(model, dataloader, optimizer, scheduler, device, epoch)
        loss = train_statics['loss']
        elapsed_time = train_statics['time']
        loss_stats.append(loss)
        main_logger.info(f"Training statistics: loss for epoch [{epoch}]: {loss:.3f}, time: {elapsed_time:.1f}, lr: {optimizer.param_groups[0]['lr']:.6f}.")
        main_logger.info('Evaluating on AudioCaps...')
        # Beam sizes 2 and 3 are tracked; the best-SPIDEr checkpoint is kept.
        # NOTE(review): statement grouping was lost in extraction — the save
        # check is reconstructed as nested under `i != 1`, the only reading
        # that avoids max() on an empty list at epoch 1; confirm upstream.
        for i in range(1, 4):
            ac_metrics = validate(ac_val_loader, model, device=device, log_dir=log_output_dir, epoch=epoch, beam_size=i)
            spider = ac_metrics['spider']['score']
            if (i != 1):
                ac_spiders.append(spider)
                if (spider >= max(ac_spiders)):
                    torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'beam_size': i, 'epoch': epoch, 'config': config}, (str(model_output_dir) + '/ac_best_model.pt'))
        main_logger.info('Evaluating on Clotho...')
        for i in range(1, 4):
            clotho_metrics = validate(clotho_val_loader, model, device=device, log_dir=log_output_dir, epoch=epoch, beam_size=i)
            spider = clotho_metrics['spider']['score']
            if (i != 1):
                clotho_spiders.append(spider)
                if (spider >= max(clotho_spiders)):
                    torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'beam_size': i, 'epoch': epoch, 'config': config}, (str(model_output_dir) + '/clotho_best_model.pt'))
    main_logger.info('Training done.')
    ac_test_loader = ac_datamodule.test_dataloader()
    clotho_test_loader = clotho_datamodule.test_dataloader()
    # Evaluate the best AudioCaps checkpoint on both test sets.
    model.load_state_dict(torch.load((str(model_output_dir) + '/ac_best_model.pt'))['model'])
    main_logger.info(f"Evaluation best AudioCaps model... epoch:{torch.load((str(model_output_dir) + '/ac_best_model.pt'))['epoch']}")
    for i in range(1, 4):
        spider = validate(ac_test_loader, model, device=device, log_dir=log_output_dir, epoch=0, beam_size=i)['spider']['score']
        wandb.log({f'AudioCaps/ac_model/spider(beam: {i})': spider})
    for i in range(1, 4):
        spider = validate(clotho_test_loader, model, device=device, log_dir=log_output_dir, epoch=0, beam_size=i)['spider']['score']
        wandb.log({f'Clotho/ac_model/spider(beam: {i})': spider})
    # Evaluate the best Clotho checkpoint on both test sets.
    model.load_state_dict(torch.load((str(model_output_dir) + '/clotho_best_model.pt'))['model'])
    main_logger.info(f"Evaluation best Clotho model... epoch:{torch.load((str(model_output_dir) + '/clotho_best_model.pt'))['epoch']}")
    for i in range(1, 4):
        spider = validate(ac_test_loader, model, device=device, log_dir=log_output_dir, epoch=0, beam_size=i)['spider']['score']
        wandb.log({f'AudioCaps/clotho_model/spider(beam: {i})': spider})
    for i in range(1, 4):
        spider = validate(clotho_test_loader, model, device=device, log_dir=log_output_dir, epoch=0, beam_size=i)['spider']['score']
        wandb.log({f'Clotho/clotho_model/spider(beam: {i})': spider})
    main_logger.info('Evaluation done.')
    wandb.finish()
def zhong_selfatt(U, dim, mask=None, seq_len=None, transform=None, scope=None, reuse=None):
    """Two-layer additive self-attention pooling over the third axis of U.

    NOTE(review): the mask is applied multiplicatively to the logits before
    the softmax, so masked positions still receive exp(0) weight — verify
    this matches the intended behaviour of the original model.
    """
    if mask is None:
        assert seq_len is not None
        mask = tf.expand_dims(tf.sequence_mask(seq_len, tf.shape(U)[1]), axis=1)
    with tf.variable_scope(scope or 'zhong_selfAttention', reuse=reuse):
        W1 = tf.get_variable('W1', [dim, dim])
        b1 = tf.get_variable('b1', [dim])
        W2 = tf.get_variable('W2', [dim, 1])
        b2 = tf.get_variable('b2', [1])
        # Hidden projection followed by a scalar score per position.
        hidden = tf.nn.tanh(tf.einsum('ijkl,lt->ijkt', U, W1) + b1)
        logits = tf.nn.tanh(tf.squeeze(tf.einsum('ijkl,lt->ijkt', hidden, W2) + b2, axis=-1))
        masked_logits = logits * tf.cast(mask, dtype='float')
        att = tf.nn.softmax(masked_logits)
        # Attention-weighted sum over the sequence axis.
        output = tf.einsum('ijkl,ijk->ijl', U, att)
        if transform == 'expand':
            output = tf.expand_dims(output, axis=1)
        elif transform == 'squeeze':
            output = tf.squeeze(output, axis=1)
        return output
def startOutputFile():
    """Open the configured output file (or fall back to stdout) and emit the
    generated-file header.

    The caller owns the returned handle and is responsible for closing it.
    """
    if options.outputFileName is None:
        output = sys.stdout
    else:
        output = open(options.outputFileName, 'w')
    output.write('/* Generated file, do not edit */\n\n')
    return output
def build_cnn():
    """Build and compile the convolutional network: two conv blocks followed
    by a dropout-regularized dense head."""
    l2_reg = keras.regularizers.l2(L2_LAMBDA)
    inpt = keras.layers.Input(shape=IMG_SHAPE)
    # Block 1: two 5x5 convolutions with light dropout, then pooling.
    x = keras.layers.Convolution2D(32, (5, 5), padding='same', activation='relu')(inpt)
    x = keras.layers.Dropout(rate=0.1)(x)
    x = keras.layers.Convolution2D(32, (5, 5), padding='same', activation='relu')(x)
    x = keras.layers.Dropout(rate=0.2)(x)
    x = keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
    # Block 2: two wider convolutions with heavier dropout, then pooling.
    x = keras.layers.Convolution2D(64, (5, 5), padding='same', activation='relu')(x)
    x = keras.layers.Dropout(rate=0.3)(x)
    x = keras.layers.Convolution2D(64, (5, 5), padding='same', activation='relu')(x)
    x = keras.layers.Dropout(rate=0.3)(x)
    x = keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
    # L2-regularized dense head; the output layer has no activation.
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(200, activation='relu', kernel_regularizer=l2_reg)(x)
    x = keras.layers.Dropout(rate=0.5)(x)
    x = keras.layers.Dense(200, activation='relu', kernel_regularizer=l2_reg)(x)
    x = keras.layers.Dropout(rate=0.5)(x)
    x = keras.layers.Dense(OUTPUT_DIM, activation=None, kernel_regularizer=l2_reg)(x)
    model = keras.models.Model(inputs=inpt, outputs=x)
    adam = keras.optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam, loss=output_fn, metrics=['accuracy'])
    return model
def GenIdx(train_color_label, train_thermal_label):
    """Group sample indices by identity label for each modality.

    Returns (color_pos, thermal_pos): for each modality, a list of index
    lists, one per unique label in ascending label order — matching the
    np.unique ordering of the original implementation.

    Fix: replaces the accidental O(n_labels * n_samples) rescan of the full
    label list per unique label with a single grouping pass.
    """
    from collections import defaultdict

    def _group(labels):
        # One pass: indices are appended in ascending order automatically.
        positions = defaultdict(list)
        for idx, label in enumerate(labels):
            positions[label].append(idx)
        return [positions[label] for label in sorted(positions)]

    return (_group(train_color_label), _group(train_thermal_label))
def get_data_provider_by_name(name, train_params):
    """Instantiate the data provider registered under ``name``.

    Prints a message and terminates the process for unknown dataset names.
    """
    # Lambdas defer name resolution, so unknown datasets never touch the
    # provider classes — same behaviour as the original if/elif chain.
    factories = {
        'C10': lambda: Cifar10DataProvider(**train_params),
        'C10+': lambda: Cifar10AugmentedDataProvider(**train_params),
        'C100': lambda: Cifar100DataProvider(**train_params),
        'C100+': lambda: Cifar100AugmentedDataProvider(**train_params),
        'SVHN': lambda: SVHNDataProvider(**train_params),
    }
    factory = factories.get(name)
    if factory is None:
        print('Sorry, data provider for `%s` dataset was not implemented yet' % name)
        exit()
    return factory()
# NOTE(review): the two fragments on the next line are remnants of decorators
# mangled during extraction (most likely a hypothesis-style
# "@...(events=subsets(_ALL_EVENTS_WITH_HANDLERS))" parametrization plus a
# "@_events_with_registered_handlers_to_subset" filter); restore them from
# the original project before running this test.
(events=subsets(_ALL_EVENTS_WITH_HANDLERS)) _events_with_registered_handlers_to_subset
# Runs a traced for-loop cell and checks that the exact sequence of recorded
# trace events (filtered to the active event subset) matches expectations.
def test_for_loop(events):
    assert (_RECORDED_EVENTS == [])
    run_cell('\n for i in range(10):\n pass\n ')
    throw_and_print_diff_if_recorded_not_equal_to(filter_events_to_subset((([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_load_complex_symbol, TraceEvent.load_name, TraceEvent.before_call, TraceEvent.after_argument, TraceEvent.after_call, TraceEvent.after_load_complex_symbol] + ([TraceEvent.before_for_loop_body, TraceEvent.before_stmt, TraceEvent.after_stmt, TraceEvent.after_for_loop_iter] * 1)) + [TraceEvent.after_stmt, TraceEvent.after_module_stmt]), events))
class DampedRotarySpring(Constraint):
    """ctypes wrapper around Chipmunk's damped rotary spring constraint."""

    def __init__(self, a, b, rest_angle, stiffness, damping):
        self._constraint = cp.cpDampedRotarySpringNew(a._body, b._body, rest_angle, stiffness, damping)
        self._ccontents = self._constraint.contents
        # Typed view of the same struct for field access below.
        self._dsc = cp.cast(self._constraint, ct.POINTER(cp.cpDampedRotarySpring)).contents
        self._set_bodies(a, b)

    def _get_rest_angle(self):
        return self._dsc.restAngle

    def _set_rest_angle(self, rest_angle):
        self._dsc.restAngle = rest_angle

    rest_angle = property(_get_rest_angle, _set_rest_angle, doc='The relative angle in radians that the bodies want to have')

    def _get_stiffness(self):
        return self._dsc.stiffness

    def _set_stiffness(self, stiffness):
        self._dsc.stiffness = stiffness

    stiffness = property(_get_stiffness, _set_stiffness, doc="The spring constant (Young's modulus).")

    def _get_damping(self):
        return self._dsc.damping

    def _set_damping(self, damping):
        self._dsc.damping = damping

    damping = property(_get_damping, _set_damping, doc='How soft to make the damping of the spring.')

    def _set_torque_func(self, func):
        # Keep a reference to the ctypes callback so it is not garbage
        # collected while the C side still points at it.
        def _impl(_, relative_angle):
            return func(self, relative_angle)
        self._torque_func_callback = cp.cpDampedRotarySpringTorqueFunc(_impl)
        self._dsc.springTorqueFunc = self._torque_func_callback

    torque_func = property(fset=_set_torque_func, doc=_set_torque_func.__doc__)
def neg_squad(args):
    """Augment a SQuAD-format file with negative (unanswerable) questions.

    For every paragraph, ``args.aug_ratio`` other paragraphs from the same
    article are sampled and their questions are copied in with empty answers
    and a 'neg_'-prefixed id. Reads ``args.source_path`` and writes the
    augmented json to ``args.target_path``.
    """
    with open(args.source_path, 'r') as fp:
        squad = json.load(fp)
    # A second, untouched copy to iterate over while ``squad`` is mutated.
    with open(args.source_path, 'r') as fp:
        ref_squad = json.load(fp)
    for ai, article in enumerate(ref_squad['data']):
        paragraphs = article['paragraphs']
        for pi, para in enumerate(paragraphs):
            # Candidate donor paragraphs: every index except pi itself.
            cands = list(range(pi)) + list(range(pi + 1, len(paragraphs)))
            for sample in random.sample(cands, args.aug_ratio):
                for qi, ques in enumerate(paragraphs[sample]['qas']):
                    new_ques = {'question': ques['question'], 'answers': [], 'answer_start': 0, 'id': ('neg_' + ques['id'])}
                    squad['data'][ai]['paragraphs'][pi]['qas'].append(new_ques)
    with open(args.target_path, 'w') as fp:
        json.dump(squad, fp)
class TFCTRLForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder that raises a helpful error unless the TF
    backend is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def get_mnms_data(data_root):
    """Recursively collect compressed nifti files under ``data_root``.

    Returns (raw_paths, gt_paths): files whose name contains '_gt' are
    treated as ground-truth segmentations, the rest as raw images.
    """
    raw_paths, gt_paths = [], []
    for current_dir, _subdirs, filenames in os.walk(data_root):
        for filename in filenames:
            if not filename.endswith('nii.gz'):
                continue
            full_path = os.path.join(current_dir, filename)
            (gt_paths if '_gt' in filename else raw_paths).append(full_path)
    return (raw_paths, gt_paths)
class Adam(torch.optim.Optimizer):
    """Adam optimizer (Kingma & Ba, 2015) with optional AMSGrad, running its
    math in fp32 even when the parameters themselves are fp16.

    Fixes: the positional ``(scalar, tensor)`` overloads of ``add_`` /
    ``addcmul_`` / ``addcdiv_`` are deprecated and removed in newer torch;
    they are replaced with the keyword ``alpha`` / ``value`` forms.
    ``supports_memory_efficient_fp16`` is restored to a property, since
    callers test it as an attribute flag.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                # All state and updates are kept in an fp32 master copy.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # First and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # AMSGrad: normalize by the running maximum of v_t.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # L2 decay applied directly on the fp32 master weights.
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
class DummyObject(type):
    """Metaclass for backend-gated placeholder classes: any non-private
    attribute access (plus ``_from_config``) triggers a missing-backend
    error via ``requires_backends``."""

    def __getattribute__(cls, key):
        # Private/dunder lookups behave normally so introspection works;
        # _from_config is deliberately excluded from that fast path.
        if key.startswith('_') and key != '_from_config':
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
def _tower_loss(network_fn, images, labels, input_seqs, input_masks):
    """Build the joint image-text embedding losses for one GPU tower.

    Returns (total_loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss).
    """
    image_features, _ = build_image_features(network_fn, images)
    text_features, _ = build_text_features(input_seqs, input_masks)
    image_embeddings = build_joint_embeddings(image_features, scope='image_joint_embedding')
    text_embeddings = build_joint_embeddings(text_features, scope='text_joint_embedding')
    # Initialise every partial loss so disabled terms stay defined tensors.
    loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss = 0.0, 0.0, 0.0, 0.0, 0.0
    cmpm_loss = tf.cast(cmpm_loss, tf.float32)
    cmpc_loss = tf.cast(cmpc_loss, tf.float32)
    i2t_loss = tf.cast(i2t_loss, tf.float32)
    t2i_loss = tf.cast(t2i_loss, tf.float32)
    if FLAGS.CMPM:
        # Cross-modal projection matching loss.
        i2t_loss, t2i_loss, pos_avg_dist, neg_avg_dist = cmpm_loss_compute(text_embeddings, image_embeddings, labels)
        cmpm_loss = i2t_loss + t2i_loss
        tf.summary.scalar('cmpm_i2t_loss', i2t_loss)
        tf.summary.scalar('cmpm_t2i_loss', t2i_loss)
        tf.summary.scalar('cmpm_loss', cmpm_loss)
        tf.summary.scalar('pos_avg_dist', pos_avg_dist)
        tf.summary.scalar('neg_avg_dist', neg_avg_dist)
    if FLAGS.CMPC:
        # Cross-modal projection classification loss.
        ipt_loss, tpi_loss, image_precision, text_precision = cmpc_loss_compute(text_embeddings, image_embeddings, labels)
        cmpc_loss = ipt_loss + tpi_loss
        tf.summary.scalar('cmpc_ipt_loss', ipt_loss)
        tf.summary.scalar('cmpc_tpi_loss', tpi_loss)
        tf.summary.scalar('cmpc_loss', cmpc_loss)
        tf.summary.scalar('image_precision', image_precision)
        tf.summary.scalar('text_precision', text_precision)
    loss = cmpc_loss + cmpm_loss
    reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n([loss] + reg_loss, name='total_loss')
    # Track an exponential moving average of the raw and total losses.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg_loss')
    loss_averages_op = loss_averages.apply([loss] + [total_loss])
    tf.summary.scalar('loss_raw', loss)
    tf.summary.scalar('loss_avg', loss_averages.average(loss))
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return (total_loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss)
class BAT(nn.Module):
    # Boundary-Aware Transformer segmentation head on top of a DeepLab
    # backbone: backbone features are projected to the transformer width,
    # refined by a (boundary-aware) transformer, residually merged back,
    # and decoded through ASPP to full resolution.
    def __init__(self, num_classes, num_layers, point_pred, decoder=False, transformer_type_index=0, hidden_features=128, number_of_query_positions=1, segmentation_attention_heads=8):
        super(BAT, self).__init__()
        self.num_classes = num_classes
        self.point_pred = point_pred
        self.transformer_type = ('BoundaryAwareTransformer' if (transformer_type_index == 0) else 'Transformer')
        self.use_decoder = decoder
        self.deeplab = base(num_classes, num_layers)
        # ResNet-50 backbones emit 2048 channels; lighter variants emit 512.
        in_channels = (2048 if (num_layers == 50) else 512)
        # 1x1 conv projecting backbone channels to the transformer width.
        self.convolution_mapping = nn.Conv2d(in_channels=in_channels, out_channels=hidden_features, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        self.query_positions = nn.Parameter(data=torch.randn(number_of_query_positions, hidden_features, dtype=torch.float), requires_grad=True)
        # Learned 2-D positional encodings, half row / half column features.
        self.row_embedding = nn.Parameter(data=torch.randn(100, (hidden_features // 2), dtype=torch.float), requires_grad=True)
        self.column_embedding = nn.Parameter(data=torch.randn(100, (hidden_features // 2), dtype=torch.float), requires_grad=True)
        # point_pred indexes the variant list: 0 -> plain, 1 -> boundary-aware.
        self.transformer = [Transformer(d_model=hidden_features), BoundaryAwareTransformer(d_model=hidden_features)][point_pred]
        if self.use_decoder:
            self.BCA = BoundaryCrossAttention(hidden_features, 8)
        self.trans_out_conv = nn.Conv2d(in_channels=hidden_features, out_channels=in_channels, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)

    def forward(self, x):
        h = x.size()[2]
        w = x.size()[3]
        feature_map = self.deeplab.resnet(x)
        features = self.convolution_mapping(feature_map)
        (height, width) = features.shape[2:]
        batch_size = features.shape[0]
        # NOTE(review): column embeddings are sliced/repeated with `height`
        # and row embeddings with `width`; the pairing only lines up for
        # square feature maps — confirm against the upstream implementation.
        positional_embeddings = torch.cat([self.column_embedding[:height].unsqueeze(dim=0).repeat(height, 1, 1), self.row_embedding[:width].unsqueeze(dim=1).repeat(1, width, 1)], dim=(- 1)).permute(2, 0, 1).unsqueeze(0).repeat(batch_size, 1, 1, 1)
        if (self.transformer_type == 'BoundaryAwareTransformer'):
            (latent_tensor, features_encoded, point_maps) = self.transformer(features, None, self.query_positions, positional_embeddings)
        else:
            # The plain transformer produces no boundary point maps.
            (latent_tensor, features_encoded) = self.transformer(features, None, self.query_positions, positional_embeddings)
            point_maps = []
        latent_tensor = latent_tensor.permute(2, 0, 1)
        if self.use_decoder:
            (features_encoded, point_dec) = self.BCA(features_encoded, latent_tensor)
            point_maps.append(point_dec)
        trans_feature_maps = self.trans_out_conv(features_encoded.contiguous())
        # Residual connection back onto the raw backbone features.
        trans_feature_maps = (trans_feature_maps + feature_map)
        output = self.deeplab.aspp(trans_feature_maps)
        output = F.interpolate(output, size=(h, w), mode='bilinear')
        if (self.point_pred == 1):
            return (output, point_maps)
        return output
class PredictionTransform():
    """Preprocessing pipeline applied to a raw image before inference:
    resize, mean subtraction, std scaling, then tensor conversion."""

    def __init__(self, size, mean=0.0, std=1.0):
        # The lambda stage divides the image by std while passing boxes and
        # labels through unchanged, matching the Compose stage signature.
        scale_by_std = lambda img, boxes=None, labels=None: (img / std, boxes, labels)
        self.transform = Compose([Resize(size), SubtractMeans(mean), scale_by_std, ToTensor()])

    def __call__(self, image):
        image, _, _ = self.transform(image)
        return image
class HasOptimMethod():
    """Mixin adding a configurable optimisation method (defaulting to SGD)
    that is mirrored to the backing JVM object via callZooFunc."""

    def __init__(self):
        super(HasOptimMethod, self).__init__()
        self.optimMethod = SGD()

    def setOptimMethod(self, val):
        """Set the optimizer on the JVM side, cache it locally, return self."""
        callZooFunc(self.bigdl_type, 'setOptimMethod', self.value, val)
        self.optimMethod = val
        return self

    def getOptimMethod(self):
        return self.optimMethod
class XLMConfig(PretrainedConfig):
    """Configuration class for XLM models.

    Stores architecture and task-head hyper-parameters and exposes the
    attribute aliases (``n_words``, ``hidden_size``, ``num_attention_heads``,
    ``num_hidden_layers``) expected by the transformers API.

    Fix: the accessor decorators were mangled in the extracted source (a bare
    ``_words.setter`` fragment and missing ``@property`` markers left the
    class syntactically invalid); they are restored below.
    """

    pretrained_config_archive_map = XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'xlm'

    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=(2048 ** (- 0.5)), layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if 'n_words' in kwargs:
            # Routed through the property setter, i.e. sets vocab_size.
            self.n_words = kwargs['n_words']

    @property
    def n_words(self):
        # Legacy alias for vocab_size.
        return self.vocab_size

    @n_words.setter
    def n_words(self, value):
        self.vocab_size = value

    @property
    def hidden_size(self):
        return self.emb_dim

    @property
    def num_attention_heads(self):
        return self.n_heads

    @property
    def num_hidden_layers(self):
        return self.n_layers
class CheckpointMergerPipeline(DiffusionPipeline):
    """Merge two or three diffusers checkpoints into a single pipeline.

    Supported interpolation modes: "sigmoid", "inv_sigmoid", "add_diff",
    or (default) a plain weighted sum.

    NOTE(review): the source contained the residue ``_grad()`` where a
    decorator had been stripped; ``@torch.no_grad()`` on :meth:`merge` and
    ``@staticmethod`` on the interpolation helpers are restored here.
    """

    def __init__(self):
        self.register_to_config()
        super().__init__()

    def _compare_model_configs(self, dict0, dict1):
        # Configs are compatible if equal outright, or equal once the
        # "_"-prefixed metadata keys are removed (warn in that case).
        if (dict0 == dict1):
            return True
        (config0, meta_keys0) = self._remove_meta_keys(dict0)
        (config1, meta_keys1) = self._remove_meta_keys(dict1)
        if (config0 == config1):
            print(f'Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.')
            return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        # Return (config without "_"-prefixed keys, list of removed keys).
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith('_'):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    def merge(self, pretrained_model_name_or_path_list: List[Union[(str, os.PathLike)]], **kwargs):
        """Download, validate and merge 2-3 checkpoints; return the merged pipeline.

        Notable kwargs: ``alpha`` (interpolation weight, default 0.5),
        ``interp`` (merge mode), ``force`` (skip config compatibility check).
        """
        cache_dir = kwargs.pop('cache_dir', DIFFUSERS_CACHE)
        resume_download = kwargs.pop('resume_download', False)
        force_download = kwargs.pop('force_download', False)
        proxies = kwargs.pop('proxies', None)
        local_files_only = kwargs.pop('local_files_only', False)
        use_auth_token = kwargs.pop('use_auth_token', None)
        revision = kwargs.pop('revision', None)
        torch_dtype = kwargs.pop('torch_dtype', None)
        device_map = kwargs.pop('device_map', None)
        alpha = kwargs.pop('alpha', 0.5)
        interp = kwargs.pop('interp', None)
        print('Received list', pretrained_model_name_or_path_list)
        print(f'Combining with alpha={alpha}, interpolation mode={interp}')
        checkpoint_count = len(pretrained_model_name_or_path_list)
        force = kwargs.pop('force', False)
        # Only pairwise or three-way merges are supported.
        if ((checkpoint_count > 3) or (checkpoint_count < 2)):
            raise ValueError('Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being passed.')
        print('Received the right number of checkpoints')
        # Load each model_index.json and verify pairwise compatibility.
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, force_download=force_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision)
            config_dicts.append(config_dict)
        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(config_dicts[(idx - 1)], config_dicts[idx])
        if ((not force) and (comparison_result is False)):
            raise ValueError('Incompatible checkpoints. Please check model_index.json for the models.')
        print(config_dicts[0], config_dicts[1])
        print('Compatible model_index.json files found')
        # Snapshot-download every checkpoint (local dirs are used as-is).
        cached_folders = []
        for (pretrained_model_name_or_path, config_dict) in zip(pretrained_model_name_or_path_list, config_dicts):
            folder_names = [k for k in config_dict.keys() if (not k.startswith('_'))]
            allow_patterns = [os.path.join(k, '*') for k in folder_names]
            allow_patterns += [WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, ONNX_WEIGHTS_NAME, DiffusionPipeline.config_name]
            requested_pipeline_class = config_dict.get('_class_name')
            user_agent = {'diffusers': __version__, 'pipeline_class': requested_pipeline_class}
            cached_folder = (pretrained_model_name_or_path if os.path.isdir(pretrained_model_name_or_path) else snapshot_download(pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, allow_patterns=allow_patterns, user_agent=user_agent))
            print('Cached Folder', cached_folder)
            cached_folders.append(cached_folder)
        # The first checkpoint is the base pipeline; the rest merge into it.
        final_pipe = DiffusionPipeline.from_pretrained(cached_folders[0], torch_dtype=torch_dtype, device_map=device_map)
        final_pipe.to(self.device)
        checkpoint_path_2 = None
        if (len(cached_folders) > 2):
            checkpoint_path_2 = os.path.join(cached_folders[2])
        if (interp == 'sigmoid'):
            theta_func = CheckpointMergerPipeline.sigmoid
        elif (interp == 'inv_sigmoid'):
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif (interp == 'add_diff'):
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum
        # Merge each component's state dict in place on the base pipeline.
        for attr in final_pipe.config.keys():
            if (not attr.startswith('_')):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = [*glob.glob(os.path.join(checkpoint_path_1, '*.safetensors')), *glob.glob(os.path.join(checkpoint_path_1, '*.bin'))]
                    checkpoint_path_1 = (files[0] if (len(files) > 0) else None)
                if (len(cached_folders) < 3):
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = [*glob.glob(os.path.join(checkpoint_path_2, '*.safetensors')), *glob.glob(os.path.join(checkpoint_path_2, '*.bin'))]
                        checkpoint_path_2 = (files[0] if (len(files) > 0) else None)
                if ((checkpoint_path_1 is None) and (checkpoint_path_2 is None)):
                    print(f'Skipping {attr}: not present in 2nd or 3d model')
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(module, bool):
                        # Boolean config entries (e.g. requires_safety_checker) carry no weights.
                        continue
                    theta_0 = getattr(module, 'state_dict')
                    theta_0 = theta_0()
                    update_theta_0 = getattr(module, 'load_state_dict')
                    theta_1 = (safetensors.torch.load_file(checkpoint_path_1) if checkpoint_path_1.endswith('.safetensors') else torch.load(checkpoint_path_1, map_location='cpu'))
                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (safetensors.torch.load_file(checkpoint_path_2) if checkpoint_path_2.endswith('.safetensors') else torch.load(checkpoint_path_2, map_location='cpu'))
                    if (not (theta_0.keys() == theta_1.keys())):
                        print(f'Skipping {attr}: key mismatch')
                        continue
                    if (theta_2 and (not (theta_1.keys() == theta_2.keys()))):
                        print(f'Skipping {attr}:y mismatch')
                except Exception as e:
                    print(f'Skipping {attr} do to an unexpected error: {str(e)}')
                    continue
                print(f'MERGING {attr}')
                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
                    else:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)
                del theta_1
                del theta_2
                update_theta_0(theta_0)
                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        # Linear interpolation between theta0 and theta1; theta2 unused.
        return (((1 - alpha) * theta0) + (alpha * theta1))

    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        # Smoothstep-weighted interpolation; theta2 unused.
        alpha = ((alpha * alpha) * (3 - (2 * alpha)))
        return (theta0 + ((theta1 - theta0) * alpha))

    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        # Inverse of the smoothstep weighting; theta2 unused.
        import math
        alpha = (0.5 - math.sin((math.asin((1.0 - (2.0 * alpha))) / 3.0)))
        return (theta0 + ((theta1 - theta0) * alpha))

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        # Add the weighted (theta1 - theta2) delta onto theta0.
        return (theta0 + ((theta1 - theta2) * (1.0 - alpha)))
def load_model(args, model_without_ddp, optimizer, loss_scaler):
    """Restore training state from the checkpoint at ``args.resume``.

    The model weights are always loaded; optimizer state, start epoch and
    (optionally) the AMP scaler state are restored only when the checkpoint
    contains both 'optimizer' and 'epoch' entries.
    """
    checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])
    print(('Resume checkpoint %s' % args.resume))
    has_optim_state = ('optimizer' in checkpoint) and ('epoch' in checkpoint)
    if has_optim_state:
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Resume from the epoch after the one that was saved.
        args.start_epoch = checkpoint['epoch'] + 1
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        print('With optim!')
class CustomFormatter(logging.Formatter):
    """Logging formatter that colorizes the level tag per severity."""

    # ANSI escape codes for terminal colors.
    grey = '\x1b[38;20m'
    green = '\x1b[32;20m'
    yellow = '\x1b[33;20m'
    red = '\x1b[31;20m'
    bold_red = '\x1b[31;1m'
    reset = '\x1b[0m'
    # Unused legacy pattern, kept for compatibility (shadowed by format()).
    format = '[%(name)s] - %(levelname)s - %(message)s'
    # Per-level format strings: colored level tag, then padded message.
    FORMATS = {
        logging.DEBUG: grey + '[%(levelname)s]' + reset + '%(message)7s',
        logging.INFO: green + '[%(levelname)s]' + reset + ' %(message)7s',
        logging.WARNING: yellow + '[%(levelname)s]' + reset + ' %(message)7s',
        logging.ERROR: red + '[%(levelname)s]' + reset + ' %(message)7s',
        logging.CRITICAL: bold_red + '[%(levelname)s]' + reset + '%(message)7s',
    }

    def format(self, record):
        """Render the record using the severity-specific colored pattern."""
        return logging.Formatter(self.FORMATS.get(record.levelno)).format(record)
def get_new_model_dir(root: str, model_name: str) -> str:
    """Create and return a fresh run directory ``<root>/<id:05d>-<model_name>``.

    The numeric id is one greater than the largest previous run id found
    under ``root`` (0 when none exist).
    """
    prev_run_ids, prev_run_dirs = get_valid_model_dir(root)
    next_id = max(prev_run_ids, default=-1) + 1
    model_dir = os.path.join(root, f'{next_id:05d}-{model_name}')
    # A freshly allocated id must not collide with an existing directory.
    assert not os.path.exists(model_dir)
    os.makedirs(model_dir)
    return model_dir
@require_tf
@require_retrieval
class TFRagModelSaveLoadTests(unittest.TestCase):
    """Checks that TF-RAG models reloaded from disk reproduce the loss of a
    freshly composed question-encoder + generator model.

    NOTE(review): the source line began with the residue ``_tf _retrieval``,
    i.e. the ``@require_tf`` / ``@require_retrieval`` decorators were
    corrupted; restored here.
    """

    def get_rag_config(self):
        """Build a RagConfig wired to the dummy wiki_dpr exact index."""
        question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator_config = AutoConfig.from_pretrained('facebook/bart-large-cnn')
        return RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, dataset='wiki_dpr', dataset_split='train', index_name='exact', index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8)

    def test_rag_sequence_from_pretrained(self):
        """Save/reload a TFRagSequenceForGeneration and compare its loss
        against a model composed from the same pretrained components."""
        load_weight_prefix = 'tf_rag_model_1'
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        with tempfile.TemporaryDirectory() as tmp_dirname:
            rag_sequence = TFRagSequenceForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config)
            rag_sequence.save_pretrained(tmp_dirname)
            rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
            output = rag_sequence(input_ids, labels=decoder_input_ids)
            loss_pretrained = output.loss
        del rag_sequence
        question_encoder = TFAutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator = TFAutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn', load_weight_prefix=load_weight_prefix, name='generator')
        rag_sequence = TFRagSequenceForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
        output = rag_sequence(input_ids, labels=decoder_input_ids)
        loss_init = output.loss
        # Reloaded and freshly composed models must agree (to 4 places).
        self.assertAlmostEqual(loss_pretrained, loss_init, places=4)

    def test_rag_token_from_pretrained(self):
        """Same round-trip check for TFRagTokenForGeneration."""
        load_weight_prefix = 'tf_rag_model_1'
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        with tempfile.TemporaryDirectory() as tmp_dirname:
            rag_token = TFRagTokenForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config)
            rag_token.save_pretrained(tmp_dirname)
            rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
            output = rag_token(input_ids, labels=decoder_input_ids)
            loss_pretrained = output.loss
        del rag_token
        question_encoder = TFAutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator = TFAutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn', load_weight_prefix=load_weight_prefix, name='generator')
        rag_token = TFRagTokenForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
        output = rag_token(input_ids, labels=decoder_input_ids)
        loss_init = output.loss
        self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
def Timer(func):
    """Decorator that logs the wall-clock duration of one epoch-level call.

    NOTE(review): the source contained the residue ``(func)`` where the
    ``@wraps(func)`` decorator had been stripped; restored here (a local
    import keeps this block self-contained).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = datetime.now()
        construct_print(f'a new epoch start: {start_time}')
        func(*args, **kwargs)
        construct_print(f'the time of the epoch: {(datetime.now() - start_time)}')
    return wrapper
class SIMCLRGenerator():
    """Assigns each partition its positional index as a pseudo-label."""

    def __call__(self, partition_list: List[str], **kwargs):
        # One label per partition: 0, 1, ..., len-1.
        return [position for position, _ in enumerate(partition_list)]
def md_parse_line_break(comment):
    """Convert spaces in *comment* to markdown paragraph breaks.

    NOTE(review): because the first replacement removes every single space,
    the second pattern ' - ' can never match afterwards — possibly the
    first pattern was originally a double space; confirm upstream intent.
    Behavior preserved verbatim here.
    """
    spaced = comment.replace(' ', '\n\n')
    return spaced.replace(' - ', '\n\n- ')
class NoStoppingCondition(StoppingCondition):
    """Stopping condition that never triggers: training always continues."""

    def should_stop_this_iter(self, latest_trainer_result: dict, *args, **kwargs) -> bool:
        # Ignore the latest results entirely and keep going.
        return False
def test_shufflenet_v2():
    """Smoke-test GhostNet built from the ghostnet_x1_0 ImageNet config."""
    config_path = 'configs/benchmarks/ghostnet/ghostnet_x1_0_zcls_imagenet_224.yaml'
    cfg.merge_from_file(config_path)
    print(cfg)
    model = GhostNet(cfg)
    print(model)
    # Run the shared forward-pass check on the constructed model.
    test_data(model)
def infer():
    """Run batched feature extraction over the whole CSV split.

    Spawns daemon CPU-loader threads and one GPU worker that communicate via
    the module-level queues (CPU_IN_QUEUE / GPU_IN_QUEUE / GPU_OUT_QUEUE),
    then gathers per-image features and pickles them to OUT_ROOT.
    """
    with tf.Graph().as_default() as graph:
        print('In Graph')
        # ops: inference endpoints; tuple_shape: images per tuple element.
        (ops, tuple_shape) = build_inference_model()
        sess = restore_weights()
        # Daemon threads: they die with the main thread after the queues join.
        num_loader_threads = 6
        for i in range(num_loader_threads):
            worker = Thread(target=cpu_thread)
            worker.setDaemon(True)
            worker.start()
        worker = Thread(target=gpu_thread, args=(sess, ops))
        worker.setDaemon(True)
        worker.start()
        csv_file = os.path.join(CSV_ROOT, '{}.csv'.format(SET))
        meta = load_csv(csv_file)
        num = len(meta['path'])
        # Pad with index 0 so the total divides evenly into passes.
        # NOTE(review): when num % IMAGES_PER_PASS == 0 this still appends a
        # full extra pass of padding -- confirm that is intended.
        padding = [0 for i in range((IMAGES_PER_PASS - (num % IMAGES_PER_PASS)))]
        image_info = [meta['path'][i] for i in np.concatenate((np.arange(num), np.array(padding)))]
        padded_num = len(image_info)
        # Reshape flat indices/paths into per-batch rows of whole tuples.
        batched_indices = np.reshape(np.arange(padded_num), ((- 1), (TUPLES_PER_BATCH * sum(tuple_shape))))
        batched_image_info = np.reshape(image_info, ((- 1), (TUPLES_PER_BATCH * sum(tuple_shape))))
        for (batch_indices, batch_image_info) in zip(batched_indices, batched_image_info):
            CPU_IN_QUEUE.put((batch_indices, batch_image_info))
        # Block until every queued batch has been fully processed.
        CPU_IN_QUEUE.join()
        GPU_IN_QUEUE.join()
        feature_pairs = list(GPU_OUT_QUEUE.queue)
        GPU_OUT_QUEUE.queue.clear()
        # Scatter (index, feature) pairs back into original order; padded
        # slots past `num` are dropped below. (Entries are assigned, not
        # mutated, so the shared-[] initialization is safe here.)
        features = ([[]] * padded_num)
        for pair in feature_pairs:
            for (i, f) in zip(pair[0], pair[1]):
                features[i] = f
        features = features[:num]
        save_pickle(features, os.path.join(OUT_ROOT, '{}_{}.pickle'.format(SET, OUT_NAME)))
@pytest.unsupported_on_pypy
def test_pointer_to_member_fn():
    """Buffer protocol via pointer-to-member-fn exposes the raw int value.

    NOTE(review): the source read ``_on_pypy def ...`` and
    ``assert (value == )`` -- the decorator and the hex constant were
    corrupted. Restored as ``@pytest.unsupported_on_pypy`` and
    ``0x12345678`` to match the upstream pybind11 buffer test.
    """
    for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
        buf = cls()
        buf.value = 0x12345678
        # Reinterpret the buffer's bytes as a little-endian int.
        value = struct.unpack('i', bytearray(buf))[0]
        assert value == 0x12345678
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer using byte-level BPE.

    NOTE(review): the source contained the residue ``_token.setter`` where
    decorators had been stripped; ``@property`` / ``@mask_token.setter`` on
    ``mask_token`` are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Likewise sync add_prefix_space / trim_offsets on the post-processor.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # JSON round-trip yields lists; the component expects tuples.
            if ('sep' in state):
                state['sep'] = tuple(state['sep'])
            if ('cls' in state):
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if (state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if (state.get('trim_offsets', trim_offsets) != trim_offsets):
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token string, or None (with an error log) when unset."""
        if (self._mask_token is None):
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token absorbs the preceding space (lstrip=True) so that
        # "<mask>" also matches " <mask>".
        value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Serialize the backend BPE model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> A </s> ; pair: <s> A </s></s> B </s>
        output = (([self.bos_token_id] + token_ids_0) + [self.eos_token_id])
        if (token_ids_1 is None):
            return output
        return (((output + [self.eos_token_id]) + token_ids_1) + [self.eos_token_id])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        # RoBERTa does not use token type ids: return all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
def set_seed_code(data_size, batch_size):
    """End-to-end check that auto_accelerate propagates the sampler seed.

    Fix: the DistributedSampler isinstance check used the non-existent
    module path ``torch.data.utils.distributed`` (AttributeError at
    runtime); the correct path is ``torch.utils.data.distributed``.
    """
    backend = 'nccl' if torch.cuda.is_available() else 'gloo'
    res = atorch.init_distributed(backend, set_cuda_device_using_local_rank=True)
    if not res:
        raise Exception('init failed')
    seed = 13
    model_context = create_model_context(data_size=data_size, batch_size=batch_size, sampler_seed=seed)
    assert model_context.sampler_seed == 13
    # Swap in fakes so no real acceleration service is contacted.
    atorch.auto.accelerate.EngineClient = FakeEasyDLClient
    atorch.auto.accelerate.AccelerationEngine = FakeEngine
    (status, res, _) = auto_accelerate(model_context.model, model_context.optim_func, model_context.dataset, loss_func=model_context.loss_func, prepare_input=model_context.prepare_input, optim_args=model_context.optim_args, dataloader_args=model_context.dataloader_args)
    assert status
    dataloader = res.dataloader
    sampler = dataloader.sampler
    assert isinstance(sampler, torch.utils.data.distributed.DistributedSampler)
    assert sampler.seed == seed
@dataclass
class TFBaseModelOutputWithCLSToken(ModelOutput):
    """Model output container holding the final hidden states plus the
    CLS-token value and optional per-layer hidden states.

    NOTE(review): restored the ``@dataclass`` decorator (stripped in the
    source); ModelOutput subclasses in transformers are dataclasses and the
    field defaults below only take effect with it.
    """

    # Final-layer hidden states.
    last_hidden_state: tf.Tensor = None
    # Hidden state of the classification (CLS) token.
    cls_token_value: tf.Tensor = None
    # All intermediate hidden states, when output_hidden_states is enabled.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
class RetrievedOptions(SystemResponse):
    """System response carrying a list of retrieval results for a session."""

    def __init__(self, session_token: str=None, retrieved_results: list=None):
        super().__init__(session_token)
        self.retrieved_results = retrieved_results
        # Discriminator used by consumers of SystemResponse payloads.
        self.type = 'RETRIEVED_OPTIONS'

    def update(self, retrieved_results: list=None):
        """Replace the stored results wholesale."""
        self.retrieved_results = retrieved_results

    def description(self) -> str:
        # NOTE(review): this f-string has no placeholders -- content may have
        # been lost upstream; preserved verbatim.
        return f'[[ ]]'
class TFAlbertForMultipleChoice(metaclass=DummyObject):
    """Import placeholder: instantiating raises a helpful error unless the
    TensorFlow backend is available."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises with installation instructions when "tf" is missing.
        requires_backends(self, ['tf'])
class StdConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization: before each convolution the
    weights are normalized to zero mean and unit variance per output
    channel (epsilon 1e-5 for numerical stability)."""

    def forward(self, x):
        var, mean = torch.var_mean(self.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
        standardized = (self.weight - mean) / torch.sqrt(var + 1e-05)
        return F.conv2d(x, standardized, self.bias, self.stride, self.padding, self.dilation, self.groups)
def identity_transform(img_shape):
    """Return a minimal transform pipeline (tensor conversion only).

    `img_shape` is accepted for signature parity with other transform
    factories but is intentionally unused.
    """
    return transforms.Compose([transforms.ToTensor()])
class DataInputTest():
    """Mini-batch iterator over test samples for pairwise-ranking evaluation.

    Each sample ``t`` is ``(user, hist_items, hist_times, (pos_item, neg_item))``.
    Iteration yields ``(batch_number, (users, pos_items, neg_items,
    hist_item_matrix, hist_time_matrix, hist_lengths))`` where the history
    matrices are zero-padded to the longest history in the batch.
    """

    def __init__(self, data, batch_size):
        self.batch_size = batch_size
        self.data = data
        # ceil(len(data) / batch_size) batches per epoch.
        self.epoch_size = len(self.data) // self.batch_size
        if self.epoch_size * self.batch_size < len(self.data):
            self.epoch_size += 1
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.i == self.epoch_size:
            raise StopIteration
        lo = self.i * self.batch_size
        hi = min(lo + self.batch_size, len(self.data))
        ts = self.data[lo:hi]
        self.i += 1
        u = [t[0] for t in ts]
        i = [t[3][0] for t in ts]
        j = [t[3][1] for t in ts]
        sl = [len(t[1]) for t in ts]
        max_sl = max(sl)
        hist_i = np.zeros([len(ts), max_sl], np.int64)
        hist_t = np.zeros([len(ts), max_sl], np.float32)
        for row, t in enumerate(ts):
            n_hist = len(t[1])
            hist_i[row, :n_hist] = t[1]
            # Times are truncated/indexed by the item-history length.
            hist_t[row, :n_hist] = t[2][:n_hist]
        # Note: self.i was already incremented, so the first batch reports 1.
        return (self.i, (u, i, j, hist_i, hist_t, sl))
@require_sentencepiece
@require_tokenizers
class TestMarian_MT_EN(MarianIntegrationTest):
    """Integration test for the Helsinki-NLP Maltese->English Marian model.

    NOTE(review): the source line began with the residue
    ``_sentencepiece _tokenizers``, i.e. the ``@require_sentencepiece`` /
    ``@require_tokenizers`` decorators were corrupted; restored here.
    """

    src = 'mt'
    tgt = 'en'
    src_text = ["Billi messu b'mod gentili, Gesu fejjaq ragel li kien milqut bil - marda kerha tal - gdiem."]
    expected_text = ['Touching gently, Jesus healed a man who was affected by the sad disease of leprosy.']

    def test_batch_generation_mt_en(self):
        # Shared helper compares generated batch output to expected_text.
        self._assert_generated_batch_equal_expected()
class Text2ImageDataset():
    """Streaming WebDataset pipeline yielding (image, text, orig_size,
    crop_coords) batches for text-to-image training.

    NOTE(review): ``train_dataset`` / ``train_dataloader`` were plain methods
    named like attributes — the ``@property`` decorators appear to have been
    stripped (consistent with other corrupted decorators in this file) and
    are restored here.
    """

    def __init__(self, train_shards_path_or_url: Union[(str, List[str])], num_train_examples: int, per_gpu_batch_size: int, global_batch_size: int, num_workers: int, resolution: int=1024, shuffle_buffer_size: int=1000, pin_memory: bool=False, persistent_workers: bool=False, use_fix_crop_and_size: bool=False):
        # Expand brace patterns and flatten into a single list of shard URLs.
        if (not isinstance(train_shards_path_or_url, str)):
            train_shards_path_or_url = [list(braceexpand(urls)) for urls in train_shards_path_or_url]
            train_shards_path_or_url = list(itertools.chain.from_iterable(train_shards_path_or_url))

        def get_orig_size(json):
            # Original (pre-resize) image size from the sample's JSON metadata;
            # fixed to `resolution` when use_fix_crop_and_size is set.
            if use_fix_crop_and_size:
                return (resolution, resolution)
            else:
                return (int(json.get('original_width', 0.0)), int(json.get('original_height', 0.0)))

        def transform(example):
            # Resize shortest side, random-crop to `resolution`, then
            # normalize to [-1, 1]; record the crop offset for conditioning.
            image = example['image']
            image = TF.resize(image, resolution, interpolation=transforms.InterpolationMode.BILINEAR)
            (c_top, c_left, _, _) = transforms.RandomCrop.get_params(image, output_size=(resolution, resolution))
            image = TF.crop(image, c_top, c_left, resolution, resolution)
            image = TF.to_tensor(image)
            image = TF.normalize(image, [0.5], [0.5])
            example['image'] = image
            example['crop_coords'] = ((c_top, c_left) if (not use_fix_crop_and_size) else (0, 0))
            return example

        processing_pipeline = [wds.decode('pil', handler=wds.ignore_and_continue), wds.rename(image='jpg;png;jpeg;webp', text='text;txt;caption', orig_size='json', handler=wds.warn_and_continue), wds.map(filter_keys({'image', 'text', 'orig_size'})), wds.map_dict(orig_size=get_orig_size), wds.map(transform), wds.to_tuple('image', 'text', 'orig_size', 'crop_coords')]
        pipeline = [wds.ResampledShards(train_shards_path_or_url), tarfile_to_samples_nothrow, wds.select(WebdatasetFilter(min_size=960)), wds.shuffle(shuffle_buffer_size), *processing_pipeline, wds.batched(per_gpu_batch_size, partial=False, collation_fn=default_collate)]
        # Each worker sees an equal share of batches per epoch.
        num_worker_batches = math.ceil((num_train_examples / (global_batch_size * num_workers)))
        num_batches = (num_worker_batches * num_workers)
        num_samples = (num_batches * global_batch_size)
        self._train_dataset = wds.DataPipeline(*pipeline).with_epoch(num_worker_batches)
        self._train_dataloader = wds.WebLoader(self._train_dataset, batch_size=None, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers)
        # Expose epoch sizing on the loader for training-loop bookkeeping.
        self._train_dataloader.num_batches = num_batches
        self._train_dataloader.num_samples = num_samples

    @property
    def train_dataset(self):
        return self._train_dataset

    @property
    def train_dataloader(self):
        return self._train_dataloader
def make_random_policy_bin_pack(bin_pack: BinPack) -> RandomPolicy:
    """Build a uniform-random masked policy matching BinPack's action space."""
    num_values = bin_pack.action_spec().num_values
    return make_masked_categorical_random_ndim(action_spec_num_values=num_values)
def get_score(occurences):
    """Map an answer's occurrence count to a VQA-style soft accuracy score.

    0 -> 0, 1 -> 0.3, 2 -> 0.6, 3 -> 0.9, anything else (>= 4) -> 1.
    """
    return {0: 0, 1: 0.3, 2: 0.6, 3: 0.9}.get(occurences, 1)
def readIntentPredTxt(intent_pred_txt, userIntent2id, sample_nb, userIntent_vocab_size):
    """Parse predicted intents (';'-separated per line) into a multi-hot matrix.

    Returns a (sample_nb, userIntent_vocab_size) float array with 1.0 at the
    (zero-based) position of each predicted intent; 'null' entries are skipped.

    Fix: the file was opened in binary mode ('rb') while each line is
    compared/split as str ('null', ';'), which raises TypeError on Python 3;
    open in text mode instead.
    """
    checkExistence(intent_pred_txt)
    indicator = np.zeros((sample_nb, userIntent_vocab_size))
    with open(intent_pred_txt, 'r') as f:
        for (idx, line) in enumerate(f):
            for intent in line.strip().split(';'):
                if (intent == 'null'):
                    continue
                intent = 'intent-{}'.format(intent)
                if (intent in userIntent2id):
                    # Vocabulary ids are 1-based; columns are 0-based.
                    pos = (userIntent2id[intent] - 1)
                else:
                    # NOTE(review): unknown intents share column 0 with id 1 --
                    # confirm this fallback is intended.
                    pos = 0
                indicator[(idx, pos)] = 1.0
    return indicator
class RepLKBlock(nn.Module):
    """RepLKNet block: pre-BN, 1x1 expand, reparameterized large-kernel
    depthwise conv, ReLU, 1x1 project, joined to the input by a
    (drop-path) residual connection."""

    def __init__(self, in_channels, dw_channels, block_lk_size, small_kernel, drop_path, small_kernel_merged=False):
        super().__init__()
        self.pw1 = conv_bn_relu(in_channels, dw_channels, 1, 1, 0, groups=1)
        self.pw2 = conv_bn(dw_channels, in_channels, 1, 1, 0, groups=1)
        self.large_kernel = ReparamLargeKernelConv(in_channels=dw_channels, out_channels=dw_channels, kernel_size=block_lk_size, stride=1, groups=dw_channels, small_kernel=small_kernel, small_kernel_merged=small_kernel_merged)
        self.lk_nonlinear = nn.ReLU()
        self.prelkb_bn = get_bn(in_channels)
        # Identity when drop_path probability is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        print('drop path:', self.drop_path)

    def forward(self, x):
        branch = self.prelkb_bn(x)
        branch = self.pw1(branch)
        branch = self.lk_nonlinear(self.large_kernel(branch))
        branch = self.pw2(branch)
        return x + self.drop_path(branch)
def test_can_move_left(board: Board, another_board: Board) -> None:
    """Both fixture boards admit a left move; a fully left-packed board
    with no mergeable neighbors does not."""
    for movable in (board, another_board):
        assert can_move_left(movable)
    stuck = jnp.array([[1, 2, 3, 4], [1, 2, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]])
    assert ~can_move_left(stuck)
class TestDummyGenerator():
    """Unit tests for DummyGenerator.

    NOTE(review): the test methods request ``dummy_generator`` by argument
    name, which only works when it is a pytest fixture — the ``@pytest.fixture``
    decorator (stripped in the source) is restored here.
    """

    @pytest.fixture
    def dummy_generator(self) -> DummyGenerator:
        return DummyGenerator()

    def test_dummy_generator__properties(self, dummy_generator: DummyGenerator) -> None:
        """Static configuration values exposed by the generator."""
        assert (dummy_generator.num_jobs == 3)
        assert (dummy_generator.num_machines == 3)
        assert (dummy_generator.max_num_ops == 3)
        assert (dummy_generator.max_op_duration == 4)

    def test_dummy_generator__call(self, dummy_generator: DummyGenerator) -> None:
        """The generator jits once and ignores the PRNG key (fixed state)."""
        chex.clear_trace_counter()
        call_fn = jax.jit(chex.assert_max_traces(dummy_generator.__call__, n=1))
        state1 = call_fn(jax.random.PRNGKey(1))
        state2 = call_fn(jax.random.PRNGKey(2))
        # Different keys must still produce the same dummy state.
        assert_trees_are_equal(state1, state2)
def get_egs_info(egs_dir):
    """Read the archive count stored in ``<egs_dir>/info/num_archives``."""
    info_path = os.path.join(egs_dir, 'info', 'num_archives')
    with open(info_path) as info_file:
        return int(info_file.readline().strip())
def load_model_ensemble_and_task(filenames, arg_overrides: Optional[Dict[(str, Any)]]=None, task=None, strict=True, suffix='', num_shards=1, state=None):
    """Load an ensemble of fairseq models (plus config and task) from checkpoints.

    Each filename may be split into `num_shards` shard files; FSDP-sharded
    checkpoints are consolidated before loading. Returns (ensemble, cfg, task).
    """
    # A pre-loaded state can only correspond to a single checkpoint.
    assert ((state is None) or (len(filenames) == 1))
    from fairseq import tasks
    assert (not (strict and (num_shards > 1))), 'Cannot load state dict with strict=True and checkpoint shards > 1'
    ensemble = []
    cfg = None
    for filename in filenames:
        orig_filename = filename
        model_shard_state = {'shard_weights': [], 'shard_metadata': []}
        assert (num_shards > 0)
        st = time.time()
        for shard_idx in range(num_shards):
            filename = get_maybe_sharded_checkpoint_filename(orig_filename, suffix, shard_idx, num_shards)
            if (not PathManager.exists(filename)):
                raise IOError('Model file not found: {}'.format(filename))
            if (state is None):
                state = load_checkpoint_to_cpu(filename, arg_overrides)
            # Prefer legacy Namespace args (converted) over the newer cfg key.
            if (('args' in state) and (state['args'] is not None)):
                cfg = convert_namespace_to_omegaconf(state['args'])
            elif (('cfg' in state) and (state['cfg'] is not None)):
                cfg = state['cfg']
            else:
                raise RuntimeError(f'Neither args nor cfg exist in state keys = {state.keys()}')
            # The task is built once (from the first checkpoint's config).
            if (task is None):
                task = tasks.setup_task(cfg.task, from_checkpoint=True)
            if ('task_state' in state):
                task.load_state_dict(state['task_state'])
            # Some tasks accept a from_checkpoint flag on build_model.
            argspec = inspect.getfullargspec(task.build_model)
            if (('fsdp_metadata' in state) and (num_shards > 1)):
                # FSDP path: accumulate every shard, then consolidate into a
                # single full state dict on the last shard.
                model_shard_state['shard_weights'].append(state['model'])
                model_shard_state['shard_metadata'].append(state['fsdp_metadata'])
                if (not has_FSDP):
                    raise ImportError('Cannot find FullyShardedDataParallel. Please install fairscale with: pip install fairscale')
                if (shard_idx == (num_shards - 1)):
                    consolidated_model_state = FSDP.consolidate_shard_weights(shard_weights=model_shard_state['shard_weights'], shard_metadata=model_shard_state['shard_metadata'])
                    if ('from_checkpoint' in argspec.args):
                        model = task.build_model(cfg.model, from_checkpoint=True)
                    else:
                        model = task.build_model(cfg.model)
                    if (('optimizer_history' in state) and (len(state['optimizer_history']) > 0) and ('num_updates' in state['optimizer_history'][(- 1)])):
                        model.set_num_updates(state['optimizer_history'][(- 1)]['num_updates'])
                    model.load_state_dict(consolidated_model_state, strict=strict, model_cfg=cfg.model)
            else:
                # Non-FSDP path: build and load directly from this shard.
                if ('from_checkpoint' in argspec.args):
                    model = task.build_model(cfg.model, from_checkpoint=True)
                else:
                    model = task.build_model(cfg.model)
                if (('optimizer_history' in state) and (len(state['optimizer_history']) > 0) and ('num_updates' in state['optimizer_history'][(- 1)])):
                    model.set_num_updates(state['optimizer_history'][(- 1)]['num_updates'])
                model.load_state_dict(state['model'], strict=strict, model_cfg=cfg.model)
            # Reset so the next shard/checkpoint is loaded from disk.
            state = None
            if (((shard_idx % 10) == 0) and (shard_idx > 0)):
                elapsed = (time.time() - st)
                logger.info(f'Loaded {shard_idx} shards in {elapsed:.2f}s, {(elapsed / (shard_idx + 1)):.2f}s/shard')
        ensemble.append(model)
    return (ensemble, cfg, task)
def bias(shape, name='bias', value=0.0, dtype=None, trainable=True):
    """Create (or fetch) a TF variable of `shape` initialized to `value`.

    dtype defaults to tf.float32 when not supplied.
    """
    if dtype is None:
        dtype = tf.float32
    return tf.get_variable(name=name, shape=shape, initializer=tf.constant_initializer(value), dtype=dtype, trainable=trainable)
def tile(x, count, dim=0):
    """Repeat each slice of `x` `count` consecutive times along `dim`.

    E.g. rows [a, b] with count=2, dim=0 become [a, a, b, b]. Returns a new
    contiguous tensor.
    """
    perm = list(range(x.dim()))
    if dim != 0:
        # Move the target dimension to the front so the core logic only
        # ever handles dim 0; the same permutation restores it afterwards.
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    batch = x.size(0)
    x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size)
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x
def write_results_to_file(filename, data):
    """Pickle `data` to `filename` using protocol 2 (Python-2 compatible)."""
    with open(filename, 'wb') as out_file:
        pickle.dump(data, out_file, protocol=2)
class gradient_difference_loss(nn.Module):
    """MSE plus a weighted Gradient Difference Loss (GDL).

    Expects `pred` in [0, 1] and `gt` to be a binary mask containing both
    0 and 1 (asserted). Gradients are circular (torch.roll) differences
    along the H and W axes of NCHW tensors.
    """

    def __init__(self, gdl_weight=0.01):
        super().__init__()
        self.gdl_weight = float(gdl_weight)
        self.mse = nn.MSELoss()
        self.abs_loss = lambda x, y: F.l1_loss(x, y).mean()

    def forward(self, pred: torch.Tensor, gt: torch.Tensor):
        assert (pred.max() <= 1) and (pred.min() >= 0)
        assert (gt.max() == 1) and (gt.min() == 0)
        gdl_loss = 0.0
        # Accumulate the L1 gap between gt and pred gradients along H then W.
        for axis in (2, 3):
            gt_grad = torch.roll(gt, dims=axis, shifts=1) - gt
            pred_grad = torch.roll(pred, dims=axis, shifts=1) - pred
            gdl_loss = gdl_loss + self.abs_loss(gt_grad, pred_grad)
        return self.mse(pred, gt) + (self.gdl_weight * gdl_loss)
def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float):
    """Project the update `perturb` off the radial direction of `p` when the
    gradient is nearly orthogonal to the parameter (AdamP-style projection).

    Mutates `perturb` in place and returns (perturb, wd) where wd is the
    weight-decay multiplier: wd_ratio when a projection was applied, else 1.0.
    NOTE(review): relies on the sibling helpers `_channel_view` / `_layer_view`
    to flatten `p`/`grad` for the cosine-similarity test.
    """
    wd = 1.0
    # Shape (-1, 1, ..., 1) used to broadcast per-row norms back onto p.
    expand_size = (((- 1),) + ((1,) * (len(p.shape) - 1)))
    # Try the channel-wise view first, then the whole-layer view.
    for view_func in [_channel_view, _layer_view]:
        param_view = view_func(p)
        grad_view = view_func(grad)
        cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_()
        # Scale-invariant criterion: all |cos| below delta/sqrt(view width).
        if (cosine_sim.max() < (delta / math.sqrt(param_view.size(1)))):
            # Normalize p row-wise, then remove perturb's component along p
            # (in-place on perturb, so the caller's tensor is updated).
            p_n = (p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size))
            perturb -= (p_n * view_func((p_n * perturb)).sum(dim=1).reshape(expand_size))
            wd = wd_ratio
            return (perturb, wd)
    return (perturb, wd)
class DreamBoothDataset(Dataset):
    """Dataset of instance (+ optional class) images for DreamBooth training.

    Images come either from a Hugging Face dataset (when the module-level
    ``args.dataset_name`` is set) or from a local folder. Each image is
    repeated ``repeats`` times; when class images are provided, the dataset
    length is the max of the two pools and indices wrap with modulo.

    NOTE(review): this class reads the module-global ``args`` rather than
    taking those values as parameters — confirm that is intentional.
    """

    def __init__(self, instance_data_root, instance_prompt, class_prompt, class_data_root=None, class_num=None, size=1024, repeats=1, center_crop=False):
        self.size = size
        self.center_crop = center_crop
        self.instance_prompt = instance_prompt
        self.custom_instance_prompts = None
        self.class_prompt = class_prompt
        if (args.dataset_name is not None):
            # Path A: load images (and optionally captions) from an HF dataset.
            try:
                from datasets import load_dataset
            except ImportError:
                raise ImportError('You are trying to load your data using the datasets library. If you wish to train using custom captions please install the datasets library: `pip install datasets`. If you wish to load a local folder containing images only, specify --instance_data_dir instead.')
            dataset = load_dataset(args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir)
            column_names = dataset['train'].column_names
            if (args.image_column is None):
                # Fall back to the first column when none is specified.
                image_column = column_names[0]
                logger.info(f'image column defaulting to {image_column}')
            else:
                image_column = args.image_column
                if (image_column not in column_names):
                    raise ValueError(f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}")
            instance_images = dataset['train'][image_column]
            if (args.caption_column is None):
                logger.info('No caption column provided, defaulting to instance_prompt for all images. If your dataset contains captions/prompts for the images, make sure to specify the column as --caption_column')
                self.custom_instance_prompts = None
            else:
                if (args.caption_column not in column_names):
                    raise ValueError(f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}")
                custom_instance_prompts = dataset['train'][args.caption_column]
                # Repeat each caption to stay aligned with the repeated images.
                self.custom_instance_prompts = []
                for caption in custom_instance_prompts:
                    self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
        else:
            # Path B: every file in the local folder is treated as an image.
            self.instance_data_root = Path(instance_data_root)
            if (not self.instance_data_root.exists()):
                raise ValueError("Instance images root doesn't exists.")
            instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
            self.custom_instance_prompts = None
        self.instance_images = []
        for img in instance_images:
            self.instance_images.extend(itertools.repeat(img, repeats))
        self.num_instance_images = len(self.instance_images)
        self._length = self.num_instance_images
        if (class_data_root is not None):
            # Prior-preservation class images (directory is created if missing).
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            if (class_num is not None):
                self.num_class_images = min(len(self.class_images_path), class_num)
            else:
                self.num_class_images = len(self.class_images_path)
            # Length covers whichever pool is larger; the smaller one wraps.
            self._length = max(self.num_class_images, self.num_instance_images)
        else:
            self.class_data_root = None
        self.image_transforms = transforms.Compose([transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), (transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        """Return a dict with instance image/prompt and, if configured, class image/prompt."""
        example = {}
        instance_image = self.instance_images[(index % self.num_instance_images)]
        instance_image = exif_transpose(instance_image)
        if (not (instance_image.mode == 'RGB')):
            instance_image = instance_image.convert('RGB')
        example['instance_images'] = self.image_transforms(instance_image)
        if self.custom_instance_prompts:
            caption = self.custom_instance_prompts[(index % self.num_instance_images)]
            if caption:
                example['instance_prompt'] = caption
            else:
                # Empty caption falls back to the shared instance prompt.
                example['instance_prompt'] = self.instance_prompt
        else:
            example['instance_prompt'] = self.instance_prompt
        if self.class_data_root:
            class_image = Image.open(self.class_images_path[(index % self.num_class_images)])
            class_image = exif_transpose(class_image)
            if (not (class_image.mode == 'RGB')):
                class_image = class_image.convert('RGB')
            example['class_images'] = self.image_transforms(class_image)
            example['class_prompt'] = self.class_prompt
        return example
class TFDebertaV2ForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder: any use raises unless the TensorFlow backend is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def set_template(args):
    """Fill in preset hyper-parameters implied by substrings of ``args.template``.

    Several presets may match a single template string; they are applied in
    the order below, so later assignments win on overlapping fields.
    """
    tpl = args.template
    if 'jpeg' in tpl:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.decay = '100'
    if 'EDSR_paper' in tpl:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in tpl:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650
    if 'DDBPN' in tpl:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.decay = '500'
        args.gamma = 0.1
        args.weight_decay = 0.0001
        args.loss = '1*MSE'
    if 'GAN' in tpl:
        args.epochs = 200
        args.lr = 5e-05
        args.decay = '150'
    if 'ACubeNet' in tpl:
        args.model = 'ACubeNet'
        args.n_resgroups = 4
        args.n_resblocks = 4
        args.n_feats = 64
    if 'VDSR' in tpl:
        args.model = 'VDSR'
        args.n_resblocks = 20
        args.n_feats = 64
        args.patch_size = 41
        args.lr = 0.1
def test_pretty_text():
    """Round-trip a config through its ``pretty_text`` dump and compare dicts."""
    cfg = Config.fromfile(osp.join(data_path, 'config/l.py'))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dumped_path = osp.join(temp_config_dir, '_text_config.py')
        with open(dumped_path, 'w') as sink:
            sink.write(cfg.pretty_text)
        # Re-parsing the pretty-printed text must reproduce the same dict.
        reloaded = Config.fromfile(dumped_path)
        assert reloaded._cfg_dict == cfg._cfg_dict
class LSegModuleZS(LSegmentationModuleZS):
    """Zero-shot LSeg Lightning module.

    Loads the label list for ``dataset`` from a text file and builds the
    matching LSeg network (ResNet variant for 'clip_resnet101', ViT variant
    otherwise).
    """

    def __init__(self, data_path, dataset, batch_size, base_lr, max_epochs, **kwargs):
        super(LSegModuleZS, self).__init__(data_path, dataset, batch_size, base_lr, max_epochs, **kwargs)
        label_list = self.get_labels(dataset)
        self.len_dataloader = len(label_list)
        # CLI passes booleans as strings; accept both forms.
        if (kwargs['use_pretrained'] in ['False', False]):
            use_pretrained = False
        elif (kwargs['use_pretrained'] in ['True', True]):
            use_pretrained = True
        # NOTE(review): any other value leaves `use_pretrained` unbound and the
        # net construction below raises UnboundLocalError — confirm intended.
        if (kwargs['backbone'] in ['clip_resnet101']):
            self.net = LSegRNNetZS(label_list=label_list, backbone=kwargs['backbone'], features=kwargs['num_features'], aux=kwargs['aux'], use_pretrained=use_pretrained, arch_option=kwargs['arch_option'], block_depth=kwargs['block_depth'], activation=kwargs['activation'])
        else:
            self.net = LSegNetZS(label_list=label_list, backbone=kwargs['backbone'], features=kwargs['num_features'], aux=kwargs['aux'], use_pretrained=use_pretrained, arch_option=kwargs['arch_option'], block_depth=kwargs['block_depth'], activation=kwargs['activation'])

    def get_labels(self, dataset):
        """Read one label per line from label_files/fewshot_<dataset>.txt."""
        labels = []
        path = 'label_files/fewshot_{}.txt'.format(dataset)
        assert os.path.exists(path), '*** Error : {} not exist !!!'.format(path)
        f = open(path, 'r')
        lines = f.readlines()
        for line in lines:
            label = line.strip()
            labels.append(label)
        f.close()
        print(labels)
        return labels

    # NOTE(review): defined without `self` and no @staticmethod decorator;
    # must be invoked via the class object, not an instance.
    def add_model_specific_args(parent_parser):
        """Extend the parent parser with LSeg-specific CLI options."""
        parser = LSegmentationModuleZS.add_model_specific_args(parent_parser)
        parser = ArgumentParser(parents=[parser])
        parser.add_argument('--backbone', type=str, default='vitb16_384', help='backbone network')
        parser.add_argument('--num_features', type=int, default=256, help='number of featurs that go from encoder to decoder')
        parser.add_argument('--dropout', type=float, default=0.1, help='dropout rate')
        parser.add_argument('--finetune_weights', type=str, help='load weights to finetune from')
        parser.add_argument('--no-scaleinv', default=True, action='store_false', help='turn off scaleinv layers')
        parser.add_argument('--no-batchnorm', default=False, action='store_true', help='turn off batchnorm')
        parser.add_argument('--widehead', default=False, action='store_true', help='wider output head')
        parser.add_argument('--widehead_hr', default=False, action='store_true', help='wider output head')
        parser.add_argument('--use_pretrained', type=str, default='True', help='whether use the default model to intialize the model')
        parser.add_argument('--arch_option', type=int, default=0, help='which kind of architecture to be used')
        parser.add_argument('--block_depth', type=int, default=0, help='how many blocks should be used')
        parser.add_argument('--activation', choices=['relu', 'lrelu', 'tanh'], default='relu', help='use which activation to activate the block')
        return parser
class ArgumentParser():
    """CLI argument builder for CNNGeometric training and evaluation.

    ``mode`` selects which argument groups are registered: 'train' adds the
    training and synthetic-dataset groups, 'eval' the evaluation groups.
    ``parse()`` returns both the flat namespace and a per-group dict view.
    """

    def __init__(self, mode='train'):
        self.parser = argparse.ArgumentParser(description='CNNGeometric PyTorch implementation')
        self.add_cnn_model_parameters()
        if (mode == 'train'):
            self.add_train_parameters()
            self.add_synth_dataset_parameters()
            self.add_base_train_parameters()
        elif (mode == 'eval'):
            self.add_eval_parameters()
            self.add_base_eval_parameters()

    def add_cnn_model_parameters(self):
        """Model architecture options (shared by train and eval)."""
        model_params = self.parser.add_argument_group('model')
        model_params.add_argument('--feature-extraction-cnn', type=str, default='vgg', help='feature extraction CNN model architecture: vgg/resnet101')
        model_params.add_argument('--feature-extraction-last-layer', type=str, default='', help='feature extraction CNN last layer')
        model_params.add_argument('--fr-kernel-sizes', nargs='+', type=int, default=[7, 5, 5], help='kernels sizes in feat.reg. conv layers')
        model_params.add_argument('--fr-channels', nargs='+', type=int, default=[225, 128, 64], help='channels in feat. reg. conv layers')
        model_params.add_argument('--matching-type', type=str, default='correlation', help='correlation/subtraction/concatenation')
        model_params.add_argument('--normalize-matches', type=str_to_bool, nargs='?', const=True, default=True, help='perform L2 normalization')

    def add_base_train_parameters(self):
        """Common runtime options for training."""
        base_params = self.parser.add_argument_group('base')
        base_params.add_argument('--image-size', type=int, default=240, help='image input size')
        base_params.add_argument('--model', type=str, default='', help='Pre-trained model filename')
        base_params.add_argument('--num-workers', type=int, default=4, help='number of workers')

    def add_base_eval_parameters(self):
        """Common runtime options for evaluation (two-stage model loading)."""
        base_params = self.parser.add_argument_group('base')
        base_params.add_argument('--image-size', type=int, default=240, help='image input size')
        base_params.add_argument('--model-1', type=str, default='', help='Trained model - stage 1')
        base_params.add_argument('--model-2', type=str, default='', help='Trained model - stage 2')
        base_params.add_argument('--num-of-iters', type=int, default=1, help='number of stages to use recursively')

    def add_synth_dataset_parameters(self):
        """Synthetic-transformation dataset options."""
        dataset_params = self.parser.add_argument_group('dataset')
        dataset_params.add_argument('--dataset-csv-path', type=str, default='', help='path to training transformation csv folder')
        dataset_params.add_argument('--dataset-image-path', type=str, default='', help='path to folder containing training images')
        dataset_params.add_argument('--four-point-hom', type=str_to_bool, nargs='?', const=True, default=True, help='use 4 pt parametrization for homography')
        dataset_params.add_argument('--random-sample', type=str_to_bool, nargs='?', const=True, default=True, help='sample random transformations')
        dataset_params.add_argument('--random-t', type=float, default=0.5, help='random transformation translation')
        dataset_params.add_argument('--random-s', type=float, default=0.5, help='random transformation translation')
        dataset_params.add_argument('--random-alpha', type=float, default=(1 / 6), help='random transformation translation')
        dataset_params.add_argument('--random-t-tps', type=float, default=0.4, help='random transformation translation')

    def add_train_parameters(self):
        """Optimisation, checkpointing, and logging options."""
        train_params = self.parser.add_argument_group('train')
        train_params.add_argument('--lr', type=float, default=0.001, help='learning rate')
        train_params.add_argument('--lr_scheduler', type=str_to_bool, nargs='?', const=True, default=True, help='Bool (default True), whether to use a decaying lr_scheduler')
        train_params.add_argument('--lr_max_iter', type=int, default=1000, help='Number of steps between lr starting value and 1e-6 (lr default min) when choosing lr_scheduler')
        train_params.add_argument('--momentum', type=float, default=0.9, help='momentum constant')
        train_params.add_argument('--num-epochs', type=int, default=20, help='number of training epochs')
        train_params.add_argument('--batch-size', type=int, default=16, help='training batch size')
        train_params.add_argument('--weight-decay', type=float, default=0, help='weight decay constant')
        train_params.add_argument('--seed', type=int, default=1, help='Pseudo-RNG seed')
        train_params.add_argument('--use-mse-loss', type=str_to_bool, nargs='?', const=True, default=False, help='Use MSE loss on tnf. parameters')
        train_params.add_argument('--geometric-model', type=str, default='affine', help='affine/hom/tps')
        train_params.add_argument('--trained-model-fn', type=str, default='checkpoint_adam', help='trained model filename')
        train_params.add_argument('--trained-model-dir', type=str, default='trained_models', help='path to trained models folder')
        train_params.add_argument('--training-dataset', type=str, default='pascal', help='dataset to use for training')
        train_params.add_argument('--train-dataset-size', type=int, default=0, help='train dataset size limit')
        train_params.add_argument('--test-dataset-size', type=int, default=0, help='test dataset size limit')
        train_params.add_argument('--train-fe', type=str_to_bool, nargs='?', const=True, default=True, help='Train feature extraction')
        train_params.add_argument('--train-fr', type=str_to_bool, nargs='?', const=True, default=True, help='Train feature regressor')
        train_params.add_argument('--train-bn', type=str_to_bool, nargs='?', const=True, default=True, help='train batch-norm layers')
        train_params.add_argument('--fe-finetune-params', nargs='+', type=str, default=[''], help='String indicating the F.Ext params to finetune')
        train_params.add_argument('--update-bn-buffers', type=str_to_bool, nargs='?', const=True, default=False, help='Update batch norm running mean and std')
        train_params.add_argument('--occlusion-factor', type=float, default=0, help='occlusion factor for training')
        train_params.add_argument('--log_interval', type=int, default=100, help='Number of iterations between logs')
        train_params.add_argument('--log_dir', type=str, default='', help='If unspecified log_dir will be set to<trained_models_dir>/<trained_models_fn>/')

    def add_eval_parameters(self):
        """Evaluation dataset and metric options."""
        eval_params = self.parser.add_argument_group('eval')
        eval_params.add_argument('--eval-dataset', type=str, default='pf', help='pf/caltech/tss')
        eval_params.add_argument('--eval-dataset-path', type=str, default='', help='Path to PF dataset')
        eval_params.add_argument('--flow-output-dir', type=str, default='results/', help='flow output dir')
        eval_params.add_argument('--pck-alpha', type=float, default=0.1, help='pck margin factor alpha')
        eval_params.add_argument('--eval-metric', type=str, default='pck', help='pck/distance')
        eval_params.add_argument('--tps-reg-factor', type=float, default=0.0, help='regularisation factor for tps tnf')
        eval_params.add_argument('--batch-size', type=int, default=16, help='batch size (only GPU)')

    def parse(self, arg_str=None):
        """Parse argv (or a space-separated string) and return (args, per-group dicts)."""
        if (arg_str is None):
            args = self.parser.parse_args()
        else:
            args = self.parser.parse_args(arg_str.split())
        arg_groups = {}
        # Rebuild a group-title -> {dest: value} mapping from argparse internals.
        for group in self.parser._action_groups:
            group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions}
            arg_groups[group.title] = group_dict
        return (args, arg_groups)
def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build an FBNet-C model for the given config ``variant``.

    Stage definitions use the builder's block-string notation (inverted
    residual blocks with repeat / kernel / stride / expansion / channels).
    """
    stage_defs = [
        ['ir_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
        ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
        ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
        ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
        ['ir_r4_k5_s2_e6_c184'],
        ['ir_r1_k3_s1_e6_c352'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(stage_defs),
        stem_size=16,
        num_features=1984,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        **kwargs,
    )
    return _create_model(model_kwargs, default_cfgs[variant], pretrained)
def Data_func():
    """Load Boston Housing, min-max scaling features with train-set statistics only."""
    (X_train, y_train), (X_test, y_test) = datasets.boston_housing.load_data()
    scaler = preprocessing.MinMaxScaler().fit(X_train)
    # Same scaler for both splits so the test set never leaks into the fit.
    return (scaler.transform(X_train), y_train), (scaler.transform(X_test), y_test)
class MemoryType(Enum):
    """Backing-memory kind for a buffer."""

    CPU_BUFFER = 0  # host-side buffer
    GPU_BUFFER = 1  # device-side buffer
    GPU_IMAGE = 2   # device-side image
    MEMORY_NONE = 10000  # sentinel: no backing memory
def compute_similarity_transform_batch(S1, S2):
    """Apply ``compute_similarity_transform`` to each item along the batch axis.

    Returns an array shaped like ``S1`` where slice i is S1[i] aligned to S2[i].
    """
    aligned = np.zeros_like(S1)
    for idx, s1 in enumerate(S1):
        aligned[idx] = compute_similarity_transform(s1, S2[idx])
    return aligned
def get_local_batch_size_in_trainer(global_batch_size: int, trainer: Trainer) -> int:
    """Translate a global batch size into the per-process batch size for ``trainer``.

    Raises:
        AttributeError: if the trainer's strategy is not in the supported lists.
    """
    strategy = get_trainer_strategy(trainer)
    devices = trainer.num_devices
    num_nodes = trainer.num_nodes

    def _matches(strategy_classes):
        # True when the active strategy is an instance of any listed class.
        return any(isinstance(strategy, cls) for cls in strategy_classes)

    if not _matches(supported_strategies):
        raise AttributeError(f'Strategy {strategy} not supported.')
    if _matches(tpu_strategies):
        # TPU: split across devices only.
        return global_batch_size // devices
    if _matches(process_independent_strategies):
        # One independent process per device per node.
        return global_batch_size // devices // num_nodes
    if _matches(fully_dependent_strategies):
        # All processes see the full global batch.
        return global_batch_size
    raise AttributeError(f'Strategy {strategy} not supported.')
def evaluate_json(gold: List, pred: List):
    """Print macro-F1 per (test_set, language) pair between gold and predicted labels.

    ``gold`` and ``pred`` are position-aligned lists of instance dicts; pairs
    with no matching gold instances are skipped silently.
    """
    for test_set in TEST_SETS:
        print(test_set)
        for language in LANGUAGES:
            selected = [
                i for i, instance in enumerate(gold)
                if instance['test_set'] == test_set and instance['language'] == language
            ]
            if not selected:
                continue
            gold_labels = [gold[i]['label'] for i in selected]
            pred_labels = [pred[i]['label'] for i in selected]
            score = f1_score(gold_labels, pred_labels, average='macro')
            print(language.upper(), 100 * score)
        print()
class ScipyWrapperODESolver(metaclass=abc.ABCMeta):
    """Bridge a torch ODE problem to ``scipy.integrate.solve_ivp``.

    The initial state is flattened to a numpy vector up-front; results are
    reshaped back into torch tensors on the original device and dtype.
    """

    def __init__(self, func, y0, rtol, atol, min_step=0, max_step=float('inf'), solver='LSODA', **unused_kwargs):
        # Options meaningful for other solvers but ignored here.
        for ignored in ('norm', 'grid_points', 'eps'):
            unused_kwargs.pop(ignored, None)
        _handle_unused_kwargs(self, unused_kwargs)
        del unused_kwargs

        self.dtype = y0.dtype
        self.device = y0.device
        self.shape = y0.shape
        self.y0 = y0.detach().cpu().numpy().reshape(-1)
        self.rtol = rtol
        self.atol = atol
        self.min_step = min_step
        self.max_step = max_step
        self.solver = solver
        self.func = convert_func_to_numpy(func, self.shape, self.device, self.dtype)

    def integrate(self, t):
        """Integrate over [t.min(), t.max()], evaluating the solution at each entry of ``t``."""
        if t.numel() == 1:
            # A single time point needs no integration: return the initial state.
            return torch.tensor(self.y0)[None].to(self.device, self.dtype)
        t_np = t.detach().cpu().numpy()
        result = solve_ivp(
            self.func,
            t_span=[t_np.min(), t_np.max()],
            y0=self.y0,
            t_eval=t_np,
            method=self.solver,
            rtol=self.rtol,
            atol=self.atol,
            min_step=self.min_step,
            max_step=self.max_step,
        )
        trajectory = torch.tensor(result.y).T.to(self.device, self.dtype)
        return trajectory.reshape(-1, *self.shape)
def initialize_scheduler(optimizer, config, last_step=(- 1)):
    """Create the learning-rate scheduler named by ``config.scheduler``.

    Args:
        optimizer: the optimizer to schedule.
        config: object providing ``scheduler`` plus the scheduler-specific
            fields read below (step_size/step_gamma, max_iter/poly_power, ...).
        last_step: index of the last completed step (-1 starts fresh).

    Returns:
        The constructed scheduler instance.

    Raises:
        ValueError: for an unknown ``config.scheduler``. (The original only
        logged an error and returned ``None``, deferring the crash to the
        first ``scheduler.step()`` call with a confusing message.)
    """
    if config.scheduler == 'StepLR':
        return StepLR(optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step)
    if config.scheduler == 'PolyLR':
        return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step)
    if config.scheduler == 'SquaredLR':
        return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step)
    if config.scheduler == 'ExpLR':
        return ExpLR(optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step)
    logging.error('Scheduler not supported')
    raise ValueError(f'Scheduler not supported: {config.scheduler!r}')
# NOTE(review): the original line read `_staging_test class SchedulerPushToHubTester(...)`,
# which is not valid Python; restored as a decorator application. Confirm the
# intended decorator name against the repository (likely a staging-test marker).
@_staging_test
class SchedulerPushToHubTester(unittest.TestCase):
    """Round-trip tests: push a DDIMScheduler config to the Hub and reload it.

    Each test exercises both the direct ``push_to_hub`` path and the
    ``save_config(..., push_to_hub=True)`` path, deleting the repo afterwards.
    """

    identifier = uuid.uuid4()
    repo_id = f'test-scheduler-{identifier}'
    org_repo_id = f'valid_org/{repo_id}-org'

    def test_push_to_hub(self):
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        scheduler.push_to_hub(self.repo_id, token=TOKEN)
        scheduler_loaded = DDIMScheduler.from_pretrained(f'{USER}/{self.repo_id}')
        # Only the concrete class needs to round-trip; weights are config-free.
        assert type(scheduler) == type(scheduler_loaded)
        delete_repo(token=TOKEN, repo_id=self.repo_id)
        with tempfile.TemporaryDirectory() as tmp_dir:
            scheduler.save_config(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN)
        scheduler_loaded = DDIMScheduler.from_pretrained(f'{USER}/{self.repo_id}')
        assert type(scheduler) == type(scheduler_loaded)
        delete_repo(token=TOKEN, repo_id=self.repo_id)

    def test_push_to_hub_in_organization(self):
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        scheduler.push_to_hub(self.org_repo_id, token=TOKEN)
        scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
        assert type(scheduler) == type(scheduler_loaded)
        delete_repo(token=TOKEN, repo_id=self.org_repo_id)
        with tempfile.TemporaryDirectory() as tmp_dir:
            scheduler.save_config(tmp_dir, repo_id=self.org_repo_id, push_to_hub=True, token=TOKEN)
        scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
        assert type(scheduler) == type(scheduler_loaded)
        delete_repo(token=TOKEN, repo_id=self.org_repo_id)
class TransformerEncoderLayer(nn.Module):
    """One DETR-style transformer encoder layer: self-attention followed by an FFN.

    ``normalize_before`` selects the pre-norm ordering (norm -> sublayer ->
    residual); the default is post-norm (sublayer -> residual -> norm).
    Positional embeddings are added to queries/keys only, never to values.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional embedding unless it is absent."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Post-norm path: attention + residual, norm, FFN + residual, norm."""
        query = key = self.with_pos_embed(src, pos)
        attn_out, _ = self.self_attn(query, key, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Pre-norm path: norm, attention + residual, norm, FFN + residual."""
        normed = self.norm1(src)
        query = key = self.with_pos_embed(normed, pos)
        attn_out, _ = self.self_attn(query, key, value=normed, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)

    def forward(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        # Pre-norm is opt-in; post-norm is the default path.
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TestNoIntegration(unittest.TestCase):
    """odeint asked for only the initial time must return y0 without integrating."""

    def test_odeint(self):
        for reverse in (False, True):
            for dtype in DTYPES:
                for device in DEVICES:
                    for method in METHODS:
                        for ode in PROBLEMS:
                            case = dict(reverse=reverse, dtype=dtype, device=device, ode=ode, method=method)
                            with self.subTest(**case):
                                f, y0, t_points, sol = construct_problem(dtype=dtype, device=device, ode=ode, reverse=reverse)
                                # Requesting a single time point should be exact.
                                y = torchdiffeq.odeint(f, y0, t_points[0:1], method=method)
                                self.assertLess((sol[0] - y).abs().max(), 1e-12)
class IndexRanker():
    """MaxSim (late-interaction) scorer over a flat matrix of passage token embeddings.

    ``tensor`` holds the concatenated token embeddings of all passages;
    ``doclens[pid]`` gives passage lengths and their prefix sums locate each
    passage's rows. Passages are bucketed into a few fixed strides (90th
    percentile + max length) so batches can be gathered via strided views
    into preallocated buffers instead of ragged copies.
    """

    def __init__(self, tensor, doclens, device):
        self.tensor = tensor
        self.doclens = doclens
        self.maxsim_dtype = torch.float32
        # Prefix sums map a pid to its first row in `tensor`.
        self.doclens_pfxsum = [0] + list(accumulate(self.doclens))
        self.doclens = torch.tensor(self.doclens)
        self.doclens_pfxsum = torch.tensor(self.doclens_pfxsum)
        self.dim = self.tensor.size(-1)
        # Stride buckets: 90th percentile of lengths, plus the maximum.
        self.strides = [torch_percentile(self.doclens, p) for p in [90]]
        self.strides.append(self.doclens.max().item())
        self.strides = sorted(list(set(self.strides)))
        print_message(f'#> Using strides {self.strides}..')
        self.views = self._create_views(self.tensor)
        self.bsize = 1 << 14
        self.buffers = self._create_buffers(self.bsize, self.tensor.dtype, {'cpu', 'cuda:0'})
        self.device = device

    def _create_views(self, tensor):
        """One strided (num_positions, stride, dim) view of ``tensor`` per stride bucket."""
        views = []
        for stride in self.strides:
            outdim = tensor.size(0) - stride + 1
            view = torch.as_strided(tensor, (outdim, stride, self.dim), (self.dim, self.dim, 1))
            views.append(view)
        return views

    def _create_buffers(self, max_bsize, dtype, devices):
        """Preallocate per-device gather buffers, one per stride bucket."""
        buffers = {}
        for device in devices:
            buffers[device] = [torch.zeros(max_bsize, stride, self.dim, dtype=dtype, device=device, pin_memory=(device == 'cpu')) for stride in self.strides]
        return buffers

    def rank(self, Q, pids, views=None, shift=0):
        """Score each (query, pid) pair with MaxSim; returns scores aligned with ``pids``.

        Q has either one query (broadcast over all pids) or one per pid.
        """
        assert len(pids) > 0
        assert Q.size(0) in [1, len(pids)]
        Q = Q.contiguous().to(self.device).to(dtype=self.maxsim_dtype)
        views = self.views if views is None else views
        VIEWS_DEVICE = views[0].device
        D_buffers = self.buffers[str(VIEWS_DEVICE)]
        raw_pids = pids if type(pids) is list else pids.tolist()
        pids = torch.tensor(pids) if type(pids) is list else pids
        doclens, offsets = self.doclens[pids], self.doclens_pfxsum[pids]
        # Smallest stride bucket that fits each document.
        assignments = (doclens.unsqueeze(1) > (torch.tensor(self.strides).unsqueeze(0) + 1e-06)).sum(-1)
        one_to_n = torch.arange(len(raw_pids))
        output_pids, output_scores, output_permutation = [], [], []
        for group_idx, stride in enumerate(self.strides):
            locator = (assignments == group_idx)
            if locator.sum() < 1e-05:
                continue
            group_pids, group_doclens, group_offsets = pids[locator], doclens[locator], offsets[locator]
            group_Q = Q if Q.size(0) == 1 else Q[locator]
            group_offsets = group_offsets.to(VIEWS_DEVICE) - shift
            # Deduplicate consecutive offsets so shared rows are gathered once.
            group_offsets_uniq, group_offsets_expand = torch.unique_consecutive(group_offsets, return_inverse=True)
            D_size = group_offsets_uniq.size(0)
            D = torch.index_select(views[group_idx], 0, group_offsets_uniq, out=D_buffers[group_idx][:D_size])
            D = D.to(self.device)
            D = D[group_offsets_expand.to(self.device)].to(dtype=self.maxsim_dtype)
            # Mask padding rows beyond each document's true length.
            mask = torch.arange(stride, device=self.device) + 1
            mask = mask.unsqueeze(0) <= group_doclens.to(self.device).unsqueeze(-1)
            # BUG FIX: original read `(D group_Q)` (missing operator). MaxSim
            # needs the doc-token x query-token similarity matrix, i.e. a
            # batched matmul D @ group_Q, masked, then max over doc tokens and
            # summed over query tokens.
            scores = (D @ group_Q) * mask.unsqueeze(-1)
            scores = scores.max(1).values.sum(-1).cpu()
            output_pids.append(group_pids)
            output_scores.append(scores)
            output_permutation.append(one_to_n[locator])
        # Undo the bucket grouping so scores line up with the input pid order.
        output_permutation = torch.cat(output_permutation).sort().indices
        output_pids = torch.cat(output_pids)[output_permutation].tolist()
        output_scores = torch.cat(output_scores)[output_permutation].tolist()
        assert len(raw_pids) == len(output_pids)
        assert len(raw_pids) == len(output_scores)
        assert raw_pids == output_pids
        return output_scores

    def batch_rank(self, all_query_embeddings, all_query_indexes, all_pids, sorted_pids):
        """Score many (query, pid) pairs, sweeping the index in 50k-passage windows.

        Requires ``all_pids`` to be sorted so each window is a contiguous slice.
        """
        assert sorted_pids is True
        scores = []
        range_start, range_end = 0, 0
        for pid_offset in range(0, len(self.doclens), 50000):
            pid_endpos = min(pid_offset + 50000, len(self.doclens))
            # Advance the [range_start, range_end) window over the sorted pids.
            range_start = range_start + (all_pids[range_start:] < pid_offset).sum()
            range_end = range_end + (all_pids[range_end:] < pid_endpos).sum()
            pids = all_pids[range_start:range_end]
            query_indexes = all_query_indexes[range_start:range_end]
            print_message(f'###--> Got {len(pids)} query--passage pairs in this sub-range {(pid_offset, pid_endpos)}.')
            if len(pids) == 0:
                continue
            print_message(f'###--> Ranking in batches the pairs #{range_start} through #{range_end} in this sub-range.')
            tensor_offset = self.doclens_pfxsum[pid_offset].item()
            # Pad the slice end so strided views never run off the edge.
            tensor_endpos = self.doclens_pfxsum[pid_endpos].item() + 512
            collection = self.tensor[tensor_offset:tensor_endpos].to(self.device)
            views = self._create_views(collection)
            print_message(f'#> Ranking in batches of {self.bsize} query--passage pairs...')
            for batch_idx, offset in enumerate(range(0, len(pids), self.bsize)):
                if (batch_idx % 100) == 0:
                    print_message('#> Processing batch #{}..'.format(batch_idx))
                endpos = offset + self.bsize
                batch_query_index, batch_pids = query_indexes[offset:endpos], pids[offset:endpos]
                Q = all_query_embeddings[batch_query_index]
                scores.extend(self.rank(Q, batch_pids, views, shift=tensor_offset))
        return scores
def _encoded_image_string_tensor_input_placeholder():
    """Build a placeholder for a batch of encoded image strings and its decoded batch.

    Returns (placeholder, decoded) where decoded is a uint8 tensor produced by
    decoding each string to an HxWx3 image via ``tf.map_fn``.
    """
    batch_image_str_placeholder = tf.placeholder(dtype=tf.string, shape=[None], name='encoded_image_string_tensor')

    def decode(encoded_image_string_tensor):
        # Decode one serialized image; fix the channel count for shape inference.
        image = tf.image.decode_image(encoded_image_string_tensor, channels=3)
        image.set_shape((None, None, 3))
        return image

    decoded_batch = tf.map_fn(decode, elems=batch_image_str_placeholder, dtype=tf.uint8, parallel_iterations=32, back_prop=False)
    return (batch_image_str_placeholder, decoded_batch)
class OutGate(object):
    """Parameter container for an LSTM output gate (Lasagne-style initializer specs).

    Pass ``W_cell=None`` to disable the peephole connection; in that case the
    ``W_cell`` attribute is not set at all. ``nonlinearity=None`` falls back to
    the identity function.
    """

    def __init__(self, W_in=init.Normal(0.1), W_hid=init.Normal(0.1), W_cell=init.Normal(0.1), W_to=init.Normal(0.1), b=init.Constant(0.0), nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        self.W_to = W_to
        if W_cell is not None:
            # Peephole weight is optional; the attribute is omitted when disabled.
            self.W_cell = W_cell
        self.b = b
        self.nonlinearity = nonlinearities.identity if nonlinearity is None else nonlinearity
def find_deletable_span_rule_based_updated(tree: TreeNode, root_len: int, parent=None, grand_parent=None):
    """Collect candidate deletable spans by running every deletion rule on ``tree``,
    then recursing into its children.

    On recursion, each child sees ``tree`` as its parent and the current
    ``parent`` as its grandparent.
    """
    collected = []
    collected += det_JJ(tree)
    collected += det_PRN(tree)
    collected += det_ccs(tree, root_len)
    collected += det_pp(node=tree, parent=parent)
    collected += det_sbar(node=tree, root_len=root_len, parent=parent)
    collected += det_vp_vbg_vbn(tree=tree, parent=parent, grand_parent=grand_parent)
    collected += det_np_np(tree)
    collected += det_RB(tree, parent)
    collected += det_between_split(tree, root_len)
    collected += det_advp(tree, parent)
    collected += det_rb(tree)
    if tree.children is not None:
        for child in tree.children:
            collected += find_deletable_span_rule_based_updated(child, root_len, parent=tree, grand_parent=parent)
    return collected
class ImageProjModel(torch.nn.Module):
    """Project an image embedding into ``clip_extra_context_tokens`` context tokens.

    A single linear layer maps each (clip_embeddings_dim,) embedding to
    tokens x cross_attention_dim values, reshaped to one token per row and
    LayerNorm-ed over the feature dimension.
    """

    def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
        super().__init__()
        self.cross_attention_dim = cross_attention_dim
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.proj = torch.nn.Linear(clip_embeddings_dim, clip_extra_context_tokens * cross_attention_dim)
        self.norm = torch.nn.LayerNorm(cross_attention_dim)

    def forward(self, image_embeds):
        projected = self.proj(image_embeds)
        tokens = projected.reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim)
        return self.norm(tokens)
class PrioritizedReplayBuffer(ReplayBuffer):
    """Replay buffer with proportional prioritized sampling.

    Priorities are stored as p**alpha in two segment trees: a sum tree for
    sampling transitions proportional to priority, and a min tree for
    computing the maximum importance weight used to normalize the others.
    """

    def __init__(self, size, alpha):
        super(PrioritizedReplayBuffer, self).__init__(size)
        assert (alpha >= 0)
        self._alpha = alpha
        # Segment trees require a power-of-two capacity >= size.
        it_capacity = 1
        while (it_capacity < size):
            it_capacity *= 2
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        # New transitions receive the highest priority seen so far.
        self._max_priority = 1.0

    def add(self, *args, **kwargs):
        # Capture the slot index BEFORE the base add() advances _next_idx.
        idx = self._next_idx
        super().add(*args, **kwargs)
        self._it_sum[idx] = (self._max_priority ** self._alpha)
        self._it_min[idx] = (self._max_priority ** self._alpha)

    def _sample_proportional(self, batch_size):
        # Inverse-CDF sampling: draw a uniform mass, find its index in the sum tree.
        res = []
        for _ in range(batch_size):
            mass = (random.random() * self._it_sum.sum(0, (len(self._storage) - 1)))
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size, beta):
        """Sample a batch plus importance weights (normalized by the max weight) and indices.

        ``beta`` > 0 controls how strongly the importance-sampling correction
        compensates for the non-uniform sampling.
        """
        assert (beta > 0)
        idxes = self._sample_proportional(batch_size)
        weights = []
        # Max weight corresponds to the minimum sampling probability.
        p_min = (self._it_min.min() / self._it_sum.sum())
        max_weight = ((p_min * len(self._storage)) ** (- beta))
        for idx in idxes:
            p_sample = (self._it_sum[idx] / self._it_sum.sum())
            weight = ((p_sample * len(self._storage)) ** (- beta))
            weights.append((weight / max_weight))
        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple((list(encoded_sample) + [weights, idxes]))

    def update_priorities(self, idxes, priorities):
        """Assign new (strictly positive) priorities to previously sampled transitions."""
        assert (len(idxes) == len(priorities))
        for (idx, priority) in zip(idxes, priorities):
            assert (priority > 0)
            assert (0 <= idx < len(self._storage))
            self._it_sum[idx] = (priority ** self._alpha)
            self._it_min[idx] = (priority ** self._alpha)
            # Track the running max so future inserts start at top priority.
            self._max_priority = max(self._max_priority, priority)
class ClipAdapter(nn.Module):
    """Wrap a CLIP model with a (possibly learnable) prompt extractor.

    Text features are produced by ``prompt_learner``; when the prompts have
    no trainable parameters they are cached in ``text_feature_buffer`` so
    each noun's feature is computed at most once.
    """

    def __init__(self, clip_model_name: str, prompt_learner: PromptExtractor):
        super().__init__()
        self.clip_model = build_clip_model(clip_model_name)
        self.prompt_learner = prompt_learner
        self.prompt_learner.init_buffer(self.clip_model)
        self.text_feature_buffer = {}
        self.prompt_learner.init_task_prompt(self.clip_model)

    def forward(self, image: torch.Tensor, text: List[str], mask, **kwargs):
        """Return similarity logits between `image` regions and `text` nouns."""
        image = self._preprocess_image(image, **kwargs)
        text_feature = self.get_text_features(text)
        image_features = self.get_image_features(image, mask)
        return self.get_sim_logits(text_feature, image_features)

    def _preprocess_image(self, image: torch.Tensor):
        # Identity by default; subclasses may override.
        return image

    def _get_text_features(self, noun_list: List[str]):
        if self.prompt_learner.with_trainable_params:
            # Trainable prompts: recompute every call so gradients flow, but
            # keep detached copies in the buffer.
            text_features = self.prompt_learner(noun_list, self.clip_model)
            self.text_feature_buffer.update(
                {noun: feat.detach() for noun, feat in zip(noun_list, text_features)}
            )
            return text_features

        # Frozen prompts: only compute features for nouns not cached yet.
        missing = [noun for noun in noun_list if noun not in self.text_feature_buffer]
        if missing:
            computed = self.prompt_learner(missing, self.clip_model)
            self.text_feature_buffer.update(
                {noun: feat for noun, feat in zip(missing, computed)}
            )
        return torch.stack([self.text_feature_buffer[noun] for noun in noun_list])

    def get_text_features(self, noun_list: List[str]):
        return self._get_text_features(noun_list)

    def get_image_features(self, image: torch.Tensor, mask=None):
        """Encode `image` with CLIP's visual tower and L2-normalize."""
        feats = self.clip_model.visual(image, mask)
        return feats / feats.norm(dim=-1, keepdim=True)

    def get_sim_logits(self, text_features: torch.Tensor,
                       image_features: torch.Tensor, temperature: float = 100):
        """Scaled cosine-similarity logits (features assumed normalized)."""
        return temperature * image_features.matmul(text_features.transpose(-1, -2))

    def normalize_feature(self, feat: torch.Tensor):
        return feat / feat.norm(dim=-1, keepdim=True)
# NOTE(review): `_start_docstrings` reads like a decorator applied as a plain
# call here -- presumably an extraction artifact of `@add_start_docstrings`;
# confirm against the upstream transformers source.
_start_docstrings('XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer\n on top of the pooled output) e.g. for GLUE tasks. ', XLM_ROBERTA_START_DOCSTRING)
class TFXLMRobertaForSequenceClassification(TFRobertaForSequenceClassification):
    """TF XLM-RoBERTa model for sequence classification/regression.

    XLM-RoBERTa shares RoBERTa's architecture, so the full implementation is
    inherited; overriding ``config_class`` is the only change needed.
    """
    config_class = XLMRobertaConfig
def save(opt):
    """Dump the options dict as timestamped JSON into its options directory.

    The copy is written to ``opt['path']['options']`` using the original
    options filename with the current timestamp inserted before the
    extension.
    """
    source_path = opt['opt_path']
    target_dir = opt['path']['options']
    _, base_name = os.path.split(source_path)
    stem, ext = os.path.splitext(base_name)
    dump_path = os.path.join(target_dir, stem + get_timestamp() + ext)
    with open(dump_path, 'w') as dump_file:
        json.dump(opt, dump_file, indent=2)
class Loss_Saver():
    """Track a loss curve smoothed with an exponential moving average.

    Each raw value is blended as ``0.9 * previous + 0.1 * value``; the
    smoothed series is accumulated in ``loss_list``.
    """

    def __init__(self):
        self.loss_list = []   # smoothed loss history
        self.last_loss = 0.0  # most recent smoothed value

    def updata(self, value):
        """Fold ``value`` into the EMA and append the smoothed result.

        The first value seeds the average directly. (Misspelled name kept
        for backward compatibility; prefer :meth:`update`.)
        """
        if not self.loss_list:
            smoothed = value
        else:
            smoothed = self.last_loss * 0.9 + value * 0.1
        self.loss_list.append(smoothed)
        self.last_loss = smoothed

    # Correctly-spelled alias; existing callers of `updata` keep working.
    update = updata

    def loss_drawing(self):
        """Print the smoothed loss history."""
        print(self.loss_list)
class HRSCDDataModule(BaseDataModule):
    """Data module for the HRSCD change-detection dataset.

    Splits the full dataset into train/val/test using the split fractions
    (``val_split``/``test_split``) provided by ``BaseDataModule``.
    """

    def __init__(self, root: str = '.data/HRSCD',
                 transform: Optional[Compose] = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.root = root
        # Bug fix: the default transform was a mutable object created at
        # def-time (`transform=Compose([ToTensor()])`), shared across every
        # instance. Build it per-instance instead; callers who passed their
        # own transform are unaffected.
        self.transform = transform if transform is not None else Compose([ToTensor()])

    def setup(self, stage: Optional[str] = None):
        """Load HRSCD and split it into train/val/test datasets."""
        dataset = HRSCD(root=self.root, transform=self.transform)
        (self.train_dataset, self.val_dataset, self.test_dataset) = dataset_split(
            dataset, val_pct=self.val_split, test_pct=self.test_split
        )
class DataPointFactory():
    """Map a run configuration to the datapoint class it should use."""

    @staticmethod
    def get_datapoint(config):
        """Return the datapoint class for ``config``.

        Dispatches on ``task_type`` + ``model_type`` first; for any other
        task type it falls back to the legacy ``experiment_mode`` dispatch.
        (`@staticmethod` added so instance calls do not misbind `config`;
        class-level calls behave exactly as before.)

        Raises:
            SystemExit: wrapping ``NotImplementedError`` for unknown model
                types or experiment modes.
        """
        if config.task_type == TaskType.Classification:
            if config.model_type in [ModelType.NaiveBayes, ModelType.XGBoost, ModelType.SVM]:
                return TFIDFDataPoint
            elif config.model_type in [ModelType.LSTM, ModelType.BiLSTM, ModelType.TRANSFORMERENCODER]:
                return SequenceDataPoint
            elif config.model_type in [ModelType.TreeLSTM, ModelType.GCN, ModelType.GAT, ModelType.GGNN]:
                return GraphDataPoint
            else:
                # Bug fix: the original message had no %s placeholder, so the
                # `%` formatting itself raised a TypeError instead of the
                # intended error.
                raise SystemExit(NotImplementedError(
                    'Unknown Model Type %s in Datapoint Factory for classification' % config.model_type))
        elif config.task_type == TaskType.PairwiseClassification:
            if config.model_type in [ModelType.NaiveBayes, ModelType.XGBoost, ModelType.SVM]:
                return TFIDFPairwiseDataPoint
            elif config.model_type in [ModelType.LSTM, ModelType.BiLSTM, ModelType.TRANSFORMERENCODER]:
                return SequencePairwiseDataPoint
            elif config.model_type == ModelType.TreeLSTM:
                # NOTE(review): TreeLSTM maps to the TFIDF pairwise datapoint
                # here -- presumably intentional; confirm with the authors.
                return TFIDFPairwiseDataPoint
            elif config.model_type in [ModelType.GAT, ModelType.GCN, ModelType.GGNN]:
                return SingleEdgeGraphPairwiseDataPoint
            else:
                # Bug fix: same missing-%s defect as above.
                raise SystemExit(NotImplementedError(
                    'Unknown Model Type %s in Datapoint Factory for pairwise classification' % config.model_type))

        # Legacy dispatch on experiment_mode (reached only for task types not
        # handled above).
        if config.experiment_mode in [ExperimentMode.XGBoost_Classify, ExperimentMode.SVM_Classify,
                                      ExperimentMode.NaiveBayes_Classify]:
            return TFIDFDataPoint
        elif config.experiment_mode in [ExperimentMode.LSTM_Classify, ExperimentMode.BiLSTM_Classify,
                                        ExperimentMode.TRANSFORMERENCODER_Classify,
                                        ExperimentMode.LSTM_SummarizeSingleVocab,
                                        ExperimentMode.LSTM_SummarizeDoubleVocab,
                                        ExperimentMode.TRANSFORMERENCODER_SummarizeSingleVocab]:
            return SequenceDataPoint
        elif config.experiment_mode in [ExperimentMode.GCN_Classify, ExperimentMode.GAT_Classify,
                                        ExperimentMode.TreeLSTM_Classify,
                                        ExperimentMode.TreeLSTM_SummarizeSingleVocab,
                                        ExperimentMode.GCN_SummarizeSingleVocab,
                                        ExperimentMode.GAT_SummarizeSingleVocab]:
            return GraphDataPoint
        elif config.experiment_mode == ExperimentMode.GGNN_Classify:
            return GraphDataPoint
        elif config.experiment_mode in [ExperimentMode.SVM_PairwiseClassify,
                                        ExperimentMode.XGBoost_PairwiseClassify,
                                        ExperimentMode.NaiveBayes_PairwiseClassify]:
            return TFIDFPairwiseDataPoint
        elif config.experiment_mode in [ExperimentMode.LSTM_PairwiseClassify,
                                        ExperimentMode.BiLSTM_PairwiseClassify,
                                        ExperimentMode.TRANSFORMERENCODER_PairwiseClassify]:
            return SequencePairwiseDataPoint
        elif config.experiment_mode in [ExperimentMode.TreeLSTM_PairwiseClassify,
                                        ExperimentMode.GCN_PairwiseClassify,
                                        ExperimentMode.GAT_PairwiseClassify,
                                        ExperimentMode.GGNN_PairwiseClassify]:
            return SingleEdgeGraphPairwiseDataPoint
        else:
            raise SystemExit(NotImplementedError(
                'Experiment Type %s is not implemented in Datapoint' % config.experiment_mode))
class TestPrune(unittest.TestCase):
    """End-to-end sparsity checks for every pruning mask type."""

    SPARSITY_TARGET = 0.8

    def setUp(self) -> None:
        set_seed(8888)

    def _test_model(self, mask_type):
        target = self.SPARSITY_TARGET
        model = Model(mask_type)

        with self.subTest(f'{mask_type} : Initial sparsity check'):
            self.assertEqual(model.get_sparsity(active=False), 0,
                             'Initial sparsity should be zero')

        # One-shot mask types prune up-front; SNIP additionally needs a
        # backward pass so its saliency scores exist before masking.
        if model.mask_type in prune.MAG_HARD + prune.LOTTERY + [prune.SNIP]:
            if model.mask_type == prune.SNIP:
                inputs, labels = model.get_inputs_and_labels()
                loss = model.compute_loss(model(inputs), labels)
                loss.backward()
            model.update_masks_once(sparsity_target=target)
            with self.subTest(f'{mask_type} : One-shot pruning sparsity check'):
                self.assertAlmostEqual(model.get_sparsity(active=True), target,
                                       delta=0.05,
                                       msg=f'Sparsity should be {target}')

        model.train_self(target)

        with self.subTest(f'{mask_type} : Final sparsity check'):
            # REGULAR pruning is noisier, so it gets a wider tolerance.
            self.assertAlmostEqual(model.get_sparsity(active=True), target,
                                   delta=0.3 if mask_type == prune.REGULAR else 0.05,
                                   msg=f'Sparsity should be {target}')
        with self.subTest(f'{mask_type} : Active sparsity check'):
            self.assertGreater(model.get_sparsity(active=True),
                               model.get_sparsity(active=False),
                               'Active sparsity should be higher as `out` layer is not pruned.')
        with self.subTest(f'{mask_type} : Weight sparsity check'):
            self.assertEqual(model.get_weight_sparsity(), 0,
                             'Weights should not be pruned yet.')

        model.prune_weights()
        with self.subTest(f'{mask_type} : Weight sparsity check'):
            self.assertAlmostEqual(model.get_weight_sparsity(), target,
                                   delta=0.3,
                                   msg=f'Weight sparsity after pruning should be {target}')

    def test_prune(self):
        all_mask_types = (prune.REGULAR, prune.MAG_BLIND, prune.MAG_DIST,
                          prune.MAG_UNIFORM, prune.SNIP, prune.MAG_GRAD_BLIND,
                          prune.MAG_GRAD_UNIFORM, prune.LOTTERY_MAG_BLIND,
                          prune.LOTTERY_MAG_UNIFORM, prune.LOTTERY_MAG_DIST)
        for mask_type in all_mask_types:
            sub_test = f'Testing mask_type = {mask_type}'
            with self.subTest(sub_test):
                print(sub_test)
                self._test_model(mask_type)
def register_all_lvis(root='datasets'):
    """Register every predefined LVIS split with the dataset catalog.

    JSON paths that already carry a URI scheme ('://') are passed through
    untouched; plain paths are joined onto ``root``.
    """
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            if '://' in json_file:
                json_path = json_file
            else:
                json_path = os.path.join(root, json_file)
            register_lvis_instances(
                key,
                get_lvis_instances_meta(dataset_name),
                json_path,
                os.path.join(root, image_root),
            )
# NOTE(review): `_module()` reads like a mangled registration decorator
# (e.g. `@DETECTORS.register_module()`); confirm against the original source.
_module()
class TridentFasterRCNN(FasterRCNN):
    """Implementation of TridentNet on top of Faster R-CNN.

    Each image is replicated across ``num_branch`` trident branches, so
    ground truths and image metas are tiled accordingly before being fed
    through the standard Faster R-CNN pipeline.
    """

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg,
                 neck=None, pretrained=None):
        super(TridentFasterRCNN, self).__init__(
            backbone=backbone, neck=neck, rpn_head=rpn_head,
            roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg,
            pretrained=pretrained)
        # Backbone and RoI head must agree on the branch layout.
        assert self.backbone.num_branch == self.roi_head.num_branch
        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
        self.num_branch = self.backbone.num_branch
        self.test_branch_idx = self.backbone.test_branch_idx

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation; optionally with precomputed proposals."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if proposals is None:
            num_branch = self.num_branch if self.test_branch_idx == -1 else 1
            trident_img_metas = img_metas * num_branch
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            # Bug fix: `trident_img_metas` was previously unbound in this
            # branch, so passing precomputed proposals raised
            # UnboundLocalError on the return line below.
            trident_img_metas = img_metas
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations; proposals come from the RPN per view."""
        x = self.extract_feats(imgs)
        num_branch = self.num_branch if self.test_branch_idx == -1 else 1
        # Renamed the loop variable: the original shadowed `img_metas`.
        trident_img_metas = [meta * num_branch for meta in img_metas]
        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
        return self.roi_head.aug_test(x, proposal_list, img_metas, rescale=rescale)

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
        """Tile GT boxes/labels/metas across branches and train as usual."""
        trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
        trident_gt_labels = tuple(gt_labels * self.num_branch)
        trident_img_metas = tuple(img_metas * self.num_branch)
        return super(TridentFasterRCNN, self).forward_train(
            img, trident_img_metas, trident_gt_bboxes, trident_gt_labels)
class Annotation(object):
    """Mutable record for a coreference annotation.

    All fields start as ``None`` and are filled in by the caller:
    ``gender``, ``name_a_coref``, ``name_b_coref``.
    """

    def __init__(self):
        # Populated after construction by whatever loads the annotation.
        self.gender = None
        self.name_a_coref = None
        self.name_b_coref = None
# NOTE(review): `_start_docstrings` reads like a decorator applied as a plain
# call -- presumably an extraction artifact; confirm against upstream.
_start_docstrings('The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.', SEGFORMER_START_DOCSTRING)
class TFSegformerModel(TFSegformerPreTrainedModel):
    """Bare TF SegFormer encoder exposing the shared main layer."""

    def __init__(self, config: SegformerConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config
        # All heavy lifting happens in the shared main layer.
        self.segformer = TFSegformerMainLayer(config, name='segformer')

    # NOTE(review): the bare `_inputs` below looks like a mangled
    # `@unpack_inputs` decorator, and the two calls after it look like
    # mangled docstring decorators -- confirm against the original source.
    _inputs
    _start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format('(batch_size, sequence_length)'))
    _code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: tf.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[(Tuple, TFBaseModelOutput)]:
        """Run the encoder; output contents depend on the boolean flags."""
        outputs = self.segformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
        # Pass-through wrapper used for TF serving signatures.
        return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions)
class CometCallback(TrainerCallback):
    """Trainer callback that logs metrics/params/checkpoints to Comet ML.

    Behaviour is driven by environment variables:
    COMET_MODE ('ONLINE'/'OFFLINE'), COMET_PROJECT_NAME,
    COMET_OFFLINE_DIRECTORY, COMET_LOG_ASSETS.
    """

    def __init__(self):
        if (not _has_comet):
            raise RuntimeError('CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.')
        self._initialized = False
        self._log_assets = False

    def setup(self, args, state, model):
        """Create the Comet experiment and log args/model config (rank 0 only)."""
        self._initialized = True
        log_assets = os.getenv('COMET_LOG_ASSETS', 'FALSE').upper()
        if (log_assets in {'TRUE', '1'}):
            self._log_assets = True
        # Only the main process talks to Comet.
        if state.is_world_process_zero:
            comet_mode = os.getenv('COMET_MODE', 'ONLINE').upper()
            experiment = None
            experiment_kwargs = {'project_name': os.getenv('COMET_PROJECT_NAME', 'huggingface')}
            if (comet_mode == 'ONLINE'):
                experiment = comet_ml.Experiment(**experiment_kwargs)
                experiment.log_other('Created from', 'transformers')
                logger.info('Automatic Comet.ml online logging enabled')
            elif (comet_mode == 'OFFLINE'):
                experiment_kwargs['offline_directory'] = os.getenv('COMET_OFFLINE_DIRECTORY', './')
                experiment = comet_ml.OfflineExperiment(**experiment_kwargs)
                experiment.log_other('Created from', 'transformers')
                logger.info('Automatic Comet.ml offline logging enabled; use `comet upload` when finished')
            # NOTE: any other COMET_MODE value leaves `experiment` as None
            # and silently disables logging.
            if (experiment is not None):
                experiment._set_model_graph(model, framework='transformers')
                experiment._log_parameters(args, prefix='args/', framework='transformers')
                if hasattr(model, 'config'):
                    experiment._log_parameters(model.config, prefix='config/', framework='transformers')

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        # Lazy setup: the experiment is created on first use.
        if (not self._initialized):
            self.setup(args, state, model)

    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
        """Forward trainer logs as Comet metrics (rank 0 only)."""
        if (not self._initialized):
            self.setup(args, state, model)
        if state.is_world_process_zero:
            experiment = comet_ml.config.get_global_experiment()
            if (experiment is not None):
                experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework='transformers')

    def on_train_end(self, args, state, control, **kwargs):
        """Optionally upload the output directory as assets, then end the run."""
        if (self._initialized and state.is_world_process_zero):
            experiment = comet_ml.config.get_global_experiment()
            if ((experiment is not None) and (self._log_assets is True)):
                logger.info('Logging checkpoints. This may take time.')
                experiment.log_asset_folder(args.output_dir, recursive=True, log_file_name=True, step=state.global_step)
            experiment.end()
class CUHK01(ImageDataset):
    """CUHK01 person re-identification dataset.

    Image filenames encode identity and view: the first four digits are the
    person id (1-based) and the next three the view index; views are paired
    into two cameras. Ten random train/test splits are generated on first
    use and cached in ``splits.json``.
    """

    dataset_dir = 'cuhk01'  # subdirectory under `root`
    dataset_url = None      # no auto-download; the zip must be placed manually

    def __init__(self, root='', split_id=0, **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.download_dataset(self.dataset_dir, self.dataset_url)
        self.zip_path = osp.join(self.dataset_dir, 'CUHK01.zip')
        self.campus_dir = osp.join(self.dataset_dir, 'campus')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')
        self.extract_file()
        required_files = [self.dataset_dir, self.campus_dir]
        self.check_before_run(required_files)
        self.prepare_split()
        splits = read_json(self.split_path)
        if (split_id >= len(splits)):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, (len(splits) - 1)))
        split = splits[split_id]
        train = split['train']
        query = split['query']
        gallery = split['gallery']
        # JSON round-trip turns tuples into lists; restore tuples.
        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]
        super(CUHK01, self).__init__(train, query, gallery, **kwargs)

    def extract_file(self):
        """Unzip CUHK01.zip once; skipped if `campus/` already exists."""
        if (not osp.exists(self.campus_dir)):
            print('Extracting files')
            zip_ref = zipfile.ZipFile(self.zip_path, 'r')
            zip_ref.extractall(self.dataset_dir)
            zip_ref.close()

    def prepare_split(self):
        """Create and cache 10 random 50/50 identity splits (20 entries total).

        For each random split two entries are written: one with camera A as
        query / camera B as gallery, and one with the roles swapped.
        """
        if (not osp.exists(self.split_path)):
            print('Creating 10 random splits of train ids and test ids')
            img_paths = sorted(glob.glob(osp.join(self.campus_dir, '*.png')))
            img_list = []
            pid_container = set()
            for img_path in img_paths:
                img_name = osp.basename(img_path)
                # Digits 0-3: person id (convert to 0-based).
                pid = (int(img_name[:4]) - 1)
                # Digits 4-6: view index; two consecutive views share a camera.
                camid = ((int(img_name[4:7]) - 1) // 2)
                img_list.append((img_path, pid, camid))
                pid_container.add(pid)
            num_pids = len(pid_container)
            num_train_pids = (num_pids // 2)
            splits = []
            for _ in range(10):
                order = np.arange(num_pids)
                np.random.shuffle(order)
                train_idxs = order[:num_train_pids]
                train_idxs = np.sort(train_idxs)
                # Relabel training pids to a contiguous 0..N-1 range.
                idx2label = {idx: label for (label, idx) in enumerate(train_idxs)}
                (train, test_a, test_b) = ([], [], [])
                for (img_path, pid, camid) in img_list:
                    if (pid in train_idxs):
                        train.append((img_path, idx2label[pid], camid))
                    elif (camid == 0):
                        test_a.append((img_path, pid, camid))
                    else:
                        test_b.append((img_path, pid, camid))
                split = {'train': train, 'query': test_a, 'gallery': test_b, 'num_train_pids': num_train_pids, 'num_query_pids': (num_pids - num_train_pids), 'num_gallery_pids': (num_pids - num_train_pids)}
                splits.append(split)
                # Same identities with query/gallery cameras swapped.
                split = {'train': train, 'query': test_b, 'gallery': test_a, 'num_train_pids': num_train_pids, 'num_query_pids': (num_pids - num_train_pids), 'num_gallery_pids': (num_pids - num_train_pids)}
                splits.append(split)
            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))