code stringlengths 101 5.91M |
|---|
def flat_waveform():
    """Return a one-second constant (all-ones) waveform sampled at 24 kHz."""
    samples = np.ones(24000)
    return sound.Waveform(signal=samples, sample_rate=24000)
def main(args):
    """Evaluate each model in ``args.model`` on the QuALITY multiple-choice set.

    For every sample the model generates exactly one token; the choice letter
    whose first token has the highest logit is taken as the model's answer.
    Per-model accuracy is shown on the progress bar and, if requested,
    written to ``args.output_file`` as two CSV rows (model names, fractions).
    """
    models = [x[0] for x in args.model]
    tokenizer = AutoTokenizer.from_pretrained(models[0], model_max_length=sys.maxsize, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id
    dataset = load_dataset('emozilla/quality', split=args.split)
    dataset = dataset.map(lambda sample: {'prompt': get_prompt(sample)})
    if args.max_tokens:
        dataset = dataset.filter(lambda sample: len(tokenizer(sample['prompt']).input_ids) <= args.max_tokens)
    # First token id of each choice letter; these logits are compared after
    # the single generated step.
    choice_tokens = [x[0] for x in tokenizer(CHOICES, add_special_tokens=False).input_ids]
    results = []
    for model in models:
        torch.cuda.empty_cache()
        loaded = load_model_and_apply_patches(model, args)
        correct_answers = 0
        i = 0
        # Renamed from `max`, which shadowed the builtin.
        total = len(dataset) if args.limit is None else args.limit
        bar = tqdm(total=total)
        while i < total:
            sample = dataset[i]
            tokenized_prompt = tokenizer(sample['prompt'], return_tensors='pt')
            input_ids = tokenized_prompt.input_ids.to('cuda')
            attention_mask = tokenized_prompt.attention_mask.to('cuda')
            output = loaded.generate(input_ids, attention_mask=attention_mask, max_new_tokens=1, return_dict_in_generate=True, output_scores=True, pad_token_id=tokenizer.eos_token_id)
            scores = output.scores[0][0]
            # Logits of the candidate tokens, moved off-GPU once (the old code
            # called .cpu() twice per score and hard-coded four indices).
            choice_scores = [scores[token].float().cpu() for token in choice_tokens]
            selection = numpy.argmax(choice_scores)
            correct_answers += 1 if selection == sample['answer'] else 0
            if args.print_choices:
                print(f"Choice: {CHOICES[selection]} Correct: {CHOICES[sample['answer']]}")
            i += 1
            percent = (correct_answers / i) * 100.0
            bar.desc = f'{model}: {percent:.1f}%'
            bar.update()
        percent = correct_answers / total
        results.append(str(percent))
    if args.output_file:
        with open(args.output_file, 'w', encoding='utf-8') as f:
            f.write(','.join(models) + '\n')
            f.write(','.join(results) + '\n')
class ObservableState(object):
    """Publicly observable agent state: position, velocity and radius."""

    def __init__(self, px, py, vx, vy, radius):
        self.px = px
        self.py = py
        self.vx = vx
        self.vy = vy
        self.radius = radius
        # Convenience tuples mirroring the scalar fields.
        self.position = (self.px, self.py)
        self.velocity = (self.vx, self.vy)

    def __add__(self, other):
        # Appends this state's fields onto `other` (expects a tuple-like value).
        return other + (self.px, self.py, self.vx, self.vy, self.radius)

    def __str__(self):
        fields = (self.px, self.py, self.vx, self.vy, self.radius)
        return ' '.join(str(field) for field in fields)
(InducingImages, Conv2d, object)
def _Kfu_conv2d(feat: InducingImages, kern: Conv2d, Xnew: tf.Tensor, full_spatial: bool=False):
    """Covariance between inducing patch images and new inputs for a Conv2d kernel.

    Fast path for stationary base kernels: the (precision-weighted) squared
    distance between every image patch and every inducing patch is assembled
    from three convolution terms (||z||^2 + ||x||^2 - 2<x, z>) and then mapped
    through ``K_r2``. Non-stationary kernels fall back to
    ``_Kfu_conv2d_fallback``.

    Args:
        feat: inducing patch images.
        kern: convolutional kernel wrapping a base kernel.
        Xnew: batch of input images; channel axis per ``kern.data_format``.
        full_spatial: if False, the spatial patch axes are flattened and
            reduced (mean or weighted sum) away.
    """
    if (not isinstance(kern.kernel, kernels.Stationary)):
        return _Kfu_conv2d_fallback(feat, kern, Xnew, full_spatial)
    patch_shape = list(kern.patch_shape)
    # Channel axis position depends on the data format (NCHW vs NHWC).
    channels_in = Xnew.shape[((- 3) if (kern.data_format == 'NCHW') else (- 1))]
    # Inverse squared lengthscales act as per-pixel precisions.
    precis = tf.square(tf.math.reciprocal(kern.kernel.lengthscales))
    if kern.kernel.ard:
        filters = tf.reshape(precis, (patch_shape + [channels_in, 1]))
    else:
        filters = tf.fill((patch_shape + [channels_in, 1]), precis)
    # ||z||^2 term: precision-weighted squared norm of each inducing patch.
    r2 = tf.transpose(tf.nn.conv2d(input=tf.square(feat.as_images), filters=filters, strides=[1, 1], padding='VALID'))
    X = tf.reshape(Xnew, ([(- 1)] + list(Xnew.shape)[(- 3):]))
    # ||x||^2 term over the input patches.
    r2 += kern.convolve(tf.square(X), filters)
    # -2<x, z> term: fold the precisions into the inducing-patch filters.
    filters *= feat.as_filters
    r2 -= (2 * kern.convolve(X, filters))
    Kxz = kern.kernel.K_r2(r2)
    if (not full_spatial):
        # Flatten the spatial patch axes into one, then reduce them away.
        Kxz = tf.reshape(Kxz, (list(Kxz.shape[:(- 3)]) + [(- 1), len(feat)]))
        if (kern.weights is None):
            Kxz = tf.reduce_mean(Kxz, axis=(- 2))
        else:
            Kxz = tf.tensordot(Kxz, kern.weights, [(- 2), (- 1)])
    return tf.reshape(Kxz, (list(Xnew.shape[:(- 3)]) + list(Kxz.shape[1:])))
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """Sample an S^2 grid concentrated around the equator.

    beta spans [pi/2 - max_beta, pi/2 + max_beta] with ``n_beta`` points
    (endpoints included); alpha covers the full circle with ``n_alpha``
    points (endpoint excluded). Returns a tuple of (beta, alpha) pairs,
    beta varying slowest.
    """
    betas = np.linspace((np.pi / 2) - max_beta, (np.pi / 2) + max_beta, num=n_beta, endpoint=True)
    alphas = np.linspace(0, 2 * np.pi, num=n_alpha, endpoint=False)
    return tuple((b, a) for b in betas for a in alphas)
def write_body(fd, shape, out_strings):
    """Serialize ``shape`` (H, W) and the encoded bitstreams to ``fd``.

    Writes a three-uint header (height, width, stream count), then for each
    stream its byte length followed by its raw bytes.

    Returns:
        Total number of bytes written.
    """
    # The initial `bytes_cnt = 0` was dead code (immediately overwritten).
    bytes_cnt = write_uints(fd, (shape[0], shape[1], len(out_strings)))
    for s in out_strings:
        bytes_cnt += write_uints(fd, (len(s[0]),))
        bytes_cnt += write_bytes(fd, s[0])
    return bytes_cnt
def _construct_dataset(num_episodes, num_groups=10):
    """Build a Dataset of ``num_episodes`` episodes whose scene ids cycle
    round-robin over ``num_groups`` distinct scenes."""
    dataset = Dataset()
    dataset.episodes = [
        Episode(
            episode_id=str(idx),
            scene_id='scene_id_' + str(idx % num_groups),
            start_position=[0, 0, 0],
            start_rotation=[0, 0, 0, 1],
        )
        for idx in range(num_episodes)
    ]
    return dataset
class TrainingModule():
    """End-to-end trainer for image captioning.

    Owns the dataset, tokenizer, model, dataloaders, checkpoint restore,
    self-critical (SCST) loss computation and COCO-style evaluation.
    """
    ALL_METRICS = ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'ROUGE_L', 'CIDEr', 'SPICE']
    SCST_SAMPLE = ['beam_search', 'random']
    SCST_BASELINE = ['greedy', 'sample']
    config: Config
    data: KarpathyDataset
    collate_fn: Dict[(str, Callable)]
    model: nn.Module
    optimizer: torch.optim.Optimizer
    tokenizer: Tokenizer
    scst_scorer: CaptionScorer
    # BUGFIX: was `checkpoint_path = str`, which assigned the builtin `str`
    # type object to the class attribute instead of declaring an annotation.
    checkpoint_path: str
    def __init__(self, config: Config):
        super().__init__()
        self.config = config
        os.makedirs(config.log_dir, exist_ok=True)
        self.data = get_dataset(config.dataset)(config)
        self.data.prepare_data()
        self.tokenizer = get_tokenizer(config.tokenizer)(config)
        logger.info(f'{self.__class__.__name__}: Vocab size = {config.vocab_size}')
        logger.info(f'{self.__class__.__name__}: Token IDs: BOS = {config.bos_token_id}, EOS = {config.eos_token_id}, UNK = {config.unk_token_id}, PAD = {config.pad_token_id}, ')
        model_cls = get_model(config.caption_model)
        self.model = model_cls(config)
        # A Manager dict is shared across dataloader workers only when
        # RAM-capped caching is enabled (cache_min_free_ram < 1).
        if (config.get('cache_min_free_ram', 1) < 1):
            from multiprocessing import Manager
            manager = Manager()
            cache_dict = manager.dict()
        else:
            cache_dict = None
        self.collate_fn = {'train': model_cls.COLLATE_FN(config=config, tokenizer=self.tokenizer, cache_dict=cache_dict), 'eval': model_cls.COLLATE_FN(config=config, tokenizer=self.tokenizer)}
        self.checkpoint_path = os.path.join(config.log_dir, 'model_{}.pth')
        self.optimizer_path = os.path.join(config.log_dir, 'optimizer_{}.pth')
    def train_dataloader(self):
        """Dataloader for the training split (shuffled, drop_last)."""
        logger.debug(f'{self.__class__.__name__}: Setting up dataloader for train split')
        return self.get_dataloader('train', collate_fn=self.collate_fn['train'], generation_mode=True)
    def val_dataloader(self):
        """Dataloader for the validation split."""
        logger.debug(f'{self.__class__.__name__}: Setting up dataloader for validation split')
        return self.get_dataloader('val', collate_fn=self.collate_fn['eval'], generation_mode=True)
    def test_dataloader(self):
        """Dataloader for the test split."""
        logger.debug(f'{self.__class__.__name__}: Setting up dataloader for test split')
        return self.get_dataloader('test', collate_fn=self.collate_fn['eval'], generation_mode=True)
    def get_dataloader(self, split: str, collate_fn: Callable, generation_mode: bool=False):
        """Build a DataLoader for ``split``; only the train split shuffles and
        drops the last partial batch."""
        if (split not in ('train', 'val', 'test')):
            error_mssg = f"Invalid split `{split}`, please pass in one of ('train', 'val', 'test')."
            raise ValueError(error_mssg)
        is_training = (split == 'train')
        if is_training:
            batch_size = self.config.batch_size
        else:
            batch_size = self.config.get('batch_size_eval', self.config.batch_size)
        data_loader = DataLoader(dataset=self.data.get_split(split, generation_mode), batch_size=batch_size, shuffle=is_training, num_workers=self.config.num_workers, collate_fn=collate_fn, pin_memory=True, drop_last=is_training)
        return data_loader
    def prepare(self):
        """Validate the config and set up loaders, scorer, TensorBoard writer
        and the training step counters. Call once before training."""
        config = self.config
        assert (config.max_epochs > 0), '`config.max_epochs` should be > 0'
        assert (config.beam_size_val > 0), '`config.beam_size_val` should be > 0'
        assert (config.save_checkpoint_every > 0), '`config.save_checkpoint_every` should be > 0'
        assert (config.losses_log_every > 0), '`config.losses_log_every` should be > 0'
        if (config.cached_tokens is None):
            config.cached_tokens = os.path.join(config.dataset_dir, 'bu', 'coco-train-words')
        self.config_path = self.config.save_config(exist_ok=False)
        self.train_loader = self.train_dataloader()
        self.val_loader = self.val_dataloader()
        self.tb_summary_writer = SummaryWriter(config.log_dir)
        self.scst_scorer = CaptionScorer(config.cached_tokens, cider_weight=config.scst_cider_weight, bleu_weight=config.scst_bleu_weight)
        self.global_step = 0
        self.max_train_step = config.max_train_step = (config.max_epochs * len(self.train_loader))
        self.best_val_score = 0.0
        config.best_global_step = 0
    def maybe_load_checkpoint(self, strict=True):
        """Restore model (and optionally optimizer) weights from
        ``config.start_from``; no-op when it is unset.

        Logs any mismatch between the stored config and the current one, and
        records missing/unexpected parameter names in `restore_log.txt`.
        """
        config = self.config
        model = self.model
        if (not config.start_from):
            return None
        if os.path.isfile(config.start_from):
            restore_dir = os.path.dirname(config.start_from)
            model_file = config.start_from
        elif os.path.isdir(config.start_from):
            restore_dir = config.start_from
            if config.get('resume_training', False):
                model_file = os.path.join(config.start_from, 'model_last.pth')
            else:
                model_file = os.path.join(config.start_from, 'model_best.pth')
        else:
            raise ValueError(f'{self.__class__.__name__}: Argument `start_from` must be either directory path or model checkpoint path.')
        old_config = Config.load_config_json(os.path.join(restore_dir, 'config.json'))
        # Warn (but do not fail) when key architecture options disagree.
        checklist = ('caption_model', 'rnn_type', 'rnn_size', 'num_layers')
        (old_config_vars, config_vars) = (vars(old_config), vars(config))
        for check in checklist:
            if (check in old_config_vars):
                if (old_config_vars[check] != config_vars[check]):
                    logger.warning(f'{self.__class__.__name__}: Argument provided and loaded config disagree on `{check}`: Provided: `{config_vars[check]}` Loaded: `{old_config_vars[check]}`')
            elif (check in config_vars):
                logger.warning(f'{self.__class__.__name__}: Argument `{check}` is provided but is missing from loaded config.')
        if config.get('resume_training', False):
            opt_file = os.path.join(restore_dir, 'optimizer_last.pth')
            self.optimizer.load_state_dict(torch.load(opt_file))
            logger.info(f'{self.__class__.__name__}: Optimizer weights loaded from `{opt_file}`')
            config.optimizer_restored = True
        (missing_keys, unexpected_keys) = model.load_state_dict(torch.load(model_file), strict=strict)
        logger.info(f'{self.__class__.__name__}: Model weights loaded from `{model_file}`')
        restore_log = os.path.join(config.log_dir, 'restore_log.txt')
        if (len(missing_keys) > 0):
            _log = f'{self.__class__.__name__}: Checkpoint `{model_file}` is missing one or more parameters'
            with open(restore_log, 'a') as f:
                f.write(((f'''{_log}:
''' + '\n'.join(missing_keys)) + '\n\n'))
            logger.info(f'{_log}. See `{restore_log}` for more info.')
        if (len(unexpected_keys) > 0):
            _log = f'{self.__class__.__name__}: Checkpoint `{model_file}` contains extra parameters'
            with open(restore_log, 'a') as f:
                f.write(((f'''{_log}:
''' + '\n'.join(unexpected_keys)) + '\n\n'))
            logger.info(f'{_log}. See `{restore_log}` for more info.')
        config.model_restored = True
    def compute_scst_loss(self, model_inputs, gts, loss_fn):
        """Compute the self-critical sequence-training loss.

        Samples captions from the model, scores them against references with
        CIDEr/BLEU, subtracts the baseline score (greedy decode or the sample
        mean, per config) and feeds the advantage into ``loss_fn``.

        Returns:
            (loss, reward, sample_scores, baseline_scores)
        """
        config = self.config
        model = self.model
        assert isinstance(model_inputs, dict), f'Expected `model_inputs` to be dict, saw {type(model_inputs)}'
        assert (config.scst_num_samples > 0), f'Expected `config.scst_num_samples` to be > 0, saw {config.scst_num_samples}'
        assert (config.scst_sample in self.SCST_SAMPLE), f'Expected `config.scst_sample` to be one of `{self.SCST_SAMPLE}`, saw {config.scst_sample}'
        assert (config.scst_baseline in self.SCST_BASELINE), f'Expected `config.scst_baseline` to be one of `{self.SCST_BASELINE}`, saw {config.scst_baseline}'
        if (config.scst_baseline == 'greedy'):
            model.eval()
            with torch.no_grad():
                (greedy_res, _) = model(**model_inputs, mode='sample')
        else:
            assert (config.scst_baseline == 'sample')
            greedy_res = None
        model.train()
        if (config.scst_sample == 'beam_search'):
            (sample_res, sample_logprobs) = model(**model_inputs, mode='sample', opt={'beam_size': config.scst_num_samples})
        else:
            assert (config.scst_sample == 'random')
            (sample_res, sample_logprobs) = model(**model_inputs, mode='sample', opt={'num_random_sample': config.scst_num_samples, 'beam_size': 0})
        if (greedy_res is None):
            greedy_decoded = None
        else:
            greedy_decoded = [[self.tokenizer.decode(_[0])] for _ in greedy_res.cpu().numpy()]
        sample_decoded = [[self.tokenizer.decode(__) for __ in _] for _ in sample_res.cpu().numpy()]
        if (config.scst_baseline == 'greedy'):
            assert (greedy_decoded is not None)
        else:
            assert (greedy_decoded is None)
        (sc_sample, sc_baseline) = self.scst_scorer(refs=gts, sample=sample_decoded, baseline=greedy_decoded)
        reward = map_to_cuda(torch.from_numpy((sc_sample - sc_baseline)).type_as(sample_logprobs))
        # Mask out positions equal to the pad token so they carry no reward.
        mask = (sample_res.view((sample_res.size(0) * sample_res.size(1)), (- 1)) != model.pad_idx)
        loss = loss_fn(sample_logprobs, mask=mask, reward=reward)
        return (loss, reward, sc_sample, sc_baseline)
    def eval_on_split(self, loader, split):
        """Decode captions for ``split``, dump them as COCO-format JSON and
        (except for the MS-COCO online test set) score them.

        Returns:
            (predictions, scores_or_None, output_dir)
        """
        assert (loader.drop_last is False), '`drop_last` must be False for eval dataloader`.'
        config = self.config
        model = self.model
        model.eval()
        config.beam_size = config.get(f'beam_size_{split}', 1)
        t0 = perf_counter()
        image_paths = []
        predictions = []
        for (batch_idx, data) in enumerate(tqdm(loader, desc='Evaluating model')):
            data = map_to_cuda(data)
            with torch.no_grad():
                seq = model(**data, opt=config, mode='sample')[0]
            predictions += [self.tokenizer.decode(_[0]) for _ in seq]
            image_paths += data['image_paths']
        print(f'Speed: {(len(image_paths) / (perf_counter() - t0)):.2f} img/sec')
        model.train()
        is_test2014_split = (config.get('mscoco_online_test', False) and (split == 'test'))
        if is_test2014_split:
            out_dir = os.path.join(config.log_dir, f'test2014_beam_{config.beam_size}')
        else:
            out_dir = os.path.join(config.log_dir, f'{split}_beam_{config.beam_size}')
        if config.get('eval_dir_suffix', None):
            out_dir += f'_{config.eval_dir_suffix}'
        json_fpath = os.path.join(out_dir, f'caption_{self.global_step:08d}.json')
        self.data.coco_caption_json_dump(zip(image_paths, predictions), json_fpath)
        if is_test2014_split:
            # The online server requires a val2014 file too; fill it with
            # placeholder captions.
            val_img_paths = os.listdir(os.path.join(config.dataset_dir, 'val2014'))
            fake_preds = ['an example caption' for _ in val_img_paths]
            self.data.coco_caption_json_dump(zip(val_img_paths, fake_preds), json_fpath.replace('.json', '_val2014.json'))
            scores = None
        else:
            (scores, scores_detailed, coco_eval) = evaluate_caption_json(res_file=json_fpath, ann_file=self.data.ANNOTATION_FILE)
            score_fpath = os.path.join(out_dir, f'score_{self.global_step:08d}.json')
            with open(score_fpath, 'w') as f:
                json.dump(scores, fp=f, indent=2, sort_keys=True, ensure_ascii=False)
            with open(score_fpath.replace('.json', '_detailed.json'), 'w') as f:
                json.dump(scores_detailed, fp=f, indent=2, sort_keys=True, ensure_ascii=False)
            score_csv_fpath = os.path.join(out_dir, 'scores.csv')
            # Emit the CSV header only when the file is being created.
            if os.path.isfile(score_csv_fpath):
                score_str = ''
            else:
                score_str = f'''Step,{','.join((str(k) for k in self.ALL_METRICS))}
'''
            with open(score_csv_fpath, 'a') as f:
                score_str += f'{self.global_step:08d},'
                score_str += ','.join((f'{scores[k]:.3f}' for k in self.ALL_METRICS))
                f.write(f'''{score_str}
''')
        return (predictions, scores, out_dir)
    # NOTE(review): uses `cls` like an alternate constructor — upstream this is
    # almost certainly decorated with @classmethod (decorators appear stripped
    # in this dump); confirm before calling it unbound.
    def eval_model(cls, state_dict, config, split='test'):
        """Instantiate a module from ``config``, load ``state_dict`` and
        evaluate it on ``split`` ('val' or 'test')."""
        assert isinstance(config, Config), f'`config` should be an instance of `utils.config.Config`, saw {type(config)}'
        self = cls(config)
        self.model.load_state_dict(state_dict)
        map_to_cuda(self.model)
        if (split == 'val'):
            self.test_loader = self.val_dataloader()
        elif (split == 'test'):
            self.test_loader = self.test_dataloader()
        else:
            # BUGFIX: the message previously listed ('val', 'train'), but the
            # accepted values are 'val' and 'test'.
            raise ValueError(f"{self.__class__.__name__}: `split` must be one of ('val', 'test'), saw: {split}")
        self.global_step = self.config.get('best_global_step', 0)
        return self.eval_on_split(self.test_loader, split=split)
    # NOTE(review): takes `parser` as its first argument — likely a
    # @staticmethod upstream (decorators stripped in this dump).
    def add_argparse_args(parser: Union[(_ArgumentGroup, ArgumentParser)]):
        """Register all training/eval command-line options on ``parser``."""
        parser.add_argument('--seed', type=int, default=8888, help='int: Random number generator (RNG) seed.')
        parser.add_argument('--cache_min_free_ram', type=float, default=0.4, help='float: Minimum free RAM when caching training data. Set to 1.0 to disable.')
        parser.add_argument('--num_workers', type=int, default=4, help='int: Number of workers for each `DataLoader`.')
        parser.add_argument('--cached_tokens', type=str, default=None, help='str: Cached token file for calculating cider score during self critical training.')
        parser.add_argument('--id', type=str, default='', help='An id identifying this run/job.')
        parser.add_argument('--log_dir', type=str, default=ROOT_DIR, help='str: Logging / Saving directory.')
        parser.add_argument('--start_from', type=str, default='', help='str: Load model parameters from this directory.')
        parser.add_argument('--resume_training', action='store_true', help='bool: If True, resume training.')
        parser.add_argument('--save_checkpoint_every', type=int, default=6000, help='int: How often to save a model checkpoint (in iterations)')
        parser.add_argument('--losses_log_every', type=int, default=25, help='int: How often to perform Tensorboard dump.')
        parser.add_argument('--batch_size', type=int, default=15, help='int: Batch size.')
        parser.add_argument('--batch_size_eval', type=int, default=50, help='int: Batch size for evaluation.')
        parser.add_argument('--max_epochs', type=int, default=15, help='int: Maximum training epoch.')
        parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
        parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
        parser.add_argument('--label_smoothing', type=float, default=0, help='')
        parser.add_argument('--optim', type=str, default='adam', choices=ALL_OPTIMIZERS, help='str: Optimizer name.')
        parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
        parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
        parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
        parser.add_argument('--lr_scheduler', type=str, default='noam', choices=ALL_SCHEDULERS, help='str: Scheduler name.')
        parser.add_argument('--noamopt_warmup', type=int, default=10000, help='')
        parser.add_argument('--noamopt_factor', type=float, default=1, help='')
        parser.add_argument('--learning_rate', type=float, default=0.0005, help='float: Learning rate')
        parser.add_argument('--learning_rate_min', type=float, default=1e-05, help='float: Minimum learning rate, used by Cosine Annealing.')
        parser.add_argument('--learning_rate_decay_start', type=int, default=0, help='int: St which epoch to start decaying learning rate? (-1 = disabled)')
        parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='int: Every how many epoch thereafter to drop LR?')
        parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='float: every how many epoch thereafter to drop LR?')
        parser.add_argument('--scst_start_epoch', type=int, default=(- 1), help='int: Epoch to start SCST, -1 to disable.')
        parser.add_argument('--scst_num_samples', type=int, default=10, help='int: Number of samples per example for SCST, must be > 0.')
        parser.add_argument('--scst_sample', type=str, default='random', choices=TrainingModule.SCST_SAMPLE, help='str: SCST sampling method.')
        parser.add_argument('--scst_baseline', type=str, default='sample', choices=TrainingModule.SCST_BASELINE, help='str: SCST baseline method.')
        parser.add_argument('--scst_cider_weight', type=float, default=1.0, help='float: The reward weight from CIDEr-D.')
        parser.add_argument('--scst_bleu_weight', type=csv_to_float_list, default=(0.0, 0.0, 0.0, 0.0), help='str: Comma-separated reward weights from BLEU-1 to BLEU-4.')
        parser.add_argument('--beam_size_test', type=int, default=2, help='int: Beam size used for test set.')
        parser.add_argument('--beam_size_val', type=int, default=1, help='int: Beam size used for validation set.')
_module()
class ShuffleNetV1(BaseBackbone):
    """ShuffleNetV1 backbone.

    Args:
        groups (int): Number of groups for the 1x1 grouped convolutions
            (only 1, 2, 3, 4 and 8 are supported).
        widen_factor (float): Channel-width multiplier applied to each stage.
        out_indices (tuple[int]): Stage indices (0-2) whose outputs are returned.
        frozen_stages (int): Freeze stages up to this index (-1 = none).
        conv_cfg / norm_cfg / act_cfg (dict | None): Layer construction configs.
        norm_eval (bool): Keep BatchNorm layers in eval mode while training.
        with_cp (bool): Use checkpointing in the ShuffleUnits to save memory.
    """
    def __init__(self, groups=3, widen_factor=1.0, out_indices=(2,), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), norm_eval=False, with_cp=False):
        # Deep-copy the cfg dicts so later in-place edits don't leak back to
        # the caller's (possibly shared) dictionaries.
        norm_cfg = copy.deepcopy(norm_cfg)
        act_cfg = copy.deepcopy(act_cfg)
        super().__init__()
        self.stage_blocks = [4, 8, 4]
        self.groups = groups
        for index in out_indices:
            if (index not in range(0, 3)):
                raise ValueError(f'the item in out_indices must in range(0, 3). But received {index}')
        if (frozen_stages not in range((- 1), 3)):
            raise ValueError(f'frozen_stages must be in range(-1, 3). But received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # Per-stage output channels from the ShuffleNetV1 paper, keyed by the
        # number of 1x1-conv groups.
        if (groups == 1):
            channels = (144, 288, 576)
        elif (groups == 2):
            channels = (200, 400, 800)
        elif (groups == 3):
            channels = (240, 480, 960)
        elif (groups == 4):
            channels = (272, 544, 1088)
        elif (groups == 8):
            channels = (384, 768, 1536)
        else:
            raise ValueError(f'{groups} groups is not supported for 1x1 Grouped Convolutions')
        # Scale by the width multiplier, rounding to a multiple of 8.
        channels = [make_divisible((ch * widen_factor), 8) for ch in channels]
        self.in_channels = int((24 * widen_factor))
        # Stem: 3x3 stride-2 conv followed by a stride-2 max-pool.
        self.conv1 = ConvModule(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layers = nn.ModuleList()
        for (i, num_blocks) in enumerate(self.stage_blocks):
            first_block = (i == 0)
            layer = self.make_layer(channels[i], num_blocks, first_block)
            self.layers.append(layer)
    def _freeze_stages(self):
        """Disable gradients for the stem and the first `frozen_stages` stages."""
        if (self.frozen_stages >= 0):
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(self.frozen_stages):
                layer = self.layers[i]
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False
    def init_weights(self, pretrained=None):
        """Initialize weights from a checkpoint path or with the default scheme.

        Args:
            pretrained (str | None): Checkpoint path, or None for random init.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for (name, m) in self.named_modules():
                if isinstance(m, nn.Conv2d):
                    if ('conv1' in name):
                        normal_init(m, mean=0, std=0.01)
                    else:
                        # Fan-in scaled std for the remaining convolutions.
                        normal_init(m, mean=0, std=(1.0 / m.weight.shape[1]))
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    # NOTE(review): mmcv-style `constant_init` helpers usually
                    # take the module itself, not `.weight` — confirm against
                    # this project's init utilities.
                    constant_init(m.weight, val=1, bias=0.0001)
                    if isinstance(m, _BatchNorm):
                        if (m.running_mean is not None):
                            nn.init.constant_(m.running_mean, 0)
        else:
            raise TypeError(f'pretrained must be a str or None. But received {type(pretrained)}')
    def make_layer(self, out_channels, num_blocks, first_block=False):
        """Stack `num_blocks` ShuffleUnits for one stage.

        The first unit of a stage downsamples via 'concat' combine; the rest
        use residual 'add'. `first_block` marks the very first unit of the
        network (stage 0).
        """
        layers = []
        for i in range(num_blocks):
            first_block = (first_block if (i == 0) else False)
            combine_mode = ('concat' if (i == 0) else 'add')
            layers.append(ShuffleUnit(self.in_channels, out_channels, groups=self.groups, first_block=first_block, combine=combine_mode, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the feature map(s) of the stages listed in `out_indices`."""
        x = self.conv1(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
            if (i in self.out_indices):
                outs.append(x)
        # Single requested stage -> bare tensor; otherwise a tuple.
        if (len(outs) == 1):
            return outs[0]
        return tuple(outs)
    def train(self, mode=True):
        """Switch train/eval mode, re-freezing stages and optionally keeping
        BatchNorm layers in eval mode (`norm_eval`)."""
        super().train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
class ResNet(nn.Module):
    """CIFAR-style ResNet assembled from caller-supplied layer factories.

    Args:
        conv_layer: factory for 2-D convolutions (e.g. ``nn.Conv2d``).
        linear_layer: factory for the classifier head (e.g. ``nn.Linear``).
        block: residual block class exposing an ``expansion`` attribute.
        num_blocks: number of blocks in each of the four stages.
        num_classes: size of the output layer.
    """

    def __init__(self, conv_layer, linear_layer, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv_layer = conv_layer
        # Stem: 3x3 conv that keeps the spatial size (CIFAR input), plus BN.
        self.conv1 = conv_layer(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = linear_layer(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one applies ``stride``."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, self.conv_layer, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        flat = out.view(out.size(0), -1)
        return self.linear(flat)
def print_step_info(world, vehicle):
    """Print one fixed-width telemetry line: frame, sim time, and the
    vehicle's acceleration, velocity and location (x, y, z each)."""
    snap = world.get_snapshot()
    acc = vehicle.get_acceleration()
    vel = vehicle.get_velocity()
    loc = vehicle.get_location()
    fmt = '%d %06.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f'
    values = (snap.frame, snap.timestamp.elapsed_seconds, acc.x, acc.y, acc.z, vel.x, vel.y, vel.z, loc.x, loc.y, loc.z)
    print(fmt % values)
def calculate(file_list, gt_file_list, args, MCD):
    """Compute mel-cepstral distortion between converted and ground-truth audio.

    For each converted file, finds the unique ground-truth file whose basename
    appears in the converted path, extracts WORLD features from both, removes
    silent frames by power, aligns the mel-cepstra with DTW, and appends the
    resulting MCD value (printed per file) to the `MCD` list in place.
    """
    for (i, cvt_path) in enumerate(file_list):
        # Match by basename containment; exactly one ground-truth file must match.
        corresponding_list = list(filter((lambda gt_path: (get_basename(gt_path) in cvt_path)), gt_file_list))
        assert (len(corresponding_list) == 1)
        gt_path = corresponding_list[0]
        gt_basename = get_basename(gt_path)
        gt_feats = world_extract(gt_path, args)
        cvt_feats = world_extract(cvt_path, args)
        # Keep only non-silent frames (selected via the normalized power track).
        gt_mcep_nonsil_pow = extfrm(gt_feats['mcep'], gt_feats['npow'])
        cvt_mcep_nonsil_pow = extfrm(cvt_feats['mcep'], cvt_feats['npow'])
        # DTW-align the converted sequence to the ground truth.
        (_, path) = fastdtw(cvt_mcep_nonsil_pow, gt_mcep_nonsil_pow, dist=scipy.spatial.distance.euclidean)
        twf_pow = np.array(path).T
        cvt_mcep_dtw_pow = cvt_mcep_nonsil_pow[twf_pow[0]]
        gt_mcep_dtw_pow = gt_mcep_nonsil_pow[twf_pow[1]]
        # Standard MCD: (10 / ln 10) * sqrt(2 * sum of squared cepstral diffs),
        # averaged over aligned frames.
        diff2sum = np.sum(((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2), 1)
        mcd = np.mean(((10.0 / np.log(10.0)) * np.sqrt((2 * diff2sum))), 0)
        print('{} {}'.format(gt_basename, mcd))
        MCD.append(mcd)
class RandomSizedEarser(object):
    """Random-erasing augmentation: sometimes paints a random rectangle of the
    image with a random solid RGB colour.

    Args:
        sl, sh: min/max erased area as a fraction of the image area.
        asratio: minimum aspect ratio of the rectangle (max is 1/asratio).
        p: threshold compared against uniform(-1, 1); values above it skip
            the erase.
    """

    def __init__(self, sl=0.02, sh=0.2, asratio=0.3, p=0.5):
        self.sl = sl
        self.sh = sh
        self.asratio = asratio
        self.p = p

    def __call__(self, img):
        # NOTE(review): sampling from (-1, 1) makes the effective erase
        # probability (p + 1) / 2 rather than p — kept to preserve behaviour.
        p1 = random.uniform(-1, 1.0)
        W = img.size[0]
        H = img.size[1]
        area = H * W
        if p1 > self.p:
            return img
        # Rejection-sample a rectangle that fits fully inside the image.
        while True:
            Se = random.uniform(self.sl, self.sh) * area
            re = random.uniform(self.asratio, 1 / self.asratio)
            He = np.sqrt(Se * re)
            We = np.sqrt(Se / re)
            xe = random.uniform(0, W - We)
            ye = random.uniform(0, H - He)
            if (xe + We) <= W and (ye + He) <= H and xe > 0 and ye > 0:
                x1 = int(np.ceil(xe))
                y1 = int(np.ceil(ye))
                x2 = int(np.floor(x1 + We))
                y2 = int(np.floor(y1 + He))
                part1 = img.crop((x1, y1, x2, y2))
                Rc = random.randint(0, 255)
                Gc = random.randint(0, 255)
                Bc = random.randint(0, 255)
                I = Image.new('RGB', part1.size, (Rc, Gc, Bc))
                # BUGFIX: paste at the sampled box origin (x1, y1). The old
                # code passed `part1.size` (the patch's width/height) as the
                # position, so the patch always landed at that fixed, wrong
                # offset instead of covering the sampled region.
                img.paste(I, (x1, y1))
                return img
def get_lights_colors_from_cmds(cmds: VehicleCommands, t: Timestamp) -> LightsColors:
    """Derive light colours from the vehicle commands at time ``t``.

    Returns None when the commands carry no light information (signalled by
    an AttributeError from the missing fields).
    """
    try:
        return lights_colors_from_lights_cmd(cmds.lights, cmds.acc, t)
    except AttributeError:
        return None
class SqueezeBertForSequenceClassification():
    """Import-time placeholder used when PyTorch is not installed.

    Any attempt to instantiate or load it delegates to ``requires_pytorch``,
    which raises with an installation hint (dummy-object pattern).
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    # NOTE(review): upstream dummy objects usually declare this as a
    # classmethod — confirm against the generating template before relying
    # on calling it on the class.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def mnist_test_data(num_max=None):
    """Load the cached MNIST test split from the user's EvalDNN data dir.

    Returns (x_test, y_test), optionally truncated to the first ``num_max``
    samples.
    """
    base = common.user_home_dir() + '/EvalDNN-data/MNIST/tensorflow/'
    x_test = np.load(base + 'x_test.npy')
    y_test = np.load(base + 'y_test.npy')
    if num_max is not None:
        x_test, y_test = x_test[:num_max], y_test[:num_max]
    return (x_test, y_test)
def is_feasible(solution: Solution) -> bool:
    """A solution is feasible iff it violates no constraints."""
    violated = number_of_violated_constraints(solution)
    return violated == 0
def test_move_fallback():
    """Both move-issue accessors should round-trip their argument as `.value`."""
    second = m.get_moveissue2(2)
    assert second.value == 2
    first = m.get_moveissue1(1)
    assert first.value == 1
def add_wire(x, y, name):
    """Register a new wire at tile (x, y) and return its global index.

    Updates the module-level wire tables; when the wire maps to a known
    graphics id, its segment list for this tile is seeded with the name.
    """
    global num_wires
    wire_idx = num_wires
    num_wires += 1
    wname = (x, y, name)
    wire_names[wname] = wire_idx
    wire_names_r[wire_idx] = wname
    wire_segments[wire_idx] = dict()
    gfx_id = 'TILE_WIRE_' + name.upper().replace('/', '_')
    if gfx_id in gfx_wire_ids:
        wire_segments[wire_idx].setdefault((x, y), list()).append(name)
    return wire_idx
class Spaces(object):
    """Lazy-loading shim that forwards attribute access to ``gym.spaces``
    after emitting a deprecation warning (kept for backward compatibility)."""
    def __getattr__(self, k):
        warnings.warn('DEPRECATION WARNING: to improve load times, gym no longer automatically loads gym.spaces. Please run "import gym.spaces" to load gym.spaces on your own. This warning will turn into an error in a future version of gym.')
        # Imported lazily on first access, then the lookup is delegated.
        import gym.spaces
        return getattr(gym.spaces, k)
def main():
    """Generate code completions for HumanEval or MBPP using Magicoder-style
    prompts and append them as JSONL samples to ``args.save_path``."""
    parser = HfArgumentParser((Args, GenerationConfig))
    (args, generation_config) = cast(tuple[(Args, GenerationConfig)], parser.parse_args_into_dataclasses())
    (raw_problem_fn, map_problem_fn) = ((get_humaneval_raw_problems, map_humaneval_problem) if (args.dataset == 'humaneval') else (get_mbpp_raw_problems, map_mbpp_problem))
    raw_problems = raw_problem_fn()
    problems = list(map(map_problem_fn, raw_problems))
    state = get_model_context(args.model_key, args.model_name_or_path)
    problems_chunked = list(chunked(list(problems), args.n_problems_per_batch))
    # Renamed from `iter`, which shadowed the builtin.
    tasks = itertools.product(problems_chunked, range(args.n_batches))
    n_total = len(problems_chunked) * args.n_batches
    # Truncate any previous run's output before appending.
    Path(args.save_path).write_text('')
    for (problems, batch_idx) in tqdm(tasks, total=n_total):
        task_ids = [problem['id'] for problem in problems]
        prompts = [MAGICODER_PROMPT.format(instruction=problem['instruction'], response=problem['response_prefix']) for problem in problems]
        print('PROMPT')
        print(prompts[-1])
        # Each prompt is repeated once per requested sample.
        all_prompts = prompts * args.n_samples_per_problem
        all_task_ids = task_ids * args.n_samples_per_problem
        response = state.complete(generation_config, all_prompts)
        completions = response.decoded_outputs
        assert len(problems) <= args.n_problems_per_batch
        assert len(completions) == (len(problems) * args.n_samples_per_problem)
        print('COMPLETION')
        print(completions[-1])
        # Truncate each completion at the first fenced-code marker, if any
        # (was a hard-to-read walrus expression inside a slice).
        samples = []
        for (task_id, completion) in zip(all_task_ids, completions):
            fence = completion.find('```')
            body = completion if fence == -1 else completion[:fence]
            samples.append(dict(task_id=task_id, completion=body))
        write_jsonl(args.save_path, samples, append=True)
def get_model_sparsity(model):
    """Return the fraction of non-zero weights across all prunable modules.

    Only modules accepted by `_is_prunable_module` are counted; bias tensors
    are ignored.

    NOTE(review): raises ZeroDivisionError when the model contains no
    prunable modules — confirm callers never pass such models.
    """
    prunables = 0
    nnzs = 0
    for m in model.modules():
        if _is_prunable_module(m):
            prunables += m.weight.data.numel()
            nnzs += m.weight.data.nonzero().size(0)
    return (nnzs / prunables)
class ScalarTypeNode(ExprNode):
    """IR expression node representing a scalar type."""
    def __init__(self, parse_info=None, raw_text=None):
        super().__init__(IRNodeType.ScalarType, parse_info=parse_info, raw_text=raw_text)
        # Defaults to a real-valued scalar; set True for integer scalars.
        self.is_int = False
def get_dataset_info(dir_path, name):
    """Describe the dataset directory ``name`` under ``dir_path``: its name,
    slash-rooted path, and the directory entries flagged as sessions."""
    entries = get_dir_info(os.path.join(dir_path, name))
    sessions = [entry for entry in entries if entry['is_session']]
    return dict(name=name, path='/' + dir_path + '/' + name, sessions=sessions)
class AverageMeter(object):
    """Tracks a scalar statistic: last value, arithmetic mean since the last
    reset, and an exponential moving average with warm-up-capped momentum."""

    def __init__(self, momentum=0.999):
        self.val = 0           # most recent value
        self.avg = 0           # mean since last reset()
        self.sum = 0
        self.count = 0
        self.long_count = 0    # never reset; drives the EMA warm-up
        self.momentum = momentum
        self.moving_avg = 0

    def reset(self):
        """Clear the arithmetic-mean statistics (the EMA keeps running)."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times."""
        if self.long_count == 0:
            self.moving_avg = val
        else:
            # Cap momentum early on so the EMA warms up quickly.
            m = min(self.momentum, 1.0 - 1.0 / self.long_count)
            self.moving_avg = self.moving_avg * m + val * (1 - m)
        self.val = val
        self.sum += val * n
        self.count += n
        self.long_count += n
        self.avg = self.sum / self.count
class DEEPLABHead(nn.Module):
    """DeepLab-style segmentation head with an ASPP classifier.

    Args:
        in_channels: channels of the deepest input feature map.
        out_channels: number of output classes.
        lateral: if True, fuse the two intermediate feature maps
            (inputs[1], inputs[2]) with the deepest one before classifying.
        norm_layer: normalization layer factory. NOTE(review): the default
            is None but it is called unconditionally — callers must pass one.
        up_kwargs: accepted for interface parity; unused in this head.
    """
    def __init__(self, in_channels, out_channels, lateral=True, norm_layer=None, up_kwargs=None):
        super(DEEPLABHead, self).__init__()
        self.lateral = lateral
        self.conv5 = nn.Sequential(nn.Conv2d(in_channels, 512, 3, padding=1, bias=False), norm_layer(512), nn.ReLU(inplace=True))
        if lateral:
            # 1x1 projections of the two lateral feature maps to 512 channels.
            self.connect = nn.ModuleList([nn.Sequential(nn.Conv2d(512, 512, kernel_size=1, bias=False), norm_layer(512), nn.ReLU(inplace=True)), nn.Sequential(nn.Conv2d(1024, 512, kernel_size=1, bias=False), norm_layer(512), nn.ReLU(inplace=True))])
            self.fusion = nn.Sequential(nn.Conv2d((3 * 512), 512, kernel_size=3, padding=1, bias=False), norm_layer(512), nn.ReLU(inplace=True))
        # Atrous spatial pyramid pooling classifier (dilations 6/12/18/24).
        self.conv6 = Classifier_Module(512, [6, 12, 18, 24], [6, 12, 18, 24], out_channels)
    def forward(self, *inputs):
        """Returns a one-element tuple with the per-class score map."""
        feat = self.conv5(inputs[(- 1)])
        if self.lateral:
            c2 = self.connect[0](inputs[1])
            c3 = self.connect[1](inputs[2])
            feat = self.fusion(torch.cat([feat, c2, c3], 1))
        return tuple([self.conv6(feat)])
def densenet201(num_classes=1000, pretrained='imagenet'):
    """Build a DenseNet-201, optionally loading pretrained weights.

    Args:
        num_classes: number of output classes expected by the weight loader.
        pretrained: pretrained-settings key (e.g. 'imagenet'), or None for
            randomly initialized weights.
    """
    model = models.densenet201(pretrained=False)
    if pretrained is not None:
        # Fetch weights described by the matching pretrained-settings entry.
        settings = pretrained_settings['densenet201'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return modify_densenets(model)
class TopDownGlobalChaFuse(HybridBlock):
    """Top-down channel-attention fusion block (MXNet Gluon).

    Computes a global channel gate from the high-level feature ``xh`` and
    uses it to reweight the low-level feature ``xl`` before summing the two
    and refining the result with a 3x3 conv block.
    """

    def __init__(self, channels=64):
        # channels: number of channels of both inputs and of the output.
        super(TopDownGlobalChaFuse, self).__init__()
        self.channels = channels
        with self.name_scope():
            # Global channel attention: squeeze to 1x1, project, normalize.
            self.global_att = nn.HybridSequential(prefix='global_att')
            self.global_att.add(nn.GlobalAvgPool2D())
            self.global_att.add(nn.Conv2D(self.channels, kernel_size=1, strides=1, padding=0))
            self.global_att.add(nn.BatchNorm())
            self.sigmoid = nn.Activation('sigmoid')
            # Post-fusion refinement: 3x3 conv + BN + ReLU.
            self.post = nn.HybridSequential(prefix='post')
            self.post.add(nn.Conv2D(channels, kernel_size=3, strides=1, padding=1, dilation=1))
            self.post.add(nn.BatchNorm())
            self.post.add(nn.Activation('relu'))

    def hybrid_forward(self, F, xh, xl):
        # The gate is derived solely from the high-level branch.
        xa = xh
        ag = self.global_att(xa)
        xa3 = self.sigmoid(ag)
        # Channel-wise reweighting of the low-level feature, plus a residual
        # path for the high-level feature.
        xs = (xh + F.broadcast_mul(xl, xa3))
        xs = self.post(xs)
        return xs
def print_hashes(instances, only_one=(- 1)):
    """Collect hash-file lines from all running instances over SSH and print
    a tally of how often each distinct "key:value" line appears.

    Args:
        instances: iterable of instance handles understood by the helpers
            (is_running_instance / get_instance_key / get_instance_ip).
        only_one: when >= 0, grep only lines starting with "<only_one>:";
            otherwise take the last line of each hash file.
    """
    print(((('=' * 80) + '\n') + 'Printing hashes '))
    kl = []
    for i in instances:
        if is_running_instance(i):
            if (only_one >= 0):
                # Filtered mode: grep for lines keyed by `only_one` in every hash file.
                c1 = ((' "cd node_cpp_code/_Hashes;find . -name \'*\' -type f -exec grep \'^' + str(only_one)) + ':\' {} \\;"')
            else:
                # Unfiltered mode: take the final line of each hash file.
                c1 = ' "cd node_cpp_code/_Hashes;find . -name \'*\' -type f -exec tail -n 1 {} \\;"'
            # NOTE(review): username is joined to the instance IP with an empty
            # string in between — presumably `username` already ends with '@';
            # confirm, otherwise the ssh target is malformed.
            c1 = (((((('ssh -i ' + get_instance_key(i)) + ' ') + username) + '') + get_instance_ip(i)) + c1)
            # Commands are launched concurrently; output is gathered below.
            kl.append(subprocess.Popen(c1, shell=True, stdin=PIPE, stdout=PIPE))
    d = {}
    count = 0
    for l in kl:
        rd = l.communicate()[0].decode('ascii')
        ls = rd.split('\n')
        for ln in ls:
            # Only count well-formed "key:value" lines (exactly one colon).
            v = ln.split(':')
            if (len(v) == 2):
                if (ln in d):
                    d[ln] += 1
                else:
                    d[ln] = 1
                count += 1
    # NOTE(review): if no instance produced any output, count stays 0 and the
    # percentage below divides by zero.
    for ln in d:
        print(('\t%20s ::: %5d ::: %4.0f %%' % (ln, d[ln], ((100.0 * d[ln]) / count))))
    print('\x1b[32;1m\n[+] Done relaunching nodes \x1b[0m')
class Baseline(nn.Module):
    """Base class for latent time-series models.

    Bundles the shared loss machinery: Gaussian reconstruction likelihood,
    MSE, optional binary/multiclass classification cross-entropy, and an
    optional Poisson point-process likelihood over observation times.
    Subclasses must implement ``get_reconstruction``, which is invoked by
    ``compute_all_losses``.
    """

    def __init__(self, input_dim, latent_dim, device, obsrv_std=0.01, use_binary_classif=False, classif_per_tp=False, use_poisson_proc=False, linear_classifier=False, n_labels=1, train_classif_w_reconstr=False):
        # input_dim: dimensionality of the observed data.
        # latent_dim: dimensionality of the latent state.
        # obsrv_std: fixed observation-noise std for the Gaussian likelihood.
        # use_binary_classif: attach a classifier head to the latent state.
        # classif_per_tp: classify at every time point (vs once per sequence).
        # use_poisson_proc: add a Poisson process term (doubles z0 dimension).
        # linear_classifier: single linear layer instead of the default MLP.
        # train_classif_w_reconstr: optimize CE jointly with reconstruction.
        super(Baseline, self).__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.n_labels = n_labels
        self.obsrv_std = torch.Tensor([obsrv_std]).to(device)
        self.device = device
        self.use_binary_classif = use_binary_classif
        self.classif_per_tp = classif_per_tp
        self.use_poisson_proc = use_poisson_proc
        self.linear_classifier = linear_classifier
        self.train_classif_w_reconstr = train_classif_w_reconstr
        z0_dim = latent_dim
        if use_poisson_proc:
            # The Poisson intensity shares the latent state, doubling z0.
            z0_dim += latent_dim
        if use_binary_classif:
            if linear_classifier:
                self.classifier = nn.Sequential(nn.Linear(z0_dim, n_labels))
            else:
                self.classifier = create_classifier(z0_dim, n_labels)
            utils.init_network_weights(self.classifier)

    def get_gaussian_likelihood(self, truth, pred_y, mask=None):
        """Masked Gaussian log-density of ``pred_y`` against ``truth``.

        The mask is tiled across the trajectory-sample dimension; the result
        is averaged across one axis after a (1, 0) permute — presumably over
        trajectory samples, yielding one value per batch element (confirm
        against masked_gaussian_log_density's output shape).
        """
        if (mask is not None):
            # Replicate the mask for every sampled trajectory.
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)
        log_density_data = masked_gaussian_log_density(pred_y, truth, obsrv_std=self.obsrv_std, mask=mask)
        log_density_data = log_density_data.permute(1, 0)
        log_density = torch.mean(log_density_data, 0)
        return log_density

    def get_mse(self, truth, pred_y, mask=None):
        """Masked mean squared error between ``pred_y`` and ``truth``."""
        if (mask is not None):
            # Replicate the mask for every sampled trajectory.
            mask = mask.repeat(pred_y.size(0), 1, 1, 1)
        log_density_data = compute_mse(pred_y, truth, mask=mask)
        return torch.mean(log_density_data)

    def compute_all_losses(self, batch_dict, n_tp_to_sample=None, n_traj_samples=1, kl_coef=1.0):
        """Run reconstruction and assemble every training/eval metric.

        Returns a dict with 'loss' (the optimized objective), plus detached
        'likelihood', 'mse', 'pois_likelihood', 'ce_loss', and zeroed KL
        placeholders (this baseline has no KL term).
        """
        (pred_x, info) = self.get_reconstruction(batch_dict['tp_to_predict'], batch_dict['observed_data'], batch_dict['observed_tp'], mask=batch_dict['observed_mask'], n_traj_samples=n_traj_samples, mode=batch_dict['mode'])
        likelihood = self.get_gaussian_likelihood(batch_dict['data_to_predict'], pred_x, mask=batch_dict['mask_predicted_data'])
        mse = self.get_mse(batch_dict['data_to_predict'], pred_x, mask=batch_dict['mask_predicted_data'])
        device = get_device(batch_dict['data_to_predict'])
        ce_loss = torch.Tensor([0.0]).to(device)
        if ((batch_dict['labels'] is not None) and self.use_binary_classif):
            # Single-label targets use the binary CE path; otherwise multiclass.
            if ((batch_dict['labels'].size((- 1)) == 1) or (len(batch_dict['labels'].size()) == 1)):
                ce_loss = compute_binary_CE_loss(info['label_predictions'], batch_dict['labels'])
            else:
                ce_loss = compute_multiclass_CE_loss(info['label_predictions'], batch_dict['labels'], mask=batch_dict['mask_predicted_data'])
            if torch.isnan(ce_loss):
                # Dump the offending tensors before aborting: a NaN CE loss
                # indicates corrupted predictions or labels.
                print('label pred')
                print(info['label_predictions'])
                print('labels')
                print(batch_dict['labels'])
                raise Exception('CE loss is Nan!')
        pois_log_likelihood = torch.Tensor([0.0]).to(get_device(batch_dict['data_to_predict']))
        if self.use_poisson_proc:
            pois_log_likelihood = compute_poisson_proc_likelihood(batch_dict['data_to_predict'], pred_x, info, mask=batch_dict['mask_predicted_data'])
            pois_log_likelihood = torch.mean(pois_log_likelihood, 1)
        # Main objective: negative mean log-likelihood.
        loss = (- torch.mean(likelihood))
        if self.use_poisson_proc:
            # Down-weighted Poisson term (fixed 0.1 coefficient).
            loss = (loss - (0.1 * pois_log_likelihood))
        if self.use_binary_classif:
            if self.train_classif_w_reconstr:
                # Joint training: CE is scaled up to balance reconstruction.
                loss = (loss + (ce_loss * 100))
            else:
                # Classification-only training replaces the objective entirely.
                loss = ce_loss
        results = {}
        results['loss'] = torch.mean(loss)
        results['likelihood'] = torch.mean(likelihood).detach()
        results['mse'] = torch.mean(mse).detach()
        results['pois_likelihood'] = torch.mean(pois_log_likelihood).detach()
        results['ce_loss'] = torch.mean(ce_loss).detach()
        # No KL terms in the baseline; kept for interface parity with VAEs.
        results['kl'] = 0.0
        results['kl_first_p'] = 0.0
        results['std_first_p'] = 0.0
        if ((batch_dict['labels'] is not None) and self.use_binary_classif):
            results['label_predictions'] = info['label_predictions'].detach()
        return results
def quantize_with_min_and_max(data, device, non_zero, in_min, in_max):
    """Affine-quantize ``data`` to the uint8 range [0, 255].

    The scale/zero-point come from ``adjust_range``, which adapts the raw
    [in_min, in_max] range to the target device.

    Returns:
        A QuantizedData carrying the quantized int array and the
        quantization parameters (scale, zero, adjusted min/max).
    """
    values = np.array(data).astype(float)
    scale, zero, out_min, out_max = adjust_range(in_min, in_max, device, non_zero=non_zero)
    # Affine quantization: q = round(zero + x / scale), clamped to 8 bits.
    quantized = np.clip(np.round(zero + values / scale).astype(np.int32), 0, 255)
    result = QuantizedData()
    result.data = quantized
    result.scale = scale
    result.zero = zero
    result.minval = out_min
    result.maxval = out_max
    return result
# NOTE(review): the original line here was a bare
# "(config_path=..., config_name=...)" — a syntax error left by a stripped
# decorator. Restored as @hydra.main, which matches the DictConfig argument
# and the hydra.utils.instantiate usage below; confirm against upstream.
@hydra.main(config_path='../eztorch/configs/run/supervised/resnet3d50', config_name='ucf101')
def main(config: DictConfig) -> None:
    """Hydra entry point: prepare the run directory, instantiate datamodule,
    model and trainer from the config, then run testing for each checkpoint.
    """
    # Create (and switch into) the run directory.
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')
    # Persist the fully-resolved config for reproducibility.
    hydradir = rundir / 'config/'
    hydradir.mkdir(parents=True, exist_ok=True)
    config_file = hydradir / 'test.yaml'
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    with config_file.open(mode='w') as f:
        f.write(resolved_config)
    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')
    # Instantiate any configured Lightning callbacks.
    callbacks = []
    if config.get('callbacks'):
        for _, callback_cfg in config.callbacks.items():
            callback: Callback = hydra.utils.instantiate(callback_cfg)
            callbacks.append(callback)
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)
    # Resume from the last checkpoint if one can be located.
    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)
    rank_zero_info(resolved_config)
    rank_zero_info(model)
    model = compile_model(model, **config.get('compile', {}))
    # Either test every checkpoint selected by the callback mode, or one path.
    if config.test.get('ckpt_by_callback_mode'):
        ckpt_paths = get_ckpt_by_callback_mode(config.test.ckpt_path, config.test.ckpt_by_callback_mode)
    else:
        ckpt_paths = [config.test.ckpt_path]
    for ckpt_path in ckpt_paths:
        # Fresh single-device trainer per checkpoint.
        trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, devices=1, strategy='auto')
        trainer.test(model, ckpt_path=ckpt_path, datamodule=datamodule)
def get_parse_args(argv=None):
    """Build and parse the command-line arguments for the training script.

    Args:
        argv: optional list of argument strings; when None (the default,
            preserving the original behavior) ``sys.argv[1:]`` is parsed.

    Returns:
        argparse.Namespace with all parsed options.
    """
    parser = argparse.ArgumentParser(description='PyTorch training script')
    # Data / experiment bookkeeping.
    parser.add_argument('--dataset', default='h36m', type=str, metavar='NAME', help='target dataset')
    parser.add_argument('--keypoints', default='gt', type=str, metavar='NAME', help='2D detections to use, gt/hr/cpn_ft_h36m_dbb/detectron_ft_h36m')
    parser.add_argument('--actions', default='*', type=str, metavar='LIST', help='actions to train/test on, separated by comma, or * for all')
    parser.add_argument('--checkpoint', default='checkpoint/debug', type=str, metavar='PATH', help='checkpoint directory')
    # Fix: help text said "(default: 20)" while the actual default is 25.
    parser.add_argument('--snapshot', default=25, type=int, help='save models_baseline for every (default: 25)')
    parser.add_argument('--note', default='debug', type=str, help='additional name on checkpoint directory')
    parser.add_argument('--evaluate', default='', type=str, metavar='FILENAME', help='checkpoint to evaluate (file name)')
    # Boolean flags parsed from "true"/"false" strings.
    parser.add_argument('--action-wise', default=True, type=(lambda x: (str(x).lower() == 'true')), help='train s1only')
    # Model architecture.
    parser.add_argument('--posenet_name', default='videopose', type=str, help='posenet: gcn/stgcn/videopose/mlp')
    parser.add_argument('--stages', default=4, type=int, metavar='N', help='stages of baseline model')
    parser.add_argument('--dropout', default=0.25, type=float, help='dropout rate')
    # Optimization.
    parser.add_argument('--batch_size', default=1024, type=int, metavar='N', help='batch size in terms of predicted frames')
    parser.add_argument('--epochs', default=50, type=int, metavar='N', help='number of training epochs')
    parser.add_argument('--lr', default=0.001, type=float, metavar='LR', help='initial learning rate')
    parser.add_argument('--lr_decay', type=int, default=100000, help='num of steps of learning rate decay')
    parser.add_argument('--lr_gamma', type=float, default=0.96, help='gamma of learning rate decay')
    parser.add_argument('--no_max', dest='max_norm', action='store_false', help='if use max_norm clip on grad')
    parser.set_defaults(max_norm=True)
    # Misc.
    parser.add_argument('--random_seed', type=int, default=0)
    parser.add_argument('--downsample', default=1, type=int, metavar='FACTOR', help='downsample frame rate by factor')
    parser.add_argument('--pretrain', default=False, type=(lambda x: (str(x).lower() == 'true')), help='used in poseaug')
    parser.add_argument('--s1only', default=False, type=(lambda x: (str(x).lower() == 'true')), help='train S1 only')
    parser.add_argument('--num_workers', default=2, type=int, metavar='N', help='num of workers for data loading')
    args = parser.parse_args(argv)
    return args
def lora_merge_unmerge_state_dict(engine, state_dict, peft_config, merge=True):
    """(Un)merge LoRA weights into every worker model of ``engine``.

    Args:
        engine: object exposing ``workers``, each carrying a ``model``.
        state_dict: LoRA state dict with the low-rank matrices.
        peft_config: mapping with 'r', 'lora_alpha' and 'fan_in_fan_out'.
        merge: True merges the LoRA deltas into the base weights; False
            undoes a previous merge.
    """
    # Hoist config lookups out of the worker loop.
    rank = peft_config['r']
    alpha = peft_config['lora_alpha']
    fan_in_fan_out = peft_config['fan_in_fan_out']
    for w in engine.workers:
        lora_reassign_weights(
            w.model,
            state_dict,
            r=rank,
            lora_alpha=alpha,
            fan_in_fan_out=fan_in_fan_out,
            merge=merge,
        )
def recenter(mesh: Type[trimesh.base.Trimesh], center_fn: Callable[([Type[trimesh.base.Trimesh]], Type[np.ndarray])], in_place: bool=True) -> Type[trimesh.base.Trimesh]:
    """Translate a mesh so that ``center_fn(mesh)`` lands at the origin.

    Args:
        mesh: mesh whose ``vertices`` will be shifted.
        center_fn: maps the mesh to the point that should become the origin.
        in_place: when False, a deep copy is shifted and returned instead.

    Returns:
        The recentered mesh (the same object when ``in_place`` is True).
    """
    offset = center_fn(mesh)
    target = mesh if in_place else copy.deepcopy(mesh)
    # The copy's vertices equal the original's, so subtracting from
    # mesh.vertices is equivalent either way.
    target.vertices = mesh.vertices - offset
    return target
def drn_c_26(BatchNorm, pretrained=True):
    """Construct a DRN-C-26 model.

    Args:
        BatchNorm: normalization layer class forwarded to DRN.
        pretrained: when truthy, load the published ImageNet weights (minus
            the fc head, which is dropped so a new task head can be attached).

    Returns:
        The constructed (and optionally pretrained) DRN model.
    """
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', BatchNorm=BatchNorm)
    if pretrained:
        # Fix: use a distinct name instead of shadowing the boolean
        # `pretrained` parameter with the downloaded state dict.
        state_dict = model_zoo.load_url(model_urls['drn-c-26'])
        # Drop the classifier weights: their shape depends on the original
        # 1000-class head, which downstream tasks replace.
        del state_dict['fc.weight']
        del state_dict['fc.bias']
        model.load_state_dict(state_dict)
    return model
class RandomSampler(object):
    """Sampler yielding a fresh random permutation of dataset indices.

    Fixes over the original: ``np.random.RandomSatate`` was a typo for
    ``RandomState`` (AttributeError at construction), and the ``state``
    argument was silently ignored despite the get_state/set_state API.

    Note: ``__iter__`` draws from torch's global RNG (``torch.randperm``);
    ``self.rng`` only carries the checkpointable NumPy state.
    """

    def __init__(self, data_source, state=None, seed=None):
        """
        Args:
            data_source: sized dataset whose indices are sampled.
            state: optional RNG state (as returned by ``get_state``) to restore.
            seed: optional seed for the internal NumPy RNG.
        """
        self.data_source = data_source
        # Bug fix: 'RandomSatate' -> 'RandomState'.
        self.rng = np.random.RandomState(seed)
        # Honor a previously captured RNG state when one is provided.
        if state is not None:
            self.rng.set_state(state)

    def __iter__(self):
        # New permutation of all indices on every iteration (i.e. per epoch).
        return iter(torch.randperm(len(self.data_source)).long())

    def __len__(self):
        return len(self.data_source)

    def get_state(self):
        """Return the NumPy RNG state (for checkpointing)."""
        return self.rng.get_state()

    def set_state(self, state):
        """Restore a NumPy RNG state captured by ``get_state``."""
        self.rng.set_state(state)
def render_git_describe_long(pieces):
    """Render version info like ``git describe --tags --dirty --always --long``.

    Unlike the short form, the "-<distance>-g<hash>" suffix is always
    included when a tag is reachable; "-dirty" is appended for local changes.
    """
    if pieces['closest-tag']:
        rendered = '%s-%d-g%s' % (pieces['closest-tag'], pieces['distance'], pieces['short'])
    else:
        # No tag reachable: fall back to the bare short commit hash.
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered
def collate_fn(examples):
    """Collate a batch: stack every example's 'pixel_values' into one tensor."""
    stacked = torch.stack([ex['pixel_values'] for ex in examples])
    return {'pixel_values': stacked}
def is_hf_dataset(dataset):
    """Return True iff ``dataset`` is a HuggingFace ``datasets.Dataset``.

    Returns False outright when the ``datasets`` package is unavailable,
    avoiding the import entirely in that case.
    """
    if is_datasets_available():
        # Imported lazily: only touched once we know it is installed.
        from datasets import Dataset
        return isinstance(dataset, Dataset)
    return False
# NOTE(review): the original line "_optimizer('lamb')" is the residue of a
# stripped decorator; restored as @register_optimizer('lamb') (confirm against
# upstream fairseq). @staticmethod/@property below are likewise restored:
# __init__ unpacks `self.optimizer_config` without calling it, and add_args
# takes no self.
@register_optimizer('lamb')
class FairseqLAMB(FairseqOptimizer):
    """LAMB optimizer backed by NVIDIA Apex's fused implementation."""

    def __init__(self, args, params):
        super().__init__(args)
        try:
            from apex.optimizers import FusedLAMB
            # `optimizer_config` must be a property: it is unpacked here.
            self._optimizer = FusedLAMB(params, **self.optimizer_config)
        except ImportError:
            raise ImportError('Please install apex to use LAMB optimizer')

    @staticmethod
    def add_args(parser):
        """Add LAMB-specific command-line arguments to ``parser``."""
        parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', help='betas for LAMB optimizer')
        parser.add_argument('--lamb-eps', type=float, default=1e-08, metavar='D', help='epsilon for LAMB optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')

    @property
    def optimizer_config(self):
        """Kwargs used to instantiate the underlying FusedLAMB optimizer.

        NOTE: ``eval`` of --lamb-betas trusts the command line; do not feed
        it untrusted strings.
        """
        return {'lr': self.args.lr[0], 'betas': eval(self.args.lamb_betas), 'eps': self.args.lamb_eps, 'weight_decay': self.args.weight_decay}

    @property
    def supports_flat_params(self):
        # Fused LAMB operates on per-parameter tensors, not a flat buffer.
        return False
class Partition():
    """One core-sized partition of a Layer: tracks its axon groups, synapse
    groups, compartment group, synFmts, and the accumulated resource costs.

    NOTE(review): the accessors below are exposed as properties — ``cost``
    reads ``self.inputAxonCost`` etc. without calling them, so the original
    ``@property`` decorators were evidently stripped; they are restored here
    (as written, ``cost`` would have summed bound methods and raised).
    """

    def __init__(self, partitionId, chipCounter, sizeInterleaved, parentLayer, isInhibitory=False, resetMode='hard'):
        """
        Args:
            partitionId: identifier of this partition within its layer.
            chipCounter: chip bookkeeping object held for later queries.
            sizeInterleaved: interleaved size of the partition.
            parentLayer: owning Layer instance (enforced by assertion).
            isInhibitory: whether this partition is inhibitory.
            resetMode: compartment reset mode (default 'hard').
        """
        assert isinstance(parentLayer, Layer)
        self.id = partitionId
        self.sizeInterleaved = sizeInterleaved
        self._layer = parentLayer
        self._inputAxonGroups = []
        self._outputAxonGroups = []
        self._synapseGroups = []
        self._compartmentGroup = None
        self._synFmts = []
        # Aggregate counters/costs, updated by the add* methods below.
        self._numSyn = 0
        self._numSynEntries = 0
        self._numSynMemWords = 0
        self._numInputAxons = 0
        self._numOutputAxons = 0
        self._numOutputAxonCfgEntries = 0
        self._inputAxonCost = 0
        self._outputAxonCost = 0
        self._synapseCost = 0
        self._cost = 0
        self._chipCounter = chipCounter
        # Assigned later, once the partition is mapped onto hardware.
        self.chipId = None
        self.coreId = None
        self._isInhibitory = isInhibitory
        self._resetMode = resetMode

    # Read-only views of the partition's contents.
    @property
    def inputAxonGroups(self):
        return self._inputAxonGroups

    @property
    def synapseGroups(self):
        return self._synapseGroups

    @property
    def compartmentGroup(self):
        return self._compartmentGroup

    @property
    def outputAxonGroups(self):
        return self._outputAxonGroups

    @property
    def synFmts(self):
        return self._synFmts

    def addInputAxonGroup(self, inputAxonGroup):
        """Register an input axon group, accumulating its cost and axon count."""
        assert isinstance(inputAxonGroup, InputAxonGroup)
        self._inputAxonCost += inputAxonGroup.cost
        self._numInputAxons += inputAxonGroup.numAxons
        self._inputAxonGroups.append(inputAxonGroup)

    def addSynapseGroup(self, synapseGroup):
        """Register a synapse group, accumulating cost and synapse counters."""
        assert isinstance(synapseGroup, SynapseGroup)
        self._synapseCost += synapseGroup.cost
        self._numSyn += synapseGroup.numSyn
        self._numSynEntries += synapseGroup.numSynEntries
        self._numSynMemWords += synapseGroup.numSynMemWords
        self._synapseGroups.append(synapseGroup)

    def addCompartmentGroup(self, compartmentGroup):
        """Attach the (single) compartment group of this partition."""
        assert isinstance(compartmentGroup, CompartmentGroup)
        self._compartmentGroup = compartmentGroup

    def addOutputAxonGroup(self, outputAxonGroup):
        """Register an output axon group, accumulating its cost and counters."""
        assert isinstance(outputAxonGroup, OutputAxonGroup)
        self._outputAxonCost += outputAxonGroup.cost
        self._numOutputAxons += outputAxonGroup.numAxons
        self._numOutputAxonCfgEntries += outputAxonGroup.numAxonCfgEntries
        self._outputAxonGroups.append(outputAxonGroup)

    def addSynFmt(self, synFmt):
        """Register a synapse format used by this partition."""
        assert isinstance(synFmt, SynFmt)
        self._synFmts.append(synFmt)

    # Aggregate counters.
    @property
    def numSyn(self):
        return self._numSyn

    @property
    def numSynEntries(self):
        return self._numSynEntries

    @property
    def numSynMemWords(self):
        return self._numSynMemWords

    @property
    def numInputAxons(self):
        return self._numInputAxons

    @property
    def numOutputAxons(self):
        return self._numOutputAxons

    @property
    def numOutputAxonCfgEntries(self):
        return self._numOutputAxonCfgEntries

    # Resource costs.
    @property
    def inputAxonCost(self):
        return self._inputAxonCost

    @property
    def outputAxonCost(self):
        return self._outputAxonCost

    @property
    def synapseCost(self):
        return self._synapseCost

    @property
    def cost(self):
        # Total resource cost; reads the sibling properties above.
        return self.inputAxonCost + self.outputAxonCost + self.synapseCost

    @property
    def layer(self):
        return self._layer

    @property
    def chipCounter(self):
        return self._chipCounter

    @property
    def isInhibitory(self):
        return self._isInhibitory

    @property
    def resetMode(self):
        return self._resetMode
def test_batting_stats_bref() -> None:
    """Smoke-test Baseball-Reference batting stats for the 2019 season."""
    table = league_batting_stats.batting_stats_bref(2019)
    # A populated frame with the expected column and row counts.
    assert table is not None
    assert not table.empty
    assert len(table.columns) == 28
    assert len(table) == 991
def evaluate_caption_json(res_file, ann_file):
    """Score caption results against COCO annotations.

    Args:
        res_file: results filename (looked up under COCO_DIR/results).
        ann_file: annotation filename (looked up under COCO_DIR/annotations).

    Returns:
        Tuple of (metric -> score dict, per-image evaluations, COCOEvalCap).
    """
    assert ann_file.endswith('.json'), '`ann_file` should end with `.json`, saw `{}` instead.'.format(ann_file)
    assert res_file.endswith('.json'), '`res_file` should end with `.json`, saw `{}` instead.'.format(res_file)
    ann_path = os.path.join(COCO_DIR, 'annotations', ann_file)
    res_path = os.path.join(COCO_DIR, 'results', res_file)
    coco = COCO(ann_path)
    coco_res = coco.loadRes(res_path)
    coco_eval = COCOEvalCap(coco, coco_res)
    # Restrict scoring to the images actually present in the results file.
    coco_eval.params['image_id'] = coco_res.getImgIds()
    coco_eval.evaluate()
    results = {metric: score for metric, score in coco_eval.eval.items()}
    return (results, coco_eval.evalImgs, coco_eval)
def stable_resize_token_embeddings(model: transformers.PreTrainedModel, target_size: int):
    """Resize the vocab to ``target_size``, initializing any newly added rows
    to the mean of the pre-existing rows (for both input and output
    embeddings) instead of random values.
    """
    old_size = model.get_input_embeddings().weight.size(0)
    num_new_tokens = target_size - old_size
    model.resize_token_embeddings(target_size)
    if num_new_tokens <= 0:
        return
    for embeddings in (model.get_input_embeddings().weight.data,
                       model.get_output_embeddings().weight.data):
        # Mean over the original rows, broadcast into the fresh tail rows.
        embeddings[-num_new_tokens:] = embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to zero.

    The returned LambdaLR multiplies each group's base LR by a factor that
    ramps 0 -> 1 over ``num_warmup_steps`` steps, then decays linearly to 0
    at ``num_training_steps`` (clamped at zero afterwards).
    """
    def lr_lambda(current_step):
        # Warmup phase: fraction of warmup completed so far.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay phase: remaining fraction of training, floored at zero.
        remaining = float(num_training_steps - current_step)
        span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, AdapterTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args, adapter_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args, adapter_args) = parser.parse_args_into_dataclasses()
training_args.predict_with_generate = True
wandb.init(entity='lklab_kaist', project='ROE_experiments_ICLR', name=training_args.output_dir)
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
print('#### last_checkpoint ', last_checkpoint)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
pass
elif (last_checkpoint is not None):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', training_args)
set_seed(training_args.seed)
model_args.model_name_or_path = 'google/flan-t5-xl'
config = T5Config.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
config.train_task_adapters = adapter_args.train_task_adapters
config.prefix_tuning = adapter_args.prefix_tuning
config.attn_prefix_tuning = model_args.attn_prefix_tuning
config.attn_method = model_args.attn_method
config.ignore_target = model_args.ignore_target
config.shared_attn = model_args.shared_attn
config.prefix_num = model_args.prefix_num
config.num_target = len(data_args.task_name)
config.temperature = model_args.temperature
config.fix_attention = model_args.fix_attention
adapter_config = get_adapter_config(adapter_args, data_args, training_args, config)
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = T5ForConditionalGeneration.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), adapter_config=adapter_config)
if (model_args.load_prefix_embeddings is True):
if (model_args.prompt_embedding_path is None):
for (name, param) in model.named_parameters():
if (('prefix_shared' in name) or ('prefix' in name)):
shared_params = [param]
else:
shared_params = []
for path in model_args.prompt_embedding_path:
shared_param = torch.load(path)
shared_params.append(shared_param)
if (model_args.target_prompt_embedding_path is not None):
target_prompt_embedding = torch.load(model_args.target_prompt_embedding_path)
if (model_args.attn_prefix_tuning is True):
if ((training_args.do_train is True) and (model_args.shared_attn is False)):
model.store_prefix_weights(shared_params)
model.update_prefix_weights_single(shared_params[0])
elif ((training_args.do_train is True) and (model_args.shared_attn is True)):
model.store_prefix_weights(shared_params)
model.update_prefix_weights_multi(shared_params[0], num_target=config.num_target)
else:
model.store_prefix_weights(shared_params)
model.update_prefix_weights_single(target_prompt_embedding)
elif (model_args.target_prompt_embedding_path is None):
model.update_prefix_weights(shared_params)
else:
model.update_prefix_weights(shared_params, target_prompt_embedding)
if ((model_args.load_attention is True) and (model_args.attn_path is not None)):
model.update_attention_weights(torch.load(model_args.attn_path))
if ((model_args.load_attention is True) and (model_args.attn_path_sub is not None)):
model.update_attention_weights_sub(model_args.attn_path_sub)
if ((model_args.load_layer_norm is True) and (model_args.layer_norm_dir is not None)):
model.update_layer_norm_weights(model_args.layer_norm_dir)
model.resize_token_embeddings(len(tokenizer))
model = modify_model_after_init(model, training_args, adapter_args, adapter_config)
model_args.load_adapter_weights = False
if model_args.load_adapter_weights:
adapter_params = {}
lst = os.listdir(os.path.join(training_args.output_dir, 'adapter_params'))
for path in lst:
full_path = os.path.join(training_args.output_dir, 'adapter_params', path)
params = torch.load(full_path)
path_ = path.split('.')
path = '.'.join(path_[:(- 1)])
adapter_params[path] = params
load_cnt = 0
for (name, param) in model.named_parameters():
if param.requires_grad:
load_cnt += 1
param.data = adapter_params[name].cuda()
print(f'load count: {load_cnt}')
print(f'Finished loading {len(adapter_params)} number of adapter parameter files')
data_args.dataset_name = data_args.task_name
data_args.test_dataset_name = data_args.test_dataset_name
data_args.dataset_config_name = data_args.dataset_config_name
data_args.eval_dataset_name = ['xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'xsum', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'trec', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'cos_e', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'commonsense_qa', 'dream', 'dream', 'dream', 'dream', 'dream', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quail', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'quartz', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'social_i_qa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'wiqa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'cosmos_qa', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'qasc', 'quarel', 'quarel', 'quarel', 'quarel', 'quarel', 'sciq', 'sciq', 'sciq', 'sciq', 'sciq', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'wiki_hop', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'amazon_polarity', 'app_reviews', 'app_reviews', 'app_reviews', 'app_reviews', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'imdb', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'rotten_tomatoes', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'yelp_review_full', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 'paws', 
'paws', 'paws', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_qqp', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'glue_mrpc', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'ag_news', 'dbpedia_14', 'dbpedia_14', 'dbpedia_14', 'dbpedia_14', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'adversarial_qa', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'quoref', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'ropes', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'duorc', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'hotpot_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'wiki_qa', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'common_gen', 'wiki_bio', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'cnn_dailymail', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'gigaword', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'multi_news', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum', 'samsum']
data_args.eval_dataset_config_name = ['none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'v1.11', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'original', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'labeled_final', 'qqp', 'qqp', 'qqp', 'qqp', 'qqp', 'qqp', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'mrpc', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'adversarialQA', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 
'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'ParaphraseRC', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'fullwiki', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', '3.0.0', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none', 'none']
data_args.eval_prompts = ['DOC_write_summary_of_above', 'article_DOC_summary', 'DOC_how_would_you_rephrase_few_words', 'college_roommate_asked_DOC_so_I_recap', 'DOC_boils_down_to_simple_idea_that', 'summarize_DOC', 'summarize_this_DOC_summary', 'DOC_given_above_write_one_sentence', 'read_below_DOC_write_abstract', 'DOC_tldr', 'what_category_best_describe', 'fine_grained_LOC', 'fine_grained_NUM_context_first', 'fine_grained_ENTY', 'fine_grained_NUM', 'pick_the_best_descriptor', 'fine_grained_open_context_first', 'fine_grained_LOC_context_first', 'which_category_best_describes', 'fine_grained_DESC', 'trec1', 'fine_grained_ABBR', 'fine_grained_ABBR_context_first', 'trec2', 'fine_grained_HUM', 'fine_grained_open', 'fine_grained_HUM_context_first', 'fine_grained_DESC_context_first', 'question_description_option_text', 'question_description_option_id', 'rationale', 'question_option_description_text', 'aligned_with_common_sense', 'description_question_option_id', 'explain_why_human', 'generate_explanation_given_text', 'description_question_option_text', 'i_think', 'question_option_description_id', 'answer_given_question_without_options', 'question_answering', 'question_to_answer_index', 'most_suitable_answer', 'answer_to_question', 'generate-last-utterance', 'answer-to-dialogue', 'generate-first-utterance', 'baseline', 'read_the_following_conversation_and_answer_the_question', 'context_question_answer_description_id', 'context_question_answer_description_text', 'description_context_question_answer_id', 'context_question_description_answer_text', 'context_question_description_text', 'context_description_question_text', 'context_question_description_answer_id', 'no_prompt_id', 'context_description_question_answer_id', 'description_context_question_text', 'no_prompt_text', 'context_description_question_answer_text', 'description_context_question_answer_text', 'use_info_from_question_paragraph', 'paragraph_question_plain_concat', 'use_info_from_paragraph_question', 
'answer_question_based_on', 'answer_question_below', 'read_passage_below_choose', 'having_read_above_passage', 'given_the_fact_answer_the_q', 'I was wondering', 'Show choices and generate answer', 'Check if a random answer is valid or not', 'Generate the question from the answer', 'Generate answer', 'Show choices and generate index', 'what_might_be_the_first_step_of_the_process', 'what_might_be_the_last_step_of_the_process', 'what_is_the_missing_first_step', 'what_is_the_final_step_of_the_following_process', 'effect_with_string_answer', 'which_of_the_following_is_the_supposed_perturbation', 'effect_with_label_answer', 'does_the_supposed_perturbation_have_an_effect', 'context_answer_to_question', 'description_context_question_answer_text', 'description_context_question_text', 'description_context_question_answer_id', 'context_description_question_answer_text', 'no_prompt_id', 'context_question_description_text', 'no_prompt_text', 'context_description_question_answer_id', 'context_question_description_answer_id', 'context_description_question_text', 'context_question_description_answer_text', 'only_question_answer', 'is_correct_1', 'qa_with_separated_facts_1', 'qa_with_separated_facts_3', 'qa_with_separated_facts_4', 'qa_with_separated_facts_5', 'qa_with_combined_facts_1', 'is_correct_2', 'qa_with_separated_facts_2', 'do_not_use', 'logic_test', 'heres_a_story', 'choose_between', 'testing_students', 'Direct Question (Closed Book)', 'Multiple Choice (Closed Book)', 'Multiple Choice Question First', 'Multiple Choice', 'Direct Question', 'choose_best_object_interrogative_1', 'explain_relation', 'generate_object', 'generate_subject', 'choose_best_object_affirmative_1', 'choose_best_object_affirmative_3', 'generate_subject_and_object', 'choose_best_object_affirmative_2', 'choose_best_object_interrogative_2', 'Is_this_review', 'User_recommend_this_product', 'Is_this_product_review_positive', 'Is_this_review_negative', 'convey_negative_or_positive_sentiment', 
'negative_or_positive_tone', 'user_satisfied', 'would_you_buy', 'flattering_or_not', 'categorize_rating_using_review', 'generate_review', 'convert_to_star_rating', 'convert_to_rating', 'Movie Expressed Sentiment 2', 'Reviewer Opinion bad good choices', 'Sentiment with choices ', 'Reviewer Sentiment Feeling', 'Writer Expressed Sentiment', 'Movie Expressed Sentiment', 'Text Expressed Sentiment', 'Negation template for positive and negative', 'Reviewer Enjoyment Yes No', 'Reviewer Expressed Sentiment', 'Reviewer Enjoyment', 'Movie Expressed Sentiment 2', 'Reviewer Opinion bad good choices', 'Sentiment with choices ', 'Reviewer Sentiment Feeling', 'Writer Expressed Sentiment', 'Movie Expressed Sentiment', 'Text Expressed Sentiment', 'Reviewer Enjoyment Yes No', 'Reviewer Expressed Sentiment', 'Reviewer Enjoyment', 'so_i_would', 'based_on_that', 'format_star', 'this_place', 'format_score', 'on_a_scale', 'format_rating', 'task_description-no-label', 'Meaning', 'context-question-no-label', 'Rewrite-no-label', 'context-question', 'Concatenation', 'paraphrase-task', 'Concatenation-no-label', 'Meaning-no-label', 'PAWS-ANLI GPT3', 'Rewrite', 'PAWS-ANLI GPT3-no-label', 'quora', 'duplicate or not', 'same thing', 'answer', 'meaning', 'duplicate', 'generate_paraphrase', 'want to know', 'paraphrase', 'equivalent', 'generate_sentence', 'replace', 'same thing', 'classify_question_first', 'classify_with_choices_question_first', 'recommend', 'which_section_choices', 'which_section', 'classify_with_choices', 'classify', 'given_list_what_category_does_the_paragraph_belong_to', 'pick_one_category_for_the_following_text', 'given_a_choice_of_categories ', 'given_a_list_of_category_what_does_the_title_belong_to', 'generate_question', 'tell_what_it_is', 'question_context_answer', 'based_on', 'answer_the_following_q', 'Guess Answer', 'Answer Question Given Context', 'Find Answer', 'Context Contains Answer', 'Given Context Answer Question', 'What Is The Answer', 'Answer Test', 'Guess Title For 
Context', 'Found Context Online', 'Answer Friend Question', 'Read And Extract ', 'prompt_beginning', 'prompt_bottom_no_hint', 'prompt_bottom_hint_beginning', 'given_background_situation', 'plain_no_background', 'plain_bottom_hint', 'plain_background_situation', 'background_new_situation_answer', 'background_situation_middle', 'new_situation_background_answer', 'prompt_mix', 'read_background_situation', 'build_story_around_qa', 'decide_worth_it', 'question_answering', 'movie_director', 'generate_question', 'extract_answer', 'title_generation', 'answer_question', 'generate_question_by_answer', 'generate_answer_affirmative', 'classify_question_type', 'generate_title_affirmative', 'generate_question', 'generate_explanations_affirmative', 'generate_answer_interrogative', 'Is This True?', 'automatic_system', 'Jeopardy style', 'Topic Prediction - Question and Answer Pair', 'Generate Question from Topic', 'found_on_google', 'Topic Prediction - Question Only', 'exercise', 'Decide_good_answer', 'Topic Prediction - Answer Only', 'Direct Answer to Question', 'Given concepts - type 2', 'Put together', 'choice in concept centric sentence generation', 'random task template prompt', 'topics from the sentence', 'sentence to concepts', 'topic to sentence', 'Example prompt', 'Given concepts type 1', 'who', 'write_an_outline', 'news_summary', '2_or_3_sentences', 'tldr_summary', 'news_card_view', 'generate_story', 'sum_in_brief', 'news_stock', 'spice_up_story', 'generate_summary_for_this', 'reverse_writing', 'make_a_title', 'first_sentence_title', 'TLDR', 'write_its_sentence', 'write_a_title_for_this_sentence', 'in_a_nutshell', 'write_an_article', 'what are the key points', 'synthesize', 'summary scenario', 'summarize', 'expand (reverse task)', 'distill', 'Summarize this dialogue:', 'Given the above dialogue write a summary', 'Summarize:', 'To sum up this dialog', 'Generate a summary for this dialogue', 'Write a dialogue that match this summary', 'Sum up the following dialogue']
data_args.test_dataset_config_name = data_args.test_dataset_config_name
data_args.txt_save_dir = 'output_logs_seen_eval_flanT5'
assert (len(data_args.dataset_name) == len(data_args.dataset_config_name))
if (data_args.eval_dataset_name is not None):
assert (len(data_args.eval_dataset_name) == len(data_args.eval_dataset_config_name))
if (data_args.test_dataset_name is not None):
assert (len(data_args.test_dataset_name) == len(data_args.test_dataset_config_name))
padding = ('max_length' if data_args.pad_to_max_length else False)
def preprocess_function(examples, max_target_length, task_id=None):
    """Tokenize a batch of source/target pairs for seq2seq training.

    Relies on the enclosing scope for ``tokenizer``, ``padding`` and
    ``data_args``. Returns the tokenized inputs with ``labels``,
    ``extra_fields`` and (optionally) ``task_ids`` attached.
    """
    # Tokenize the inputs, truncating/padding to the configured source length.
    model_inputs = tokenizer(examples['source'], max_length=data_args.max_source_length, padding=padding, truncation=True)
    # Tokenize targets in target mode (matters for tokenizers that treat
    # decoder text differently from encoder text).
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(examples['target'], max_length=max_target_length, padding=padding, truncation=True)
    # With fixed-length padding, mask pad positions in the labels with -100
    # so the loss ignores them.
    if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
        labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
    model_inputs['labels'] = labels['input_ids']
    model_inputs['extra_fields'] = examples['extra_fields']
    # Tag every example in the batch with its task id (used by the
    # shared-attention code path).
    if (task_id is not None):
        model_inputs['task_ids'] = [task_id for _ in examples['extra_fields']]
    return model_inputs
column_names = ['source', 'target', 'extra_fields']
performance_metrics = {}
eval_metrics_dict = {((dataset_name + '*') + eval_prompt): AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).metric for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)}
print('')
print(data_args.eval_dataset_name)
print()
print(data_args.eval_dataset_config_name)
print()
print(eval_metrics_dict)
print('')
training_args.do_train = False
if training_args.do_train:
if (data_args.train_files is not None):
train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=train_file) for (dataset_name, dataset_config_name, train_file, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_files, data_args.train_prompts)]
for td in train_datasets:
print('')
print(len(td))
print('')
else:
train_datasets = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt, seed=data_args.data_seed).get(split='train', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_train_samples, lang=data_args.lang_name, file_name=data_args.train_file) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)]
for td in train_datasets:
print('')
print(len(td))
print('')
max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=train_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, train_prompt) in zip(data_args.dataset_name, data_args.dataset_config_name, data_args.train_prompts)]
for (i, train_dataset) in enumerate(train_datasets):
if (model_args.shared_attn is True):
train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i], task_id=i), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
else:
print('')
print(len(train_datasets[i]))
print('')
train_datasets[i] = train_datasets[i].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[i]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
print('')
print(len(train_datasets[i]))
print('')
print('')
print(len(train_dataset))
print('')
train_dataset = concatenate_datasets(train_datasets)
print('')
print(len(train_dataset))
print('')
training_args.do_eval = True
training_args.per_device_eval_batch_size = 64
data_args.max_val_samples = 300
if training_args.do_eval:
max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=eval_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, eval_prompt) in zip(data_args.eval_dataset_name, data_args.eval_dataset_config_name, data_args.eval_prompts)]
with open('/home/joel_jang/seungone/RoE/seq2seq/data/manual/seen_eval_300.json', 'r') as f:
eval_datasets = {}
data = json.load(f)
for eval_dataset in data:
if (len(data[eval_dataset]) == 0):
continue
for eval_prompt in data[eval_dataset]:
tmp_dict = {'source': [], 'target': [], 'extra_fields': [], 'task': []}
for idx in data[eval_dataset][eval_prompt]:
tmp_dict['task'].append(((eval_dataset + '*') + eval_prompt))
tmp_dict['source'].append(data[eval_dataset][eval_prompt][idx]['source'])
tmp_dict['target'].append(data[eval_dataset][eval_prompt][idx]['target'])
if ('labels_list' in data[eval_dataset][eval_prompt][idx]):
if ('labels_list' not in tmp_dict):
tmp_dict['labels_list'] = []
tmp_dict['labels_list'].append(data[eval_dataset][eval_prompt][idx]['labels_list'])
tmp_dict['extra_fields'].append({})
eval_datasets[((eval_dataset + '*') + eval_prompt)] = Dataset.from_dict(tmp_dict)
for (k, name) in enumerate(eval_datasets):
if (name == 'lama_fill_mask'):
max_target_lengths[k] = 2
elif (name == 'lambada_what comes next'):
max_target_lengths[k] = 1
if (model_args.shared_attn is True):
eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
else:
eval_datasets[name] = eval_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
if training_args.do_test:
if (data_args.test_files is not None):
test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=test_file) for (test_dataset, test_dataset_config, test_file, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_files, data_args.test_prompts)}
else:
test_datasets = {test_dataset: AutoTask.get(test_dataset, test_dataset_config, prompt=test_prompt, seed=data_args.data_seed).get(split='test', split_validation_test=training_args.split_validation_test, add_prefix=(False if adapter_args.train_task_adapters else True), n_obs=data_args.max_test_samples, lang=data_args.lang_name, file_name=data_args.test_file) for (test_dataset, test_dataset_config, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)}
max_target_lengths = [AutoTask.get(dataset_name, dataset_config_name, prompt=test_prompt).get_max_target_length(tokenizer=tokenizer, default_max_length=data_args.max_target_length) for (dataset_name, dataset_config_name, test_prompt) in zip(data_args.test_dataset_name, data_args.test_dataset_config_name, data_args.test_prompts)]
for (k, name) in enumerate(test_datasets):
if (name == 'lama'):
max_target_lengths[k] = 2
elif (name == 'lambada'):
max_target_lengths[k] = 1
if (model_args.shared_attn is True):
test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k], task_id=k), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
else:
test_datasets[name] = test_datasets[name].map(functools.partial(preprocess_function, max_target_length=max_target_lengths[k]), batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache))
label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = TaskDataCollatorForSeq2Seq(tokenizer, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
if training_args.do_eval:
data_info = {'eval': eval_datasets[((data_args.eval_dataset_name[0] + '*') + data_args.eval_prompts[0])]['extra_fields'], 'test': (test_datasets[((data_args.test_dataset_name[0] + '*') + data_args.test_prompts[0])]['extra_fields'] if training_args.do_test else None), 'train': (train_dataset['extra_fields'] if training_args.do_train else None)}
else:
data_info = {'train': (train_dataset['extra_fields'] if training_args.do_train else None)}
def compute_metrics(eval_preds, task_name):
    """Decode predictions/labels, score them with the task's metrics, and
    append a per-example prediction log to a text file.

    Relies on the enclosing scope for ``tokenizer``, ``data_args`` and
    ``eval_metrics_dict``. Returns a dict of metric name -> value.
    """
    (preds, labels, data_info, input_ids) = eval_preds
    # Recover the raw prompts so they can be logged next to predictions.
    decoded_input_ids = tokenizer.batch_decode(input_ids, skip_special_tokens=True)
    decoded_input_ids = [ii.strip() for ii in decoded_input_ids]
    # Task-specific post-processing (pad stripping, label mapping, etc.).
    post_processor = AutoPostProcessor.get(task_name, tokenizer, data_args.ignore_pad_token_for_loss)
    (decoded_preds, decoded_labels) = post_processor.process(preds, labels, data_info)
    result = {}
    eval_metrics = eval_metrics_dict[task_name]
    for metric in eval_metrics:
        result.update(metric(decoded_preds, decoded_labels))
    # Log "pred | label | input" triples under sanitized names ('/' and '-'
    # replaced with spaces so dataset names are filesystem-safe).
    if (os.path.isdir(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}") == False):
        os.mkdir(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}")
    with open(f"./{data_args.txt_save_dir}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}/{data_args.dataset_name[0].replace('/', ' ').replace('-', ' ')}*{data_args.train_prompts[0].replace('/', ' ').replace('-', ' ')}-{task_name.replace('/', ' ').replace('-', ' ')}.txt", 'a') as f:
        f.write('\n')
        f.write(task_name)
        f.write('\n')
        for (a, b, c) in zip(decoded_preds, decoded_labels, decoded_input_ids):
            f.write(a)
            f.write(' | ')
            f.write(b)
            f.write(' | ')
            f.write(c)
            f.write('\n')
        # Trailing summary line with every metric for this task.
        f.write('>> ')
        for (key, value) in result.items():
            f.write((((str(key) + ' : ') + str(value)) + ' | '))
        f.write('\n')
        f.write('\n')
    return result
if (model_args.attn_learning_rate is not None):
all_parameters = set(model.parameters())
attn_params = []
for (name, param) in model.named_parameters():
if ((name == 'encoder.attn_W_up') or (name == 'encoder.attn_W_down') or (name == 'encoder.layer_norm')):
attn_params += list(param)
attn_params = set(attn_params)
non_attn_params = (all_parameters - attn_params)
non_attn_params = list(non_attn_params)
attn_params = list(attn_params)
optim = AdamW([{'params': non_attn_params}, {'params': attn_params, 'lr': model_args.attn_learning_rate}], lr=training_args.learning_rate)
scheduler = get_linear_schedule_with_warmup(optim, num_warmup_steps=training_args.warmup_steps, num_training_steps=((len(train_dataset) * training_args.num_train_epochs) // (training_args.gradient_accumulation_steps * training_args.per_device_train_batch_size)))
trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn, optimizers=(optim, scheduler))
else:
trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_datasets=(eval_datasets if training_args.do_eval else None), data_info=data_info, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, evaluation_metrics=eval_metrics_dict, shared=model_args.shared_attn)
if training_args.do_eval:
print('')
print(eval_datasets)
print('')
if trainer.is_world_process_zero():
os.makedirs(training_args.output_dir, exist_ok=True)
save_training_config(sys.argv[1], training_args.output_dir)
model_args.save_adapter_weights = False
if model_args.save_adapter_weights:
params_to_save = {}
unfrozen_layers = 0
for (name, param) in trainer.model.named_parameters():
if (param.requires_grad == True):
print(name)
params_to_save[name] = 0
unfrozen_layers += 1
print(f'number of unfrozen layers (for beginning of training model): {unfrozen_layers}')
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
if training_args.compute_time:
torch.cuda.synchronize()
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
train_result = trainer.train(resume_from_checkpoint=checkpoint)
if training_args.compute_time:
end.record()
torch.cuda.synchronize()
total_time = (start.elapsed_time(end) / (1000 * 60))
performance_metrics.update({'total_time in minutes ': total_time})
if model_args.save_adapter_weights:
if (not os.path.exists(os.path.join(training_args.output_dir, 'adapter_params'))):
os.mkdir(os.path.join(training_args.output_dir, 'adapter_params'))
layer_cnt = 0
for (name, param) in trainer.model.named_parameters():
if (name in params_to_save):
save_path = os.path.join(training_args.output_dir, f'adapter_params/{name}.pt')
print(name)
torch.save(param, save_path)
layer_cnt += 1
print(f'finished saving adapters! saved {layer_cnt} number of layers')
exit()
if model_args.save_prefix_only:
for (name, param) in trainer.model.named_parameters():
if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))):
shared_params = param
torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')):
shared_params = param
if (model_args.shared_attn is True):
for i in range(config.num_target):
torch.save(shared_params[i], os.path.join(training_args.output_dir, 'prefix_embeddings_{}.pt'.format(i)))
else:
torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_Wa_weights.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_down.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'attn_W_up.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_weight.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(training_args.output_dir, 'layer_norm_bias.pt'))
else:
trainer.save_model()
train_metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
train_metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', train_metrics)
trainer.save_metrics('train', train_metrics)
if (not model_args.save_prefix_only):
trainer.save_state()
if (torch.cuda.is_available() and training_args.compute_memory):
peak_memory = ((torch.cuda.max_memory_allocated() / (1024 ** 2)) / 1000)
print('Memory utilization', peak_memory, 'GB')
performance_metrics.update({'peak_memory': peak_memory})
if (training_args.compute_memory or training_args.compute_time):
trainer.save_metrics('performance', performance_metrics)
if ((model_args.shared_attn is True) and (model_args.ignore_target is False)):
learned_embeddings = trainer.model.encoder.prefix_emb.clone().detach()
results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
if (model_args.shared_attn is True):
for (task, eval_dataset) in eval_datasets.items():
metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task)
trainer.log_metrics(f'eval_{task}_', metrics)
trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics)
if training_args.wandb_log:
wandb.log({f'eval_{task}_': metrics})
else:
for (task, eval_dataset) in eval_datasets.items():
print('')
print(task)
print('')
metrics = trainer.evaluate(eval_dataset=eval_dataset, max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, task=task)
trainer.log_metrics(f'eval_{task}_', metrics)
trainer.save_metrics(f"eval_{task.replace('/', ' ')}_", metrics)
if training_args.wandb_log:
wandb.log({f'eval_{task}_': metrics})
if model_args.save_prefix_only:
checkpoints = glob.glob(os.path.join(training_args.output_dir, 'checkpoint-*'))
for checkpoint_dir in checkpoints:
if (not os.path.exists(os.path.join(checkpoint_dir, 'pytorch_model.bin'))):
continue
checkpoint_model = torch.load(os.path.join(os.path.join(checkpoint_dir, 'pytorch_model.bin')))
new_dir = '{}_prompt_only'.format(checkpoint_dir)
os.mkdir(new_dir)
for (name, param) in checkpoint_model.items():
if ((model_args.attn_prefix_tuning is False) and (('prefix_shared' in name) or ('prefix' in name))):
shared_params = param
torch.save(shared_params, os.path.join(training_args.output_dir, 'prefix_embeddings.pt'))
elif ((model_args.attn_prefix_tuning is True) and (name == 'prefix_shared')):
shared_params = param
if (model_args.shared_attn is True):
for i in range(config.num_target):
torch.save(shared_params[i], os.path.join(new_dir, 'prefix_embeddings_{}.pt'.format(i)))
else:
torch.save(shared_params, os.path.join(new_dir, 'prefix_embeddings.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_Wa.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(new_dir, 'attn_Wa_weights.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_down.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_down.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.attn_W_up.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(new_dir, 'attn_W_up.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.weight' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_weight.pt'))
if ((model_args.attn_prefix_tuning is True) and ('encoder.layer_norm.bias' == name)):
attn_weights_params = param
torch.save(attn_weights_params, os.path.join(new_dir, 'layer_norm_bias.pt'))
try:
shutil.rmtree(checkpoint_dir)
except OSError as e:
print(('Error: %s : %s' % (checkpoint_dir, e.strerror)))
if training_args.do_test:
logger.info('*** Test ***')
if (model_args.shared_attn is True):
for (idx, (task, test_dataset)) in enumerate(test_datasets.items()):
trainer.model.encoder.prefix_emb[0].data = learned_embeddings[idx]
metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task)
trainer.log_metrics(f'test_{task}_', metrics)
trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics)
else:
for (task, test_dataset) in test_datasets.items():
metrics = trainer.evaluate(eval_dataset=test_dataset, max_length=data_args.test_max_target_length, num_beams=data_args.num_beams, metric_key_prefix='test', task=task)
trainer.log_metrics(f'test_{task}_', metrics)
trainer.save_metrics(f"test_{task.replace('/', ' ')}_", metrics)
return results |
def _imagenet(split: str) -> Dataset:
    """Build the ImageNet dataset for the requested *split*.

    The dataset root is read from the environment variable named by
    ``IMAGENET_LOC_ENV``; 'train' uses random-crop augmentation, 'test'
    uses the standard resize + center-crop pipeline over the ``val`` dir.

    Raises:
        RuntimeError: if the ImageNet location env var is not set.
        ValueError: if *split* is neither 'train' nor 'test'.
    """
    if IMAGENET_LOC_ENV not in os.environ:
        raise RuntimeError('environment variable for ImageNet directory not set')
    root = os.environ[IMAGENET_LOC_ENV]
    if split == 'train':
        subdir = os.path.join(root, 'train')
        transform = transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
    elif split == 'test':
        subdir = os.path.join(root, 'val')
        transform = transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor()])
    else:
        # Previously an unknown split fell through to a NameError on
        # `subdir`/`transform`; fail fast with a clear message instead.
        raise ValueError(f"unknown split: {split!r} (expected 'train' or 'test')")
    return datasets.ImageFolder(subdir, transform)
def get_model(args, config):
    """Return a causal LM: loaded from ``args.model_name_or_path`` if given,
    otherwise freshly initialized from *config*."""
    if not args.model_name_or_path:
        # No checkpoint supplied: instantiate random weights from the config.
        logger.info('Training new model from scratch')
        return AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code)
    return AutoModelForCausalLM.from_pretrained(
        args.model_name_or_path,
        # Checkpoints named *.ckpt are TensorFlow exports.
        from_tf=bool('.ckpt' in args.model_name_or_path),
        config=config,
        trust_remote_code=args.trust_remote_code,
        ignore_mismatched_sizes=args.ignore_mismatched_sizes,
    )
class LaVisualizer(object):
    """Render an AST (tree of ``Node`` objects) to a Graphviz diagram.

    Performs a breadth-first walk over the node graph, giving each distinct
    node a numeric Graphviz id (``self.tags``) so shared nodes are drawn
    once, and writes the rendered graph to a file named 'AST'.
    """

    def __init__(self):
        self.node = []    # root node of the most recent visualize() call
        self.ps = []      # Digraph being built (list placeholder until then)
        self.tags = {}    # node -> graphviz id, dedupes shared nodes
        self.index = 0    # next unused graphviz node id
        self.queue = []   # BFS worklist

    def visualize(self, node):
        """Draw *node* and everything reachable from it into 'AST' (no viewer)."""
        self.reset()
        self.node = node
        self.ps = Digraph(name='pet-shop', node_attr={'shape': 'plaintext', 'fontsize': '12', 'height': '.1'}, edge_attr={'arrowsize': '.5', 'minlen': '1'})
        self.queue.append(self.node)
        while self.queue:
            cur_node = self.queue.pop(0)  # FIFO pop -> breadth-first order
            cur_name = type(cur_node).__name__
            cur_index = self.index
            # Reuse the id of an already-seen node, else register a new one.
            # NOTE(review): an already-tagged node still has its children
            # expanded again below, so shared subtrees produce duplicate
            # edges and a cyclic graph would never terminate — confirm the
            # inputs are strictly trees.
            if (cur_node in self.tags):
                cur_index = self.tags[cur_node]
            else:
                self.tags[cur_node] = self.index
                self.ps.node(name=str(self.index), label=cur_name)
                self.index += 1
            # Leaf: the node's ast is a plain token string.
            if isinstance(cur_node.ast, str):
                self.ps.node(name=str(self.index), label=cur_node.ast)
                self.ps.edge(str(cur_index), str(self.index))
                self.index += 1
                continue
            # Interior: ast maps child-field names to child value(s).
            for (k, v) in cur_node.ast.items():
                if (k != 'parseinfo'):  # skip parser bookkeeping entries
                    children = getattr(cur_node, k)
                    if isinstance(children, list):
                        for child in children:
                            if (child is not None):
                                self.handleChild(child, cur_index, k)
                    elif (children is not None):
                        self.handleChild(children, cur_index, k)
        src = Source(self.ps.source)
        src.render('AST', view=False)

    def handleChild(self, child, cur_index, k):
        """Emit a graph node/edge for one child reached via field *k*."""
        if isinstance(child, Node):
            node_name = type(child).__name__
            # Enqueue for BFS expansion (even if previously tagged; see NOTE above).
            self.queue.append(child)
            node_index = self.index
            if (child in self.tags):
                node_index = self.tags[child]
            else:
                self.ps.node(name=str(self.index), label=((k + ':') + node_name))
                self.tags[child] = self.index
                self.index += 1
            self.ps.edge(str(cur_index), str(node_index))
        else:
            # Non-Node child — presumably a plain string label; TODO confirm.
            self.ps.node(name=str(self.index), label=((k + ':') + child))
            self.ps.edge(str(cur_index), str(self.index))
            self.index += 1

    def reset(self):
        """Clear per-run traversal state (tags, id counter, worklist)."""
        self.tags = {}
        self.index = 0
        self.queue = []
def parse_args():
    """Parse command-line options for the benchmark-list-to-script converter.

    Returns:
        argparse.Namespace with ``config``, ``port``, ``run`` and ``out``.
    """
    parser = argparse.ArgumentParser(
        description='Convert benchmark model list to script')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--port', type=int, default=29666,
                        help='dist port')
    parser.add_argument('--run', action='store_true',
                        help='run script directly')
    parser.add_argument('--out', type=str,
                        help='path to save model benchmark script')
    return parser.parse_args()
def motar(df: DataFrame, num_matches: int, num_misses: int, num_switches: int, num_false_positives: int, num_objects: int, alpha: float=1.0) -> float:
    """Compute the MOTAR score (recall-normalized MOTA), clipped at 0.

    Args:
        df: accumulated tracking events (currently unused by the formula).
        num_matches: true-positive matches.
        num_misses: missed ground-truth objects.
        num_switches: identity switches.
        num_false_positives: spurious predictions.
        num_objects: total ground-truth objects.
        alpha: weight on the error term (1.0 reproduces standard MOTAR).

    Returns:
        The MOTAR value in [0, 1], or ``np.nan`` when undefined
        (``num_objects == 0`` or zero recall).
    """
    # Degenerate case: no ground-truth objects at all. The original code
    # raised ZeroDivisionError here; report "undefined" instead.
    if num_objects == 0:
        return np.nan
    recall = num_matches / num_objects
    # Errors in excess of what the achieved recall already accounts for.
    nominator = (num_misses + num_switches + num_false_positives) - (1 - recall) * num_objects
    denominator = recall * num_objects
    if denominator == 0:
        # Zero recall: the score is undefined.
        motar_val = np.nan
    else:
        motar_val = 1 - (alpha * nominator) / denominator
    # Clip negative scores to zero; np.maximum propagates NaN unchanged.
    motar_val = np.maximum(0, motar_val)
    return motar_val
def cmpm_loss_compute(text_embeddings, image_embeddings, labels):
    """Cross-modal projection matching (CMPM) loss terms for image/text pairs.

    Assumes TF1-style graph tensors: embeddings of shape (batch, dim) with a
    *static* batch size, and ``labels`` of shape (batch,) identity ids —
    TODO confirm against the caller.

    Returns:
        (i2t_matching_loss, t2i_matching_loss, pos_avg_dist, neg_avg_dist)
    """
    # Static batch size is required; get_shape() returns None for dynamic dims.
    batch_size = image_embeddings.get_shape().as_list()[0]
    mylabels = tf.cast(tf.reshape(labels, [batch_size, 1]), tf.float32)
    # labelD[i, j] is small when samples i and j carry the same label.
    labelD = pairwise_distance(mylabels, mylabels)
    # 1.0 where the pair is a positive match (same label), else 0.0.
    label_mask = tf.cast(tf.less(labelD, 0.5), tf.float32)
    image_embeddings_norm = tf.nn.l2_normalize(image_embeddings, dim=(- 1))
    text_embeddings_norm = tf.nn.l2_normalize(text_embeddings, dim=(- 1))
    # Project each modality onto the unit-norm embeddings of the other.
    image_proj_text = tf.matmul(image_embeddings, tf.transpose(text_embeddings_norm))
    text_proj_image = tf.matmul(text_embeddings, tf.transpose(image_embeddings_norm))
    # Matching distributions over the batch, one per direction.
    i2t_pred = tf.nn.softmax(image_proj_text)
    t2i_pred = tf.nn.softmax(text_proj_image)
    # Normalize mask rows into target distributions over the true matches.
    label_mask = tf.divide(label_mask, tf.reduce_sum(label_mask, axis=1, keep_dims=True))
    # KL-style divergence of predictions from the target distribution;
    # the 1e-08 terms guard against log(0) and division by zero.
    i2t_matching_loss = tf.reduce_mean(tf.reduce_sum((i2t_pred * tf.log((1e-08 + (i2t_pred / (label_mask + 1e-08))))), 1))
    t2i_matching_loss = tf.reduce_mean(tf.reduce_sum((t2i_pred * tf.log((1e-08 + (t2i_pred / (label_mask + 1e-08))))), 1))
    # Diagnostics: mean cosine distance over positive and negative pairs.
    cosdist = (1.0 - tf.matmul(text_embeddings_norm, tf.transpose(image_embeddings_norm)))
    pos_avg_dist = tf.reduce_mean(tf.boolean_mask(cosdist, tf.less(labelD, 0.5)))
    neg_avg_dist = tf.reduce_mean(tf.boolean_mask(cosdist, tf.greater(labelD, 0.5)))
    return (i2t_matching_loss, t2i_matching_loss, pos_avg_dist, neg_avg_dist)
class Linear(torch.nn.Linear):
    """``torch.nn.Linear`` that tolerates empty (zero-element) inputs.

    When the input carries no elements, the layer returns a correctly shaped
    empty tensor via ``NewEmptyTensorOp`` instead of running the matmul; in
    training mode a zero-valued dummy term keeps the parameters attached to
    the autograd graph.
    """

    def forward(self, x):
        # Non-empty input: plain linear behavior.
        if x.numel() != 0:
            return super().forward(x)
        # Empty input: fabricate an output of shape (batch, out_features).
        shape = [x.shape[0], self.out_features]
        result = NewEmptyTensorOp.apply(x, shape)
        if not self.training:
            return result
        # Always-zero term that still references every parameter.
        zero = sum(p.view(-1)[0] for p in self.parameters()) * 0.0
        return result + zero
def register_coco_instances_with_attributes(name, metadata, json_file, image_root):
    """Register a COCO-format dataset (with attribute annotations) as *name*."""

    # Annotations are loaded lazily, only when the dataset is requested.
    def _loader():
        return load_coco_with_attributes_json(json_file, image_root, name)

    DatasetCatalog.register(name, _loader)
    # Attach bookkeeping metadata consumed by the COCO evaluator.
    MetadataCatalog.get(name).set(
        json_file=json_file,
        image_root=image_root,
        evaluator_type='coco',
        **metadata,
    )
def read_examples(input_file):
    """Read one ``InputExample`` per line from *input_file*.

    A line of the form ``"A ||| B"`` becomes a (text_a, text_b) pair; any
    other line becomes a single-segment example with ``text_b=None``.
    ``unique_id`` values are assigned sequentially from 0.
    """
    examples = []
    unique_id = 0
    # Compile once; the original rebuilt this regex for every input line.
    pair_re = re.compile(r'^(.*) \|\|\| (.*)$')
    with open(input_file, 'r') as reader:
        while True:
            # convert_to_unicode normalizes str/bytes (BERT tokenization util).
            line = tokenization.convert_to_unicode(reader.readline())
            if not line:
                break
            line = line.strip()
            m = pair_re.match(line)
            if m is None:
                text_a = line
                text_b = None
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            examples.append(InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples
def setup_default_logging(default_level=logging.INFO, log_path=''):
    """Attach a console handler to the root logger and, when *log_path* is
    given, a size-rotating file handler as well."""
    # Console output uses a formatter that suppresses the INFO prefix.
    stream = logging.StreamHandler()
    stream.setFormatter(FormatterNoInfo())
    root = logging.root
    root.addHandler(stream)
    root.setLevel(default_level)
    if not log_path:
        return
    # File log: 2 MiB per file with up to 3 rotated backups.
    rotating = logging.handlers.RotatingFileHandler(log_path, maxBytes=((1024 ** 2) * 2), backupCount=3)
    rotating.setFormatter(logging.Formatter('%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s'))
    root.addHandler(rotating)
def save_checkpoint(P, step, best, model_state, optim_state, logdir, is_best=False):
    """Persist model/optimizer state plus auxiliary training state to *logdir*.

    Files are written under a ``best.`` or ``last.`` prefix depending on
    *is_best*. Optional attributes of *P* (per-parameter inner lrs, EMA
    buffers, predictor) are saved only when present.
    """
    prefix = 'best' if is_best else 'last'
    last_model = os.path.join(logdir, f'{prefix}.model')
    last_optim = os.path.join(logdir, f'{prefix}.optim')
    last_config = os.path.join(logdir, f'{prefix}.configs')
    # Learned per-parameter inner learning rates (meta-learning setups).
    if isinstance(P.inner_lr, OrderedDict):
        last_lr = os.path.join(logdir, f'{prefix}.lr')
        torch.save(P.inner_lr, last_lr)
    if hasattr(P, 'moving_average'):
        last_ema = os.path.join(logdir, f'{prefix}.ema')
        torch.save(P.moving_average, last_ema)
    if hasattr(P, 'moving_inner_lr'):
        last_lr_ema = os.path.join(logdir, f'{prefix}.lr_ema')
        torch.save(P.moving_inner_lr, last_lr_ema)
    if hasattr(P, 'predictor'):
        last_predictor = os.path.join(logdir, f'{prefix}.predictor')
        # BUG FIX: previously saved P.moving_average into the predictor file.
        torch.save(P.predictor, last_predictor)
    opt = {'step': step, 'best': best}
    torch.save(model_state, last_model)
    torch.save(optim_state, last_optim)
    # Step/best bookkeeping goes through pickle rather than torch.save.
    with open(last_config, 'wb') as handle:
        pickle.dump(opt, handle, protocol=pickle.HIGHEST_PROTOCOL)
class ConcatDataset(Dataset):
    """Dataset formed by concatenating several map-style datasets.

    Indexing is resolved with a cumulative-size table and binary search.
    """

    @staticmethod
    def cumsum(sequence):
        # Running cumulative lengths, e.g. lengths [3, 2] -> [3, 5].
        # BUG FIX: must be a staticmethod — it is invoked as
        # self.cumsum(self.datasets); without the decorator `self` binds to
        # `sequence` and the real argument raises a TypeError.
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        for d in self.datasets:
            # Iterable datasets have no __len__/__getitem__ and cannot be concatenated this way.
            assert not isinstance(d, IterableDataset), 'ConcatDataset does not support IterableDataset'
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError('absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        # Locate the child dataset, then the offset within it.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    def cummulative_sizes(self):
        """Deprecated, misspelled accessor; use the `cumulative_sizes` attribute."""
        warnings.warn('cummulative_sizes attribute is renamed to cumulative_sizes', DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
class TFFunnelForSequenceClassification(metaclass=DummyObject):
    # Import-time placeholder: stands in for the real TensorFlow model class
    # when the 'tf' backend is not installed. Instantiation raises a clear
    # backend-missing error via requires_backends instead of an ImportError
    # at module import.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
@mx.operator.register('tile_as')
class TileAsProp(mx.operator.CustomOpProp):
    """Property class for the custom 'tile_as' operator: tiles `data_content`
    along the batch axis to match the batch size of `data_shape`.

    BUG FIX: the decorator line had been truncated to `.register('tile_as')`
    (a syntax error); restored the standard mx.operator.register decorator.
    """

    def __init__(self):
        # need_top_grad=False: backward does not consume the output gradient.
        super(TileAsProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data_content', 'data_shape']

    def list_outputs(self):
        return ['data_tiled']

    def infer_shape(self, in_shape):
        data_content_shape = in_shape[0]
        data_shape_shape = in_shape[1]
        # Output: batch dim of data_shape, remaining dims of data_content.
        tiled_data_shape = (data_shape_shape[0], data_content_shape[1], data_content_shape[2], data_content_shape[3])
        return ([data_content_shape, data_shape_shape], [tiled_data_shape])

    def create_operator(self, ctx, shapes, dtypes):
        return TileAsOperator()

    def declare_backward_dependency(self, out_grad, in_data, out_data):
        return out_grad
def test_set_literal():
    """Set literal `s` depends on x and y: rebinding unrelated z leaves it
    fresh; rebinding x must flag the later use of s as stale."""
    for cell in ('x, y, z = 1, 2, 3',
                 's = {x + 1, y + 7}',
                 'z = 42',
                 'logging.info(s)'):
        run_cell(cell)
    assert_not_detected()
    run_cell('x = 17')
    run_cell('logging.info(s)')
    assert_detected()
def densenet201(pretrained=False, **kwargs):
    """Construct a DenseNet-201 model (growth_rate=32, block_config=(6, 12, 48, 32)).

    NOTE(review): `pretrained` is accepted but currently ignored — no weights
    are loaded here; confirm whether pretrained-weight loading was dropped on
    purpose.
    """
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs)
    return model
def predict_type_facenet(image_perturbed, cleancrop):
    """Classify perturbed face images with a saved FaceNet classifier.

    Each image is face-cropped with MTCNN (falling back to *cleancrop* when
    no face is detected), then classified in batches.

    Returns:
        (typess, percent): per image, the top-5 class indices, and the
        softmax scores scaled to percentages.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    def collate_fn(x):
        return x

    loader = DataLoader(image_perturbed, batch_size=42, shuffle=False, collate_fn=collate_fn)
    mtcnn = MTCNN(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True, device=device)
    # BUG FIX: map_location was hard-coded to 'cuda:0', which crashes on
    # CPU-only machines even though `device` falls back to CPU above.
    resnet = torch.load('./models/facenet/net_13_022.pth', map_location=device).to(device)
    resnet.eval()
    resnet.classify = True
    percent = []
    typess = []
    # Pure inference: run the whole pipeline without building autograd graphs
    # (previously the forward pass ran outside the no_grad block).
    with torch.no_grad():
        for X in loader:
            C = mtcnn(X)
            C = [(cleancrop if (x is None) else x) for x in C]
            batch_t = torch.stack(C).to(device)
            out = resnet(batch_t).cpu()
            (_, indices) = torch.sort(out, descending=True)
            percentage = torch.nn.functional.softmax(out, dim=1) * 100
            for i in range(len(out)):
                typess.append([indices[i][k].item() for k in range(5)])
                percent.append(percentage[i])
    return (typess, percent)
class Quantization_Conf(Conf):
    """Quantization-specific configuration.

    Accepts a YAML file path, an already-parsed DotDict, or nothing (pure
    schema defaults), and normalizes all three into a validated DotDict.
    """

    def __init__(self, cfg=None):
        # cfg: path to a YAML file, a DotDict merged over the defaults,
        # or None for the schema defaults alone.
        if isinstance(cfg, str):
            self.usr_cfg = DotDict(self._read_cfg(cfg))
        elif isinstance(cfg, DotDict):
            self.usr_cfg = DotDict(schema.validate(self._convert_cfg(cfg, copy.deepcopy(quantization_default_schema.validate(dict())))))
        else:
            self.usr_cfg = DotDict(quantization_default_schema.validate(dict()))
        # Tuning spaces are built lazily (see modelwise_tune_space).
        self._model_wise_tune_space = None
        self._opwise_tune_space = None

    def _merge_dicts(self, src, dst):
        """Recursively merge *src* into *dst*, keeping only src list entries
        that dst also allows (float entries always pass); returns dst.
        Keys present only in src are intentionally dropped."""
        for key in src:
            if (key in dst):
                if (isinstance(dst[key], dict) and isinstance(src[key], dict)):
                    self._merge_dicts(src[key], dst[key])
                elif ((dst[key] == src[key]) or (src[key] is None)):
                    pass  # already consistent; nothing to do
                else:
                    # Intersect the user's options with the capability list;
                    # floats (e.g. numeric thresholds) are kept unconditionally.
                    value = [value for value in src[key] if ((value in dst[key]) or isinstance(value, float))]
                    if (value != []):
                        dst[key] = value
        return dst

    def modelwise_tune_space(self, model_wise_quant):
        """Build the per-op-type tuning space by merging the user config into
        the *model_wise_quant* capabilities; optype_wise overrides win over
        the generic model_wise section."""
        cfg = self.usr_cfg
        self._model_wise_tune_space = OrderedDict()
        for optype in model_wise_quant.keys():
            if (cfg.quantization.optype_wise and (optype in cfg.quantization.optype_wise)):
                self._model_wise_tune_space[optype] = self._merge_dicts(cfg.quantization.optype_wise[optype], model_wise_quant[optype])
            else:
                self._model_wise_tune_space[optype] = self._merge_dicts(cfg.quantization.model_wise, model_wise_quant[optype])
        return self._model_wise_tune_space
def predict_classes(model, img, xs, watermark, target_class, sl):
    """Apply the watermark perturbation to *img* and return the model's
    score for *target_class* on the result."""
    perturbed = add_watermark_to_image(img, xs, watermark, sl).convert('RGB')
    scores = label_model(model, perturbed).cpu().detach().numpy()
    return scores[0][target_class]
def iplot(figure_or_data, show_link=True, link_text='Export to plot.ly', validate=True, image=None, filename='plot_image', image_width=800, image_height=600):
    """Render a Plotly figure inline in the current IPython notebook.

    Requires plotly.offline.init_notebook_mode() to have been run first.
    When *image* is one of __IMAGE_FORMATS, a follow-up script is injected
    that triggers a client-side download of the rendered plot.
    """
    if (not __PLOTLY_OFFLINE_INITIALIZED):
        raise PlotlyError('\n'.join(['Plotly Offline mode has not been initialized in this notebook. Run: ', '', 'import plotly', 'plotly.offline.init_notebook_mode() # run at the start of every ipython notebook']))
    if (not tools._ipython_imported):
        raise ImportError('`iplot` can only run inside an IPython Notebook.')
    # Fixed 100% x 525px layout; global_requirejs=True because notebooks load
    # plotly.js through require.js.
    (plot_html, plotdivid, width, height) = _plot_html(figure_or_data, show_link, link_text, validate, '100%', 525, global_requirejs=True)
    display(HTML(plot_html))
    if image:
        if (image not in __IMAGE_FORMATS):
            raise ValueError('The image parameter must be one of the following: {}'.format(__IMAGE_FORMATS))
        script = get_image_download_script('iplot').format(format=image, width=image_width, height=image_height, filename=filename, plot_id=plotdivid)
        # Brief pause so the plot div exists before the download script runs.
        time.sleep(1)
        display(HTML(script))
class Image():
    """Lazy holder for one image's feature vector, addressed by a row index
    into a shared features array."""

    def __init__(self, image_id, features_idx):
        self.image_id = image_id          # external identifier of the image
        self.features_idx = features_idx  # row index into the shared features array
        self.features = np.array([])      # cache; empty until first load with mem=True

    def load(self, images_features, mem=True):
        """Return this image's features from *images_features*, caching them
        on the instance when *mem* is true."""
        if not len(self.features):
            fetched = images_features[self.features_idx]
            if mem:
                self.features = fetched
            return fetched
        return self.features
class MAML():
    """Model-Agnostic Meta-Learning (MAML) for meta-RL.

    For each sampled task the policy is adapted with `num_grad_updates`
    differentiable inner-loop SGD steps; the meta-objective computed on the
    post-adaptation samples is then optimized with `meta_optimizer`.

    Args:
        inner_algo: Inner policy-gradient algorithm (supplies loss/KL/entropy
            computation, sample processing and the value function).
        env: Multi-task environment exposing `sample_tasks`.
        policy: Policy to meta-train.
        meta_optimizer: Optimizer spec for the outer loop.
        meta_batch_size: Tasks sampled per meta-iteration.
        inner_lr: Inner (adaptation) step size.
        outer_lr: Outer (meta) step size.
        num_grad_updates: Adaptation steps per task.
        meta_evaluator: Optional evaluator run every
            `evaluate_every_n_epochs` epochs.
        evaluate_every_n_epochs: Evaluation period in epochs.
    """

    def __init__(self, inner_algo, env, policy, meta_optimizer, meta_batch_size=40, inner_lr=0.1, outer_lr=0.001, num_grad_updates=1, meta_evaluator=None, evaluate_every_n_epochs=1):
        self.sampler_cls = OnPolicyVectorizedSampler
        self.max_path_length = inner_algo.max_path_length
        self._meta_evaluator = meta_evaluator
        self._policy = policy
        self._env = env
        # Private copy so meta-training never mutates the inner algo's value
        # function; its initial weights are restored before each refit.
        self._value_function = copy.deepcopy(inner_algo._value_function)
        self._initial_vf_state = self._value_function.state_dict()
        self._num_grad_updates = num_grad_updates
        self._meta_batch_size = meta_batch_size
        self._inner_algo = inner_algo
        self._inner_optimizer = DifferentiableSGD(self._policy, lr=inner_lr)
        self._meta_optimizer = make_optimizer(meta_optimizer, module=policy, lr=_Default(outer_lr), eps=_Default(1e-05))
        self._evaluate_every_n_epochs = evaluate_every_n_epochs

    def train(self, runner):
        """Run meta-training for the epochs driven by *runner*.

        Returns the average return of the final meta-iteration."""
        last_return = None
        for _ in runner.step_epochs():
            (all_samples, all_params) = self._obtain_samples(runner)
            last_return = self.train_once(runner, all_samples, all_params)
            runner.step_itr += 1
        return last_return

    def train_once(self, runner, all_samples, all_params):
        """Perform one meta-update from the collected samples and log progress."""
        itr = runner.step_itr
        old_theta = dict(self._policy.named_parameters())
        kl_before = self._compute_kl_constraint(all_samples, all_params, set_grad=False)
        meta_objective = self._compute_meta_loss(all_samples, all_params)
        self._meta_optimizer.zero_grad()
        meta_objective.backward()
        self._meta_optimize(all_samples, all_params)
        loss_after = self._compute_meta_loss(all_samples, all_params, set_grad=False)
        kl_after = self._compute_kl_constraint(all_samples, all_params, set_grad=False)
        with torch.no_grad():
            policy_entropy = self._compute_policy_entropy([task_samples[0] for task_samples in all_samples])
            average_return = self.log_performance(itr, all_samples, meta_objective.item(), loss_after.item(), kl_before.item(), kl_after.item(), policy_entropy.mean().item())
        if (self._meta_evaluator and ((itr % self._evaluate_every_n_epochs) == 0)):
            self._meta_evaluator.evaluate(self)
        update_module_params(self._old_policy, old_theta)
        return average_return

    def _train_value_function(self, paths):
        """Refit the value function (from its initial state) on *paths*; return the loss."""
        self._value_function.load_state_dict(self._initial_vf_state)
        obs = np.concatenate([path['observations'] for path in paths], axis=0)
        returns = np.concatenate([path['returns'] for path in paths])
        obs = torch.Tensor(obs)
        returns = torch.Tensor(returns)
        vf_loss = self._value_function.compute_loss(obs, returns)
        self._inner_algo._vf_optimizer.zero_grad()
        vf_loss.backward()
        self._inner_algo._vf_optimizer.step()
        return vf_loss

    def _obtain_samples(self, runner):
        """Sample tasks and collect trajectories before and after each inner update.

        Returns (all_samples, all_params): per task, num_grad_updates + 1
        processed sample batches, and the post-adaptation policy parameters.
        """
        tasks = self._env.sample_tasks(self._meta_batch_size)
        all_samples = [[] for _ in range(len(tasks))]
        all_params = []
        theta = dict(self._policy.named_parameters())
        for (i, task) in enumerate(tasks):
            self._set_task(runner, task)
            for j in range((self._num_grad_updates + 1)):
                paths = runner.obtain_samples(runner.step_itr)
                batch_samples = self._process_samples(paths)
                all_samples[i].append(batch_samples)
                if (j < self._num_grad_updates):
                    # Only non-final adaptation steps keep the graph alive.
                    require_grad = (j < (self._num_grad_updates - 1))
                    self._adapt(batch_samples, set_grad=require_grad)
            all_params.append(dict(self._policy.named_parameters()))
            # Restore the pre-adaptation parameters before the next task.
            update_module_params(self._policy, theta)
        return (all_samples, all_params)

    def _adapt(self, batch_samples, set_grad=True):
        """One differentiable inner-loop policy update on *batch_samples*."""
        loss = self._inner_algo._compute_loss(*batch_samples[1:])
        self._inner_optimizer.zero_grad()
        loss.backward(create_graph=set_grad)
        with torch.set_grad_enabled(set_grad):
            self._inner_optimizer.step()

    def _meta_optimize(self, all_samples, all_params):
        """Step the meta-optimizer, supplying loss/constraint callbacks for CG-style optimizers."""
        if isinstance(self._meta_optimizer, ConjugateGradientOptimizer):
            self._meta_optimizer.step(f_loss=(lambda : self._compute_meta_loss(all_samples, all_params, set_grad=False)), f_constraint=(lambda : self._compute_kl_constraint(all_samples, all_params)))
        else:
            self._meta_optimizer.step((lambda : self._compute_meta_loss(all_samples, all_params, set_grad=False)))

    def _compute_meta_loss(self, all_samples, all_params, set_grad=True):
        """Mean post-adaptation loss across tasks (replays each task's adaptation)."""
        theta = dict(self._policy.named_parameters())
        old_theta = dict(self._old_policy.named_parameters())
        losses = []
        for (task_samples, task_params) in zip(all_samples, all_params):
            for i in range(self._num_grad_updates):
                require_grad = ((i < (self._num_grad_updates - 1)) or set_grad)
                self._adapt(task_samples[i], set_grad=require_grad)
            update_module_params(self._old_policy, task_params)
            with torch.set_grad_enabled(set_grad):
                last_update = task_samples[(- 1)]
                loss = self._inner_algo._compute_loss(*last_update[1:])
            losses.append(loss)
            # Reset both policies for the next task.
            update_module_params(self._policy, theta)
            update_module_params(self._old_policy, old_theta)
        return torch.stack(losses).mean()

    def _compute_kl_constraint(self, all_samples, all_params, set_grad=True):
        """Mean KL divergence on post-adaptation samples across tasks."""
        theta = dict(self._policy.named_parameters())
        old_theta = dict(self._old_policy.named_parameters())
        kls = []
        for (task_samples, task_params) in zip(all_samples, all_params):
            for i in range(self._num_grad_updates):
                require_grad = ((i < (self._num_grad_updates - 1)) or set_grad)
                self._adapt(task_samples[i], set_grad=require_grad)
            update_module_params(self._old_policy, task_params)
            with torch.set_grad_enabled(set_grad):
                kl = self._inner_algo._compute_kl_constraint(task_samples[(- 1)].observations)
            kls.append(kl)
            update_module_params(self._policy, theta)
            update_module_params(self._old_policy, old_theta)
        return torch.stack(kls).mean()

    def _compute_policy_entropy(self, task_samples):
        """Mean policy entropy over the given per-task sample batches."""
        obs = torch.stack([samples.observations for samples in task_samples])
        entropies = self._inner_algo._compute_policy_entropy(obs)
        return entropies.mean()

    def _set_task(self, runner, task):
        """Set *task* on every environment of the vectorized sampler."""
        for env in runner._sampler._vec_env.envs:
            env.set_task(task)

    @property
    def policy(self):
        """Policy being meta-trained.

        BUG FIX: `policy` and `_old_policy` were plain methods, but the rest
        of the class reads them as attributes (e.g.
        `self._old_policy.named_parameters()`), which would have operated on
        the bound-method object and crashed — they must be properties.
        """
        return self._policy

    @property
    def _old_policy(self):
        """Reference (pre-update) policy, owned by the inner algorithm."""
        return self._inner_algo._old_policy

    def _process_samples(self, paths):
        """Compute discounted returns, refit the value function, and package samples."""
        for path in paths:
            path['returns'] = tensor_utils.discount_cumsum(path['rewards'], self._inner_algo.discount).copy()
        self._train_value_function(paths)
        (obs, actions, rewards, _, valids, baselines) = self._inner_algo.process_samples(paths)
        return MAMLTrajectoryBatch(paths, obs, actions, rewards, valids, baselines)

    def log_performance(self, itr, all_samples, loss_before, loss_after, kl_before, kl, policy_entropy):
        """Record per-iteration metrics; return the mean return over tasks."""
        tabular.record('Iteration', itr)
        name_map = None
        if hasattr(self._env, 'all_task_names'):
            names = self._env.all_task_names
            name_map = dict(zip(names, names))
        rtns = log_multitask_performance(itr, TrajectoryBatch.from_trajectory_list(env_spec=self._env.spec, paths=[path for task_paths in all_samples for path in task_paths[self._num_grad_updates].paths]), discount=self._inner_algo.discount, name_map=name_map)
        with tabular.prefix((self._policy.name + '/')):
            tabular.record('LossBefore', loss_before)
            tabular.record('LossAfter', loss_after)
            tabular.record('dLoss', (loss_before - loss_after))
            tabular.record('KLBefore', kl_before)
            tabular.record('KLAfter', kl)
            tabular.record('Entropy', policy_entropy)
        return np.mean(rtns)

    def get_exploration_policy(self):
        """Return an adaptation-ready deep copy of the current policy."""
        return copy.deepcopy(self._policy)

    def adapt_policy(self, exploration_policy, exploration_trajectories):
        """Adapt *exploration_policy* on *exploration_trajectories*; return it."""
        (old_policy, self._policy) = (self._policy, exploration_policy)
        self._inner_algo.policy = exploration_policy
        self._inner_optimizer.module = exploration_policy
        paths = exploration_trajectories.to_trajectory_list()
        batch_samples = self._process_samples(paths)
        self._adapt(batch_samples, set_grad=False)
        self._policy = old_policy
        self._inner_algo.policy = self._inner_optimizer.module = old_policy
        return exploration_policy
def kitti_labels_to_yolo(dataroot):
    """Convert KITTI 'label_2' annotations into YOLO-format label files.

    Reads images from raw/training/image_2 (for normalization by image size)
    and labels from raw/training/label_2, and writes one YOLO label file per
    image into raw/yolo_style_labels. 'DontCare' entries are skipped.
    """
    from cv2 import imread
    print('Converting KITTI labels to YOLO label format.')
    imgs_dir = join(dataroot, 'raw', 'training', 'image_2')
    labels_dir = join(dataroot, 'raw', 'training', 'label_2')
    save_at_dir = join(dataroot, 'raw', 'yolo_style_labels')
    make_dirs(save_at_dir)
    class_names = ['Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc']
    img_file_names = sorted(os.listdir(imgs_dir))
    label_file_names = sorted(os.listdir(labels_dir))
    # Class name -> YOLO integer id, in declaration order.
    label_dict = dict(zip(class_names, range(len(class_names))))
    for (img_file_name, label_file_name) in zip(img_file_names, label_file_names):
        img = imread(join(imgs_dir, img_file_name))
        (img_height, img_width) = img.shape[:2]
        with open(join(labels_dir, label_file_name), 'r') as f:
            label_lines = f.readlines()
        # BUG FIX: write through a context manager so the output file is
        # closed even when a faulty label raises mid-loop (was a bare
        # open()/close() pair).
        with open(join(save_at_dir, label_file_name), 'w') as yolo_label_file:
            for line in label_lines:
                label_entry = line.split(' ')
                if (len(label_entry) != 15):
                    raise Exception(f'Faulty original label in: {label_file_name}')
                class_name = label_entry[0]
                if (class_name == 'DontCare'):
                    continue
                # KITTI stores pixel corners (x1, y1, x2, y2); YOLO wants a
                # normalized center / width / height.
                x1 = float(label_entry[4])
                y1 = float(label_entry[5])
                x2 = float(label_entry[6])
                y2 = float(label_entry[7])
                bbox_center_x = ((x1 + x2) / 2.0) / img_width
                bbox_center_y = ((y1 + y2) / 2.0) / img_height
                bbox_width = (x2 - x1) / img_width
                bbox_height = (y2 - y1) / img_height
                yolo_label_file.write(f'{label_dict[class_name]} {bbox_center_x} {bbox_center_y} {bbox_width} {bbox_height}\n')
def get_downsample_factor(model_config):
    """Compute the net spatial downsampling factor of a detection model.

    The factor is the product of the neck's downsampling strides, divided by
    its last upsampling stride (if any), times the backbone's ds_factor.
    Two-stage configs nest the relevant model under 'first_stage_cfg'.
    """
    try:
        neck_cfg = model_config['neck']
    except KeyError:  # FIX: was a bare `except:` that swallowed everything
        model_config = model_config['first_stage_cfg']
        neck_cfg = model_config['neck']
    downsample_factor = np.prod(neck_cfg.get('ds_layer_strides', [1]))
    if (len(neck_cfg.get('us_layer_strides', [])) > 0):
        # Upsampling in the neck cancels part of the downsampling.
        downsample_factor /= neck_cfg.get('us_layer_strides', [])[(- 1)]
    backbone_cfg = model_config['backbone']
    downsample_factor *= backbone_cfg['ds_factor']
    downsample_factor = int(downsample_factor)
    assert (downsample_factor > 0)
    return downsample_factor
def get_model(name, pretrained, num_channels, num_classes):
    """Instantiate a torchvision model by *name* and retarget its classifier
    head to *num_classes*.

    Args:
        name: Attribute name in torchvision.models (e.g. 'resnet18', 'alexnet').
        pretrained: Forwarded to the torchvision constructor.
        num_channels: 1 selects the grayscale ResNet variant; otherwise RGB.
        num_classes: Output size of the new classification head.
    """
    function = getattr(models, name)
    model = function(pretrained=pretrained)
    if ('resnet' in name):
        if (num_channels == 1):
            model = ResNet18Grayscale(models.resnet.BasicBlock, [2, 2, 2, 2], num_classes)
        else:
            model.fc = nn.Linear(512, num_classes)
    else:
        # BUG FIX: the original first replaced `model` with
        # nn.Sequential(*children[:-1]) — which has no `.classifier`
        # attribute — and mis-parenthesized the nn.Linear(...) call, so this
        # branch always crashed. Replace the final classifier layer in place
        # instead, sized from the last Linear layer's input width.
        in_features = list(model.classifier.children())[(- 3)].in_features
        model.classifier.add_module('6', nn.Linear(in_features, num_classes))
    return model
class UnicodeRegex(object):
    """Pre-compiled regexes over Unicode character classes, used for
    punctuation/symbol-aware tokenization."""

    def __init__(self) -> None:
        punctuation = self.property_chars('P')
        # Punctuation with a non-digit neighbor on either side, and any symbol.
        self.nondigit_punct_re = re.compile((('([^\\d])([' + punctuation) + '])'))
        self.punct_nondigit_re = re.compile((('([' + punctuation) + '])([^\\d])'))
        self.symbol_re = re.compile((('([' + self.property_chars('S')) + '])'))

    def property_chars(self, prefix: str) -> str:
        """Return all Unicode characters whose category starts with *prefix*
        (e.g. 'P' for punctuation, 'S' for symbols)."""
        # MODERNIZED: chr() is the Python 3 builtin; six.unichr was an
        # unnecessary third-party indirection.
        punctuation = ''.join((chr(x) for x in range(sys.maxunicode) if unicodedata.category(chr(x)).startswith(prefix)))
        return punctuation
class ResFeaturePyramidBlock(nn.Module):
    """Residual conv block with 2x bicubic upsampling and feature-pyramid fusion.

    The output has `out_channels` channels: half come from the upsampled
    residual conv path, half from a 1x1-projected pyramid feature repeated
    across the batch expansion and concatenated on the channel axis.
    """

    def __init__(self, in_channels: int, out_channels: int, feature_channels: int, convolution: Type=nn.Conv2d, normalization: Type=nn.InstanceNorm2d, activation: Type=nn.PReLU, dropout: float=0.0) -> None:
        super(ResFeaturePyramidBlock, self).__init__()
        # Dropout probability applied after upsampling; 0 disables it.
        self.dropout = dropout
        # Main path: two conv -> norm -> activation stages at half the output width.
        self.main_mapping = nn.Sequential(convolution(in_channels=in_channels, out_channels=(out_channels // 2), kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=True), normalization(num_features=(out_channels // 2), affine=True, track_running_stats=True), activation(), convolution(in_channels=(out_channels // 2), out_channels=(out_channels // 2), kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=True), normalization(num_features=(out_channels // 2), affine=True, track_running_stats=True), activation())
        # 1x1 projection for the skip path when channel counts differ; identity otherwise.
        self.residual_mapping = (convolution(in_channels=in_channels, out_channels=(out_channels // 2), kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True) if (in_channels != (out_channels // 2)) else nn.Identity())
        self.upsampling = nn.Upsample(scale_factor=(2, 2), mode='bicubic', align_corners=False)
        # Projects pyramid features to half the output width before concatenation.
        self.feature_mapping = convolution(in_channels=feature_channels, out_channels=(out_channels // 2), kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)

    def forward(self, input: torch.Tensor, feature: torch.Tensor) -> torch.Tensor:
        output = self.main_mapping(input)
        output = (output + self.residual_mapping(input))
        output = self.upsampling(output)
        if (self.dropout > 0.0):
            output = F.dropout(output, p=self.dropout, training=self.training)
        # Repeat each mapped pyramid feature output.shape[0] / feature.shape[0]
        # times along the batch (assumes input batch is a whole multiple of the
        # feature batch — TODO confirm), then concatenate on channels.
        output = torch.cat((output, self.feature_mapping(feature).unsqueeze(dim=1).repeat(1, int((output.shape[0] / feature.shape[0])), 1, 1, 1).flatten(0, 1).contiguous()), dim=1)
        return output
def make_weights(N, weights):
    """Build an (N, N) row-stochastic matrix in which row i spreads its mass
    over the neighbors of i according to *weights* (centered on i), with
    out-of-range positions clipped to the edges."""
    assert (len(weights) % 2) == 1, f'Expected odd number of weights, got: {weights}'
    center = (len(weights) - 1) // 2
    out = np.zeros((N, N))
    for row in range(N):
        for offset, w in enumerate(weights):
            col = int(np.clip(row + offset - center, 0, N - 1))
            out[row, col] += w
    # Each row must remain a probability distribution.
    assert np.allclose(out.sum(axis=-1), 1)
    return out
class ReductionBUnit(nn.Module):
    """Reduction-B unit: four parallel branches (three conv sequences and a
    max-pool) whose outputs are combined by a Concurrent container."""

    def __init__(self):
        super(ReductionBUnit, self).__init__()
        in_channels = 1088
        self.branches = Concurrent()
        # (name, out_channels, kernel sizes, strides, paddings) per conv branch.
        branch_specs = [
            ('branch1', (256, 384), (1, 3), (1, 2), (0, 0)),
            ('branch2', (256, 288), (1, 3), (1, 2), (0, 0)),
            ('branch3', (256, 288, 320), (1, 3, 3), (1, 1, 2), (0, 1, 0)),
        ]
        for name, channels, kernels, strides, pads in branch_specs:
            self.branches.add_module(name, ConvSeqBranch(
                in_channels=in_channels, out_channels_list=channels,
                kernel_size_list=kernels, strides_list=strides,
                padding_list=pads))
        self.branches.add_module('branch4', MaxPoolBranch())

    def forward(self, x):
        return self.branches(x)
class TrainingSampler(Sampler):
    """Infinite stream of dataset indices, sharded across distributed workers.

    Each rank yields every world_size-th index of a (per-epoch reshuffled,
    when *shuffle*) permutation of range(size), all ranks sharing one seed.
    """

    def __init__(self, size: int, shuffle: bool=True, seed: Optional[int]=None):
        assert size > 0
        self._size = size
        self._shuffle = shuffle
        # All workers must agree on the seed so their shards are disjoint.
        self._seed = int(comm.shared_random_seed() if seed is None else seed)
        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

    def __iter__(self):
        yield from itertools.islice(
            self._infinite_indices(), self._rank, None, self._world_size)

    def _infinite_indices(self):
        gen = torch.Generator()
        gen.manual_seed(self._seed)
        while True:
            if self._shuffle:
                yield from torch.randperm(self._size, generator=gen)
            else:
                yield from torch.arange(self._size)
class BuildCommand(build):
    """Custom setuptools 'build' step that compiles native/visualization
    assets before the normal Python build."""
    def run(self):
        script_path = os.path.dirname(os.path.abspath(__file__))
        # The shared libebm path only exists in a source checkout; its
        # presence means the native library must be built locally.
        sym_path = os.path.join(script_path, 'interpret', 'root', 'shared', 'libebm')
        if os.path.exists(sym_path):
            build_libebm()
            build_vis_if_needed()
        build.run(self)
def test_standard_anchor_generator():
    """A standard AnchorGenerator can be built from a registry config dict."""
    from mmdet.core.anchor import build_anchor_generator
    anchor_generator_cfg = dict(type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8])
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    assert (anchor_generator is not None)
def validate_on_data(model: Model, data: Dataset, batch_size: int, use_cuda: bool, max_output_length: int, level: str, eval_metric: Optional[str], n_gpu: int, batch_class: Batch=Batch, compute_loss: bool=False, beam_size: int=1, beam_alpha: int=(- 1), batch_type: str='sentence', postprocess: bool=True, bpe_type: str='subword-nmt', sacrebleu: dict=None) -> (float, float, float, List[str], List[List[str]], List[str], List[str], List[List[str]], List[np.array]):
    """Translate *data* with *model*, optionally compute validation loss, and
    score the hypotheses against the references with *eval_metric*.

    Loss and perplexity are only computed when *compute_loss* is set and
    targets are available; otherwise they are returned as -1. The score is
    -1 when the dataset carries no references.

    Returns:
        (score, loss, ppl, sources, sources_raw, references, hypotheses,
        decoded_hypotheses, attention_scores)
    """
    assert (batch_size >= n_gpu), 'batch_size must be bigger than n_gpu.'
    if (sacrebleu is None):
        # Default scoring options; detokenization hooks are off by default.
        sacrebleu = {'remove_whitespace': True, 'tokenize': '13a', 'use_detokenization': False}
    if ((batch_size > 1000) and (batch_type == 'sentence')):
        logger.warning("WARNING: Are you sure you meant to work on huge batches like this? 'batch_size' is > 1000 for sentence-batching. Consider decreasing it or switching to 'eval_batch_type: token'.")
    valid_iter = make_data_iter(dataset=data, batch_size=batch_size, batch_type=batch_type, shuffle=False, train=False)
    valid_sources_raw = data.src
    pad_index = model.src_vocab.stoi[PAD_TOKEN]
    model.eval()
    # Older checkpoints may lack a combiner module; install a no-op one.
    if (not hasattr(model, 'combiner')):
        model.combiner = NoCombiner()
    with torch.no_grad():
        all_outputs = []
        valid_attention_scores = []
        total_loss = 0
        total_ntokens = 0
        total_nseqs = 0
        for valid_batch in iter(valid_iter):
            batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
            # Batches are length-sorted for packing; remember how to undo it.
            sort_reverse_index = batch.sort_by_src_length()
            if (compute_loss and (batch.trg is not None)):
                (batch_loss, _, _, _) = model(return_type='combiner_loss', **vars(batch))
                if (n_gpu > 1):
                    batch_loss = batch_loss.mean()
                total_loss += batch_loss
                total_ntokens += batch.ntokens
                total_nseqs += batch.nseqs
            (output, attention_scores) = run_batch(model=model, batch=batch, beam_size=beam_size, beam_alpha=beam_alpha, max_output_length=max_output_length)
            # Restore the original sentence order before collecting outputs.
            all_outputs.extend(output[sort_reverse_index])
            valid_attention_scores.extend((attention_scores[sort_reverse_index] if (attention_scores is not None) else []))
        assert (len(all_outputs) == len(data))
        if (compute_loss and (total_ntokens > 0)):
            valid_loss = total_loss
            # Perplexity = exp of the per-token loss.
            valid_ppl = torch.exp((total_loss / total_ntokens))
        else:
            valid_loss = (- 1)
            valid_ppl = (- 1)
        decoded_valid = model.trg_vocab.arrays_to_sentences(arrays=all_outputs, cut_at_eos=True)
        # Characters are joined without spaces; words/bpe with spaces.
        join_char = (' ' if (level in ['word', 'bpe']) else '')
        valid_sources = [join_char.join(s) for s in data.src]
        valid_references = [join_char.join(t) for t in data.trg]
        valid_hypotheses = [join_char.join(t) for t in decoded_valid]
        if ((level == 'bpe') and postprocess):
            valid_sources = [bpe_postprocess(s, bpe_type=bpe_type) for s in valid_sources]
            valid_references = [bpe_postprocess(v, bpe_type=bpe_type) for v in valid_references]
            valid_hypotheses = [bpe_postprocess(v, bpe_type=bpe_type) for v in valid_hypotheses]
        if sacrebleu['use_detokenization']:
            batch_src_detokenize = sacrebleu['batch_src_detokenize']
            batch_trg_detokenize = sacrebleu['batch_trg_detokenize']
            valid_sources = batch_src_detokenize(valid_sources)
            valid_references = batch_trg_detokenize(valid_references)
            valid_hypotheses = batch_trg_detokenize(valid_hypotheses)
        # Only score when the dataset actually carries references.
        if valid_references:
            assert (len(valid_hypotheses) == len(valid_references))
            current_valid_score = 0
            if (eval_metric.lower() == 'bleu'):
                current_valid_score = bleu(valid_hypotheses, valid_references, tokenize=sacrebleu['tokenize'])
            elif (eval_metric.lower() == 'chrf'):
                current_valid_score = chrf(valid_hypotheses, valid_references, remove_whitespace=sacrebleu['remove_whitespace'])
            elif (eval_metric.lower() == 'token_accuracy'):
                current_valid_score = token_accuracy(list(decoded_valid), list(data.trg))
            elif (eval_metric.lower() == 'sequence_accuracy'):
                current_valid_score = sequence_accuracy(valid_hypotheses, valid_references)
        else:
            current_valid_score = (- 1)
    return (current_valid_score, valid_loss, valid_ppl, valid_sources, valid_sources_raw, valid_references, valid_hypotheses, decoded_valid, valid_attention_scores)
def compute_joint(x_out: Tensor, x_tf_out: Tensor) -> Tensor:
    """Estimate the symmetrized joint distribution P(i, j) from two batches
    of per-sample class distributions; result is (k, k) and sums to 1."""
    assert simplex(x_out), f'x_out not normalized.'
    assert simplex(x_tf_out), f'x_tf_out not normalized.'
    bn, k = x_out.shape
    assert x_tf_out.size(0) == bn and x_tf_out.size(1) == k
    # Per-sample outer products, accumulated over the batch.
    joint = x_out.unsqueeze(2) * x_tf_out.unsqueeze(1)
    joint = joint.sum(dim=0)
    # Enforce symmetry, then renormalize to a proper distribution.
    joint = (joint + joint.t()) / 2.0
    joint /= joint.sum()
    return joint
def sql_window_api(spark):
    """Smoke-test the PySpark Window / WindowSpec API: partitioned,
    range-bounded and row-bounded window means over a toy DataFrame."""
    print('Start running Window and WindowSpec API')
    sc = spark.sparkContext
    # NOTE(review): sqlContext is created but never used below — confirm
    # whether this side effect is needed or the line can be dropped.
    sqlContext = SQLContext(sc)
    df = spark.createDataFrame([('Alice', 2, 50), ('Alice', 3, 50), ('Alice', 2, 60), ('Alice', 3, 60), ('Alice', 2, 70), ('Bob', 3, 50), ('Bob', 3, 60), ('Bob', 4, 50)], ['name', 'age', 'height'])
    # Unordered window: mean over the whole name partition.
    window = Window().partitionBy('name')
    df.withColumn('mean', mean('height').over(window)).show()
    # Range frame: rows whose height lies in [current - 4, current].
    window = Window().partitionBy('name').orderBy('height').rangeBetween((- 4), 0)
    df.withColumn('mean', mean('height').over(window)).show()
    # Row frame: the current row and the one immediately after it.
    window = Window().partitionBy('name').orderBy('height').rowsBetween(Window.currentRow, 1)
    df.withColumn('mean', mean('height').over(window)).show()
    print('Finish running Window and WindowSpec API')
class ComplicatedInputDataset(torch.utils.data.Dataset):
    """Synthetic binary-classification dataset with a nested sample layout.

    Each item is ((x1, x2), {'x3': x3}, x4, y): four scalar features drawn
    from class-dependent uniform bands, with the first half of the dataset
    labeled 0 and the second half labeled 1.
    """

    def __init__(self, size=1000, nested_input=True) -> None:
        super().__init__()
        self.size = size
        half = self.size // 2

        def two_bands(low_offset, high_offset):
            # `half` values near low_offset (class 0) stacked on `half`
            # values near high_offset (class 1), each uniform in [off, off+1).
            return torch.cat([torch.rand(half, 1) + low_offset,
                              torch.rand(half, 1) + high_offset], dim=0)

        self.X1 = two_bands(0.0, 1.5)
        self.X2 = two_bands(1.5, 3.0)
        self.X3 = two_bands(3.0, 4.5)
        self.X4 = two_bands(4.5, 6.0)
        self.Y = torch.cat([torch.zeros(half, 1), torch.ones(half, 1)], dim=0)

    def __getitem__(self, index):
        return ((self.X1[index], self.X2[index]), {'x3': self.X3[index]}, self.X4[index], self.Y[index])

    def __len__(self):
        return self.size
class LineActiveSchedulerND(_SubspacePointActiveSchedulerND):
    """Scheduler sweeping a 1-D line through an N-D domain: the swept axis is
    *iaxis*, fixed at *point* in the remaining (nd - 1) dimensions."""
    name = 'Line'
    def __init__(self, N_STEPS, D, point, iaxis):
        # A line subspace only makes sense in 2+ dimensions.
        if (D.nd < 2):
            raise Exception('ERROR: requires nd >=2')
        # point fixes every coordinate except the swept axis.
        if (len(point) != (D.nd - 1)):
            raise Exception(('ERROR: point incorrect shape %s' % (point.shape,)))
        super().__init__(N_STEPS, D, point, iaxes=[iaxis])
def add_dataset_args(parser, train=False, gen=False):
    """Register dataset and data-loading CLI options on *parser*.

    Args:
        parser: argparse.ArgumentParser to extend.
        train: Also add training-only options (subset selection, validation
            cadence, curriculum).
        gen: Also add generation-only options (subset, sharding).

    Returns:
        The argument group holding the dataset options.
    """
    group = parser.add_argument_group('Dataset and data loading')
    group.add_argument('--num-workers', default=0, type=int, metavar='N', help='how many subprocesses to use for data loading')
    group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true', help='ignore too long or too short lines in valid and test set')
    group.add_argument('--max-tokens', type=int, metavar='N', help='maximum number of tokens in a batch')
    group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N', help='maximum number of sentences in a batch')
    group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N', help='batch size will be a multiplier of this value')
    # CONSISTENCY FIX: this was added via parser.add_argument, skipping the
    # argument group (and its --help section) unlike every other option here.
    group.add_argument('--dataset-impl', metavar='FORMAT', help='output dataset implementation', choices=['raw', 'lazy', 'cached', 'mmap'], default='cached')
    if train:
        group.add_argument('--train-subset', default='train', metavar='SPLIT', choices=['train', 'valid', 'test'], help='data subset to use for training (train, valid, test)')
        group.add_argument('--valid-subset', default='valid', metavar='SPLIT', help='comma separated list of data subsets to use for validation (train, valid, valid1, test, test1)')
        group.add_argument('--validate-interval', type=int, default=1, metavar='N', help='validate every N epochs')
        group.add_argument('--disable-validation', action='store_true', help='disable validation')
        group.add_argument('--max-sentences-valid', type=int, metavar='N', help='maximum number of sentences in a validation batch (defaults to --max-sentences)')
        group.add_argument('--curriculum', default=0, type=int, metavar='N', help="don't shuffle batches for first N epochs")
    if gen:
        group.add_argument('--gen-subset', default='test', metavar='SPLIT', help='data subset to generate (train, valid, test)')
        group.add_argument('--num-shards', default=1, type=int, metavar='N', help='shard generation over N shards')
        group.add_argument('--shard-id', default=0, type=int, metavar='ID', help='id of the shard to generate (id < num_shards)')
    return group
def _get_triplet_mask(labels):
    """Boolean mask [B, B, B] of valid triplets (i, j, k) for triplet loss:
    i, j, k must be distinct indices with labels[i] == labels[j] and
    labels[i] != labels[k]."""
    # Pairwise "different index" matrix, broadcast over all index triples.
    indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)
    i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
    i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
    j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
    distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
    # Pairwise "same label" matrix, broadcast likewise.
    label_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    i_equal_j = tf.expand_dims(label_equal, 2)
    i_equal_k = tf.expand_dims(label_equal, 1)
    # Anchor/positive share a label; anchor/negative must not.
    valid_labels = tf.logical_and(i_equal_j, tf.logical_not(i_equal_k))
    mask = tf.logical_and(distinct_indices, valid_labels)
    return mask
class TestInputFn(tf.test.TestCase):
    """Tests for training_utils.create_input_fn on a tiny parallel corpus."""

    def _test_with_args(self, **kwargs):
        """Build an input_fn with *kwargs*, run one batch, and verify the
        feature/label dictionary keys."""
        (sources_file, targets_file) = test_utils.create_temp_parallel_data(sources=['Hello World .'], targets=['Goodbye .'])
        pipeline = input_pipeline.ParallelTextInputPipeline(params={'source_files': [sources_file.name], 'target_files': [targets_file.name]}, mode=tf.contrib.learn.ModeKeys.TRAIN)
        input_fn = training_utils.create_input_fn(pipeline=pipeline, **kwargs)
        (features, labels) = input_fn()
        with self.test_session() as sess:
            with tf.contrib.slim.queues.QueueRunners(sess):
                (features_, labels_) = sess.run([features, labels])
            self.assertEqual(set(features_.keys()), set(['source_tokens', 'source_len']))
            self.assertEqual(set(labels_.keys()), set(['target_tokens', 'target_len']))

    def test_without_buckets(self):
        """Plain batching, no length bucketing."""
        self._test_with_args(batch_size=10)

    def test_with_buckets(self):
        """Batching with length-bucket boundaries.

        FIX: renamed from the typo 'test_wit_buckets'; discovery still picks
        it up via the 'test_' prefix."""
        self._test_with_args(batch_size=10, bucket_boundaries=[0, 5, 10])
def get_last_ckpt_in_dir(dir: str, ckpt_pattern: str='*.ckpt', key_sort: Callable=(lambda x: x.stat().st_mtime)) -> Optional[Path]:
    """Return the checkpoint in *dir* ranked last by *key_sort* (the most
    recently modified one by default), or None when no checkpoint matches
    *ckpt_pattern*."""
    candidates = get_ckpts_in_dir(dir, ckpt_pattern)
    if not candidates:
        return None
    return sorted(candidates, key=key_sort)[-1]
class RPNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy (log loss) metric for the RPN classification head.

    Accumulates -log p(true class) over all anchors whose label is not the
    ignore value (-1).
    """
    def __init__(self):
        super(RPNLogLossMetric, self).__init__('RPNLogLoss')
        # Names of the prediction/label entries this metric consumes.
        (self.pred, self.label) = get_rpn_names()
    def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]
        # Flatten labels; rearrange probabilities so each row is one anchor's
        # class distribution, indexable by its integer label.
        label = label.asnumpy().astype('int32').reshape((- 1))
        pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], (- 1))).transpose((0, 2, 1))
        pred = pred.reshape((label.shape[0], (- 1)))
        # Drop ignored anchors (label == -1).
        keep_inds = np.where((label != (- 1)))[0]
        label = label[keep_inds]
        cls = pred[(keep_inds, label)]
        # Epsilon guards log(0).
        cls += 1e-14
        cls_loss = ((- 1) * np.log(cls))
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0]
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Assemble sampler/algo/agent/runner from the named config and train PPO."""
    affinity = affinity_from_code(slot_affinity_code)
    # Base config from the registry, overridden by the variant saved in log_dir.
    config = update_config(configs[config_key], load_variant(log_dir))
    sampler = GpuParallelSampler(
        EnvCls=gym_make,
        env_kwargs=config['env'],
        CollectorCls=ResetCollector,
        **config['sampler'],
    )
    runner = MinibatchRl(
        algo=PPO(optim_kwargs=config['optim'], **config['algo']),
        agent=MujocoFfAgent(model_kwargs=config['model'], **config['agent']),
        sampler=sampler,
        affinity=affinity,
        **config['runner'],
    )
    with logger_context(log_dir, run_ID, config['env']['id'], config):
        runner.train()
class Pyramids(object):
    """Build a Gaussian image pyramid of `levels` images.

    Calling the instance on a PIL image returns a list `[img, down2x,
    down4x, ...]` with `levels` entries, each level produced by
    `cv2.pyrDown` (Gaussian blur + 2x downsample) of the previous one.
    """

    def __init__(self, levels=1):
        # Validate explicitly: `assert` is stripped under `python -O`,
        # so it must not be used for input validation.
        if levels < 1:
            raise ValueError('levels must be >= 1, got {}'.format(levels))
        self.levels = levels

    def __call__(self, img) -> list:
        """Return the pyramid for `img` as a list of PIL images (largest first)."""
        img_pyd = [img]
        for _ in range(self.levels - 1):
            img_pyd.append(Image.fromarray(cv2.pyrDown(np.array(img_pyd[-1]))))
        return img_pyd

    def __repr__(self):
        return self.__class__.__name__ + '(levels={})'.format(self.levels)
def treeFromFile(filename):
    """Parse the URDF file at `filename` and convert it to a KDL tree."""
    with open(filename) as urdf_file:
        xml_string = urdf_file.read()
    # Build the URDF model from the raw XML, then hand it to the converter.
    robot_model = urdf.URDF.from_xml_string(xml_string)
    return treeFromUrdfModel(robot_model)
def create_default_local_file():
    """Generate a default `local.py` next to this module.

    The generated file defines `local_env_settings()` returning an
    `EnvSettings` populated with the default attribute values; selected
    attributes get an explanatory trailing comment.
    """
    comment = {
        'results_path': 'Where to store tracking results',
        'network_path': 'Where tracking networks are stored.',
    }

    path = os.path.join(os.path.dirname(__file__), 'local.py')
    with open(path, 'w') as f:
        settings = EnvSettings()
        f.write('from pytracking.evaluation.environment import EnvSettings\n\n')
        f.write('def local_env_settings():\n')
        f.write(' settings = EnvSettings()\n\n')
        f.write(' # Set your local paths here.\n\n')
        for attr in dir(settings):
            attr_val = getattr(settings, attr)
            # Skip dunders and methods; only data attributes are emitted.
            if attr.startswith('__') or callable(attr_val):
                continue
            comment_str = comment.get(attr)
            if comment_str is None:
                f.write(" settings.{} = '{}'\n".format(attr, attr_val))
            else:
                f.write(" settings.{} = '{}' # {}\n".format(attr, attr_val, comment_str))
        f.write('\n return settings\n\n')
class TestTransformerEncoder(unittest.TestCase):
    """Smoke test: a stack of clustered-attention encoder layers preserves shape."""

    def test_full_attention_forward(self):
        d_model = 128
        n_heads = 4
        layers = [
            TransformerEncoderLayer(
                AttentionLayer(ClusteredAttention(clusters=10), d_model, n_heads),
                d_model,
                n_heads,
            )
            for _ in range(6)
        ]
        transformer = TransformerEncoder(layers)
        out = transformer(torch.rand(100, 20, d_model))
        self.assertEqual(out.shape, (100, 20, d_model))
_vision
class CLIPProcessorTest(unittest.TestCase):
    """Tests for CLIPProcessor: verifies that the processor correctly wraps a
    CLIPTokenizer and a CLIPFeatureExtractor, round-trips through
    save_pretrained/from_pretrained, and delegates text/image calls."""

    def setUp(self):
        # Build a throwaway directory holding a tiny BPE tokenizer
        # (vocab + merges files) plus a feature-extractor JSON config.
        self.tmpdirname = tempfile.mkdtemp()
        # Minimal BPE vocabulary; the '</w>' suffix marks word-final subwords.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|endoftext|>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        # Feature-extractor config written as JSON alongside the tokenizer files.
        feature_extractor_map = {'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0., 0.4578275, 0.], 'image_std': [0., 0., 0.]}
        self.feature_extractor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.feature_extractor_file, 'w', encoding='utf-8') as fp:
            json.dump(feature_extractor_map, fp)

    def get_tokenizer(self, **kwargs):
        # Fresh tokenizer loaded from the fixture files; kwargs override defaults.
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        # Fresh feature extractor loaded from the fixture files.
        return CLIPFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        # Remove the fixture directory created in setUp.
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random CHW uint8 image converted to a PIL image (HWC layout).
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        # A saved processor must reload with an equivalent tokenizer and extractor.
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, CLIPFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        # Extra kwargs passed to from_pretrained must propagate to both components.
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, CLIPFeatureExtractor)

    def test_feature_extractor(self):
        # Image-only call delegates to the feature extractor (sums compared
        # with a small tolerance since values are floats).
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = feature_extractor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        # Text-only call delegates to the tokenizer verbatim.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        # Combined text+image call yields all keys; calling with neither raises.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        # batch_decode on the processor matches the tokenizer's batch_decode.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
def extract_into_tensor(a, t, x_shape):
    """Gather values from `a` at indices `t` and reshape them so they
    broadcast over a tensor of shape `x_shape`.

    Args:
        a: 1-D (per last dim) tensor of per-timestep values.
        t: integer index tensor whose leading dimension is the batch size.
        x_shape: target shape; only its rank is used.

    Returns:
        Tensor of shape (batch, 1, 1, ...) with rank len(x_shape).
    """
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    trailing_ones = [1] * (len(x_shape) - 1)
    return gathered.reshape(batch, *trailing_ones)
def mdetr_efficientnetB3_refcocoplus(pretrained=False, return_postprocessor=False):
    """MDETR with a TF-EfficientNet-B3 (noisy-student) backbone for RefCOCO+.

    Args:
        pretrained: if True, download and load the published checkpoint.
        return_postprocessor: if True, also return a PostProcess instance.

    Returns:
        The model, or (model, PostProcess()) when `return_postprocessor` is set.
    """
    model = _make_detr('timm_tf_efficientnet_b3_ns')
    if pretrained:
        # NOTE(review): the original URL literal was truncated in source
        # (`url=' map_location='cpu'`), which is a syntax error. Restored to
        # the published MDETR RefCOCO+ EfficientNet-B3 checkpoint — verify
        # this URL against the MDETR release assets.
        checkpoint = torch.hub.load_state_dict_from_url(
            url='https://zenodo.org/record/4721981/files/refcoco%2B_EB3_checkpoint.pth',
            map_location='cpu',
            check_hash=True,
        )
        model.load_state_dict(checkpoint['model'])
    if return_postprocessor:
        return (model, PostProcess())
    return model
class BertEmbeddings(nn.Module):
    """Standalone word-embedding + LayerNorm pair initialized from a BERT model.

    Copies the word-embedding table and the embedding LayerNorm weights out
    of `bert_model` so they can be used independently of the full encoder.
    """

    def __init__(self, bert_model):
        super().__init__()
        cfg = bert_model.config
        self.word_embeddings = nn.Embedding(
            cfg.vocab_size, cfg.hidden_size, padding_idx=cfg.pad_token_id)
        self.LayerNorm = nn.LayerNorm(cfg.hidden_size, eps=cfg.layer_norm_eps)
        # Copy the pretrained weights so this module matches the source model.
        self.word_embeddings.load_state_dict(
            bert_model.embeddings.word_embeddings.state_dict())
        self.LayerNorm.load_state_dict(
            bert_model.embeddings.LayerNorm.state_dict())
def check_integrity(fpath, md5=None):
    """Return True iff `fpath` exists and (when `md5` is given) matches it.

    Fix: the file's existence is now checked *before* the `md5 is None`
    shortcut — previously a missing file was reported intact whenever no
    checksum was supplied.

    Args:
        fpath: path of the file to verify.
        md5: expected hex MD5 digest, or None to only check existence.
    """
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    digest = hashlib.md5()
    with open(fpath, 'rb') as f:
        # Stream in 1 MiB chunks so large files are not loaded wholesale.
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            digest.update(chunk)
    return digest.hexdigest() == md5
class SGLD(Optimizer):
    """Stochastic Gradient Langevin Dynamics-style optimizer.

    Performs plain SGD steps plus an additive term whose magnitude decays
    polynomially over the first 15 iterations per parameter.

    NOTE(review): the added "noise" here is deterministic
    (std_dev * ones_like(grad)), not Gaussian — classical SGLD injects
    N(0, std) noise. Confirm this is intended before relying on it.
    """

    def __init__(self, params, lr=0.01, std_dev=0.0, decay=None) -> None:
        # NOTE(review): `decay` is accepted but never used anywhere below.
        if (lr < 0.0):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        defaults = dict(lr=lr, std_dev=std_dev)
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Perform one optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss; its result is returned unchanged.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for parameter in group['params']:
                if (parameter.grad is None):
                    continue
                state = self.state[parameter]
                lr = group['lr']
                std_dev = group['std_dev']
                gradient = parameter.grad.data
                if (len(state) == 0):
                    # Lazy per-parameter state init on the first step.
                    state['iteration'] = 0
                    # NOTE(review): 'momentum' is initialized but never read.
                    state['momentum'] = torch.ones_like(parameter)
                # Noise magnitude decays from std_dev toward ~0 over T=15 steps.
                current_std_dev = self.polynomial(state['iteration'], 15, std_dev)
                noise = (current_std_dev * torch.ones_like(parameter.grad))
                state['iteration'] += 1
                # In-place update: p <- p - lr * grad + noise
                parameter.data.add_((((- lr) * gradient) + noise))
        return loss

    def cyclic(self, T, i, lr, M=4, min_lr=0.0):
        """Cyclical cosine learning-rate schedule: M cycles over T steps.

        Returns the scheduled lr at step i, clipped to [min_lr, 100].
        """
        rcounter = (T + i)
        cos_inner = (np.pi * (rcounter % (T // M)))
        cos_inner /= (T // M)
        cos_out = (np.cos(cos_inner) + 1)
        lr = float(np.clip(((0.5 * cos_out) * lr), min_lr, 100))
        return lr

    def polynomial(self, t, T, base_lr, end_lr=0.0001, power=1.0):
        """Polynomial decay from base_lr at t=0 to end_lr at t=T."""
        lr = (((base_lr - end_lr) * ((1 - (t / T)) ** power)) + end_lr)
        return lr
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.