code stringlengths 101 5.91M |
|---|
def is_tf_113():
    """Return True iff the installed TensorFlow version is exactly 1.13."""
    major, minor = get_tf_version()[:2]
    return int(major) == 1 and int(minor) == 13
def test_TargetPipelineCreator_repeated_names() -> None:
    """Adding the same step name twice should auto-suffix the duplicate."""
    creator = TargetPipelineCreator()
    for _ in range(2):
        creator.add('zscore')
    pipeline = creator.to_pipeline()
    assert isinstance(pipeline, JuTargetPipeline)
    step_names = [name for name, _ in pipeline.steps]
    assert step_names == ['zscore', 'zscore_1']
def test_lambda_closure_cleanup():
    """After cleanup, no payload objects remain alive and the construction
    counters match the expected copy/move pattern."""
    m.test_cleanup()
    stats = m.payload_cstats()
    assert stats.alive() == 0
    assert stats.copy_constructions == 1
    assert stats.move_constructions >= 1
class Preprocess(Layer):
    """Keras layer that resizes inputs to 256x256 and conditionally applies
    `preprocess_input` (the preprocessing is skipped for float32 inputs)."""

    def call(self, x, mask=None):
        _, n_rows, n_cols, _ = K.int_shape(x)
        # Resize only when the spatial dimensions differ from 256x256.
        if n_rows == 256 and n_cols == 256:
            resized = x
        else:
            resized = tf.image.resize_bilinear(x, [256, 256], align_corners=True, name='resize')
        # NOTE(review): float32 inputs bypass preprocess_input — presumably they
        # are assumed to be preprocessed already; confirm with callers.
        if K.dtype(x) == 'float32':
            return resized
        return preprocess_input(resized)

    def compute_output_shape(self, input_shape):
        # Output is always a 256x256 RGB tensor regardless of input size.
        return (input_shape[0], 256, 256, 3)
def getLargestCC(segmentation):
    """Return a boolean mask of the largest connected component of `segmentation`.

    Bug fix: the original took `np.argmax` over the bincount of ALL labels,
    which includes the background label 0. Whenever background pixels
    outnumbered every foreground component (the common case), the function
    returned the background mask instead of the largest component. Background
    is now excluded from the count.
    """
    labels = label(segmentation, connectivity=1)
    counts = np.bincount(labels.flat)
    if len(counts) < 2:
        # No foreground component at all: return an all-False mask.
        return np.zeros_like(labels, dtype=bool)
    # Skip index 0 (background) when picking the largest component.
    largest_label = np.argmax(counts[1:]) + 1
    return labels == largest_label
class Output(nn.Module):
    """Final output head: (optional norm) -> nonlinearity -> reflection pad ->
    coord/spectral conv -> Tanh.

    Args:
        input_nc: number of input channels.
        output_nc: number of output channels.
        kernel_size: conv kernel size; reflection padding of kernel_size // 2
            keeps spatial dimensions unchanged.
        norm_layer: normalization layer constructor, or None to skip norm.
        nonlinearity: activation module placed before the conv.
        use_spect / use_coord: forwarded to `coord_conv`.
    """

    def __init__(self, input_nc, output_nc, kernel_size=3, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=True, use_coord=False):
        super(Output, self).__init__()
        kwargs = {'kernel_size': kernel_size, 'padding': 0, 'bias': True}
        self.conv1 = coord_conv(input_nc, output_nc, use_spect, use_coord, **kwargs)
        # Idiom fix: `type(norm_layer) == type(None)` replaced by `is None`.
        if norm_layer is None:
            self.model = nn.Sequential(nonlinearity, nn.ReflectionPad2d(kernel_size // 2), self.conv1, nn.Tanh())
        else:
            self.model = nn.Sequential(norm_layer(input_nc), nonlinearity, nn.ReflectionPad2d(kernel_size // 2), self.conv1, nn.Tanh())

    def forward(self, x):
        """Apply the head to `x` and return the Tanh-bounded output."""
        return self.model(x)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder variant exposing a single-layer forward, used by PABEE
    (patience-based early exit) to run one transformer layer at a time."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Run only the requested layer and return its hidden states.
        layer = self.layer[current_layer]
        outputs = layer(hidden_states, attention_mask, head_mask[current_layer])
        return outputs[0]
def main():
    """Train or evaluate a ResNet-50 age regressor on the IMDB-WIKI csv data.

    Reads configuration from the module-level `args` namespace (argparse
    result) and uses module-level helpers: `train`, `validate`,
    `adjust_learning_rate`, `save_checkpoint`, and `tb_logger`.
    When `args.evaluate` is set, loads `args.resume` and runs test-set
    evaluation only; otherwise trains, checkpoints the best model, and
    finishes by evaluating the best checkpoint on the test set.
    """
    if (args.gpu is not None):
        print(f'Use GPU: {args.gpu} for training')
    print('=====> Preparing data...')
    print(f'File (.csv): {args.dataset}.csv')
    # The csv carries a 'split' column partitioning rows into train/val/test.
    df = pd.read_csv(os.path.join(args.data_dir, f'{args.dataset}.csv'))
    (df_train, df_val, df_test) = (df[(df['split'] == 'train')], df[(df['split'] == 'val')], df[(df['split'] == 'test')])
    # Training label distribution; forwarded to validate() (used by its metrics).
    train_labels = df_train['age']
    train_dataset = IMDBWIKI(data_dir=args.data_dir, df=df_train, img_size=args.img_size, split='train', reweight=args.reweight, lds=args.lds, lds_kernel=args.lds_kernel, lds_ks=args.lds_ks, lds_sigma=args.lds_sigma)
    val_dataset = IMDBWIKI(data_dir=args.data_dir, df=df_val, img_size=args.img_size, split='val')
    test_dataset = IMDBWIKI(data_dir=args.data_dir, df=df_test, img_size=args.img_size, split='test')
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=False)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=False)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=False)
    print(f'Training data size: {len(train_dataset)}')
    print(f'Validation data size: {len(val_dataset)}')
    print(f'Test data size: {len(test_dataset)}')
    print('=====> Building model...')
    # ResNet-50 with optional FDS (feature distribution smoothing) settings.
    model = resnet50(fds=args.fds, bucket_num=args.bucket_num, bucket_start=args.bucket_start, start_update=args.start_update, start_smooth=args.start_smooth, kernel=args.fds_kernel, ks=args.fds_ks, sigma=args.fds_sigma, momentum=args.fds_mmt)
    model = torch.nn.DataParallel(model).cuda()
    # --- Evaluation-only path: load checkpoint, test, and return early. ---
    if args.evaluate:
        assert args.resume, 'Specify a trained model using [args.resume]'
        checkpoint = torch.load(args.resume)
        # strict=False tolerates checkpoints with extra/missing keys.
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print(f"===> Checkpoint '{args.resume}' loaded (epoch [{checkpoint['epoch']}]), testing...")
        validate(test_loader, model, train_labels=train_labels, prefix='Test')
        return
    # --- Optionally freeze everything except the final fc/linear layer. ---
    if args.retrain_fc:
        assert ((args.reweight != 'none') and args.pretrained)
        print('===> Retrain last regression layer only!')
        for (name, param) in model.named_parameters():
            if (('fc' not in name) and ('linear' not in name)):
                param.requires_grad = False
    if (not args.retrain_fc):
        optimizer = (torch.optim.Adam(model.parameters(), lr=args.lr) if (args.optimizer == 'adam') else torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay))
    else:
        # Only trainable (unfrozen) parameters go into the optimizer.
        parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
        names = list(filter((lambda k: (k is not None)), [(k if v.requires_grad else None) for (k, v) in model.module.named_parameters()]))
        # Expect exactly the last layer's weight (and optionally bias).
        assert (1 <= len(parameters) <= 2)
        print(f'===> Only optimize parameters: {names}')
        optimizer = (torch.optim.Adam(parameters, lr=args.lr) if (args.optimizer == 'adam') else torch.optim.SGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay))
    # --- Warm-start from a pretrained checkpoint, dropping fc/linear weights. ---
    if args.pretrained:
        checkpoint = torch.load(args.pretrained, map_location='cpu')
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for (k, v) in checkpoint['state_dict'].items():
            if (('linear' not in k) and ('fc' not in k)):
                new_state_dict[k] = v
        model.load_state_dict(new_state_dict, strict=False)
        print(f'===> Pretrained weights found in total: [{len(list(new_state_dict.keys()))}]')
        print(f'===> Pre-trained model loaded: {args.pretrained}')
    # --- Resume full training state (model + optimizer + bookkeeping). ---
    if args.resume:
        if os.path.isfile(args.resume):
            print(f"===> Loading checkpoint '{args.resume}'")
            checkpoint = (torch.load(args.resume) if (args.gpu is None) else torch.load(args.resume, map_location=torch.device(f'cuda:{str(args.gpu)}')))
            args.start_epoch = checkpoint['epoch']
            args.best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(f"===> Loaded checkpoint '{args.resume}' (Epoch [{checkpoint['epoch']}])")
        else:
            print(f"===> No checkpoint found at '{args.resume}'")
    cudnn.benchmark = True
    # --- Main training loop with per-epoch validation and checkpointing. ---
    for epoch in range(args.start_epoch, args.epoch):
        adjust_learning_rate(optimizer, epoch, args)
        train_loss = train(train_loader, model, optimizer, epoch)
        (val_loss_mse, val_loss_l1, val_loss_gmean) = validate(val_loader, model, train_labels=train_labels)
        # Select the validation metric matching the training loss type.
        loss_metric = (val_loss_mse if (args.loss == 'mse') else val_loss_l1)
        is_best = (loss_metric < args.best_loss)
        args.best_loss = min(loss_metric, args.best_loss)
        print(f"Best {('L1' if ('l1' in args.loss) else 'MSE')} Loss: {args.best_loss:.3f}")
        save_checkpoint(args, {'epoch': (epoch + 1), 'model': args.model, 'best_loss': args.best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best)
        print(f'Epoch #{epoch}: Train loss [{train_loss:.4f}]; Val loss: MSE [{val_loss_mse:.4f}], L1 [{val_loss_l1:.4f}], G-Mean [{val_loss_gmean:.4f}]')
        tb_logger.log_value('train_loss', train_loss, epoch)
        tb_logger.log_value('val_loss_mse', val_loss_mse, epoch)
        tb_logger.log_value('val_loss_l1', val_loss_l1, epoch)
        tb_logger.log_value('val_loss_gmean', val_loss_gmean, epoch)
    print(('=' * 120))
    print('Test best model on testset...')
    # Reload the best checkpoint written by save_checkpoint during training.
    checkpoint = torch.load(f'{args.store_root}/{args.store_name}/ckpt.best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    print(f"Loaded best model, epoch {checkpoint['epoch']}, best val loss {checkpoint['best_loss']:.4f}")
    (test_loss_mse, test_loss_l1, test_loss_gmean) = validate(test_loader, model, train_labels=train_labels, prefix='Test')
    print(f'''Test loss: MSE [{test_loss_mse:.4f}], L1 [{test_loss_l1:.4f}], G-Mean [{test_loss_gmean:.4f}]
Done''')
def compute_r2_score(input_probs, target):
    """Compute the R^2 (coefficient of determination) between predictions
    and targets, moving both tensors to CPU numpy arrays first."""
    y_true = target.cpu().detach().numpy()
    y_pred = input_probs.cpu().detach().numpy()
    return metrics.r2_score(y_true, y_pred)
def main(train_file, valid_file, embeddings_file, target_dir, hidden_size=300, dropout=0.5, num_classes=3, epochs=64, batch_size=32, lr=0.0004, patience=5, max_grad_norm=10.0, checkpoint=None):
    """Train an ESIM NLI model from pickled preprocessed data.

    Args:
        train_file / valid_file: paths to pickled dicts accepted by NLIDataset.
        embeddings_file: pickled embedding matrix (vocab_size x dim).
        target_dir: directory where checkpoints are written.
        hidden_size, dropout, num_classes: ESIM hyperparameters.
        epochs, batch_size, lr, max_grad_norm: optimization settings.
        patience: epochs without val-accuracy improvement before early stop.
        checkpoint: optional path to resume training from.
    """
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    print((20 * '='), ' Preparing for training ', (20 * '='))
    if (not os.path.exists(target_dir)):
        os.makedirs(target_dir)
    print('\t* Loading training data...')
    with open(train_file, 'rb') as pkl:
        train_data = NLIDataset(pickle.load(pkl))
    train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
    print('\t* Loading validation data...')
    with open(valid_file, 'rb') as pkl:
        valid_data = NLIDataset(pickle.load(pkl))
    valid_loader = DataLoader(valid_data, shuffle=False, batch_size=batch_size)
    print('\t* Building model...')
    with open(embeddings_file, 'rb') as pkl:
        embeddings = torch.tensor(pickle.load(pkl), dtype=torch.float).to(device)
    model = ESIM(embeddings.shape[0], embeddings.shape[1], hidden_size, embeddings=embeddings, dropout=dropout, num_classes=num_classes, device=device).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # LR is halved whenever validation accuracy fails to improve (mode='max',
    # patience=0: react immediately).
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=0)
    best_score = 0.0
    start_epoch = 1
    epochs_count = []
    train_losses = []
    valid_losses = []
    # Resume model/optimizer state and loss history from a prior run.
    if checkpoint:
        checkpoint = torch.load(checkpoint)
        start_epoch = (checkpoint['epoch'] + 1)
        best_score = checkpoint['best_score']
        print('\t* Training will continue on existing model from epoch {}...'.format(start_epoch))
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        epochs_count = checkpoint['epochs_count']
        train_losses = checkpoint['train_losses']
        valid_losses = checkpoint['valid_losses']
    # Baseline validation pass before any (further) training.
    (_, valid_loss, valid_accuracy) = validate(model, valid_loader, criterion)
    print('\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%'.format(valid_loss, (valid_accuracy * 100)))
    print('\n', (20 * '='), 'Training ESIM model on device: {}'.format(device), (20 * '='))
    patience_counter = 0
    for epoch in range(start_epoch, (epochs + 1)):
        epochs_count.append(epoch)
        print('* Training epoch {}:'.format(epoch))
        (epoch_time, epoch_loss, epoch_accuracy) = train(model, train_loader, optimizer, criterion, epoch, max_grad_norm)
        train_losses.append(epoch_loss)
        print('-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%'.format(epoch_time, epoch_loss, (epoch_accuracy * 100)))
        print('* Validation for epoch {}:'.format(epoch))
        (epoch_time, epoch_loss, epoch_accuracy) = validate(model, valid_loader, criterion)
        valid_losses.append(epoch_loss)
        print('-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n'.format(epoch_time, epoch_loss, (epoch_accuracy * 100)))
        # Scheduler steps on validation accuracy (mode='max' above).
        scheduler.step(epoch_accuracy)
        if (epoch_accuracy < best_score):
            patience_counter += 1
        else:
            best_score = epoch_accuracy
            patience_counter = 0
            # NOTE(review): best.pth.tar deliberately (?) omits the optimizer
            # state, while the per-epoch checkpoint below includes it — resuming
            # from best.pth.tar via the `checkpoint` arg would KeyError on
            # 'optimizer'; confirm intended.
            torch.save({'epoch': epoch, 'model': model.state_dict(), 'best_score': best_score, 'epochs_count': epochs_count, 'train_losses': train_losses, 'valid_losses': valid_losses}, os.path.join(target_dir, 'best.pth.tar'))
        torch.save({'epoch': epoch, 'model': model.state_dict(), 'best_score': best_score, 'optimizer': optimizer.state_dict(), 'epochs_count': epochs_count, 'train_losses': train_losses, 'valid_losses': valid_losses}, os.path.join(target_dir, 'esim_{}.pth.tar'.format(epoch)))
        if (patience_counter >= patience):
            print('-> Early stopping: patience limit reached, stopping...')
            break
    # Plot the loss curves collected over all (possibly resumed) epochs.
    plt.figure()
    plt.plot(epochs_count, train_losses, '-r')
    plt.plot(epochs_count, valid_losses, '-b')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['Training loss', 'Validation loss'])
    plt.title('Cross entropy loss')
    plt.show()
def test_mp_ref_energies() -> None:
    """Each MP elemental reference entry's energy per atom matches the
    tabulated reference value within 1 meV/atom.

    Fix: the original asserted the identical condition twice in a row;
    the duplicate line is removed.
    """
    for (key, val) in mp_elemental_ref_energies.items():
        actual = mp_elem_reference_entries[key].energy_per_atom
        assert (actual == approx(val, abs=0.001)), f'key={key!r}'
class Word2VecPooled(Word2Vec):
    """Word2Vec variant that pools the context embeddings before scoring.

    Supported `pooling` modes:
      - 'avg_pool': masked average over the context dimension.
      - 'avg_pool_unmasked': plain mean over the context dimension.
      - 'max_pool': elementwise max over the context dimension.
    """

    def __init__(self, TEXT=None, embedding_dim=50, batch_size=10, n_gram=4, pooling='avg_pool'):
        super(Word2VecPooled, self).__init__(TEXT=TEXT, embedding_dim=embedding_dim, batch_size=batch_size, n_gram=n_gram)
        self.pooling = pooling
        if self.pooling == 'avg_pool':
            self.avg_pool_layer = MaskedAvgPoolingLayer()

    def forward(self, idx_word, idx_context, context_mask, train=True):
        """Score each word embedding against the pooled context embedding."""
        context = self.embeddings_context(idx_context)
        if self.pooling == 'avg_pool':
            context = self.avg_pool_layer(context, context_mask, dim=1)
        elif self.pooling == 'avg_pool_unmasked':
            context = torch.mean(context, dim=1)
        elif self.pooling == 'max_pool':
            # Bug fix: torch.max(tensor, dim=1) returns a (values, indices)
            # namedtuple, not a tensor; the original passed the tuple onward
            # and crashed on `context.unsqueeze(1)`. Keep only the values.
            context = torch.max(context, dim=1).values
        else:
            raise ValueError(f'Pool type {self.pooling} is not allowed for vectors')
        word = self.embeddings_word(idx_word)
        score = torch.sum(word * context.unsqueeze(1), dim=-1)
        return score
def single_tune(data_continuum, default_params, tune_params, params_keep, tmp_acc, run):
    """Tune hyperparameters on the first `num_val` batches of a continual-learning
    stream, then train/evaluate an agent on the remaining data.

    Mutates `params_keep` (appends the chosen hyperparameters) and `tmp_acc`
    (appends per-task accuracy arrays) in place.

    Online mode: tune on the first num_val batches, then train batch-by-batch.
    Offline mode: collect everything first, tune once, then train on the
    concatenated remainder in one shot.
    """
    tune_data = []
    # First num_val test loaders are reserved for tuning; the rest for evaluation.
    test_loaders_full = setup_test_loader(data_continuum.test_data(), default_params)
    tune_test_loaders = test_loaders_full[:default_params.num_val]
    test_loaders = test_loaders_full[default_params.num_val:]
    if default_params.online:
        for (i, (x_train, y_train, labels)) in enumerate(data_continuum):
            if (i < default_params.num_val):
                # Accumulate tuning batches; on the last one, run the search
                # and build the agent that later batches will train.
                tune_data.append((x_train, y_train, labels))
                if (len(tune_data) == default_params.num_val):
                    best_params = tune_hyper(tune_data, tune_test_loaders, default_params, tune_params)
                    params_keep.append(best_params)
                    # Overlay tuned values on top of the defaults.
                    final_params = vars(default_params)
                    final_params.update(best_params)
                    final_params = SimpleNamespace(**final_params)
                    print('Tuning is done. Best hyper parameter set is {}'.format(best_params))
                    model = setup_architecture(final_params)
                    model = maybe_cuda(model, final_params.cuda)
                    opt = setup_opt(final_params.optimizer, model, final_params.learning_rate, final_params.weight_decay)
                    agent = agents[final_params.agent](model, opt, final_params)
                    print('Training Start')
            else:
                # NOTE(review): `agent` exists here only because the tuning
                # branch above ran on an earlier iteration; if num_val == 0
                # this raises NameError — confirm num_val >= 1 upstream.
                print('run {} training batch {}'.format(run, i))
                print('size: {}, {}'.format(x_train.shape, y_train.shape))
                agent.train_learner(x_train, y_train)
                acc_array = agent.evaluate(test_loaders)
                tmp_acc.append(acc_array)
    else:
        # Offline: split the stream into tuning and training pools first.
        x_train_offline = []
        y_train_offline = []
        x_tune_offline = []
        y_tune_offline = []
        labels_offline = []
        for (i, (x_train, y_train, labels)) in enumerate(data_continuum):
            if (i < default_params.num_val):
                x_tune_offline.append(x_train)
                y_tune_offline.append(y_train)
                labels_offline.append(labels)
            else:
                x_train_offline.append(x_train)
                y_train_offline.append(y_train)
        # Single tuning call over all concatenated tuning batches.
        tune_data = [(np.concatenate(x_tune_offline, axis=0), np.concatenate(y_tune_offline, axis=0), np.concatenate(labels_offline, axis=0))]
        best_params = tune_hyper(tune_data, tune_test_loaders, default_params, tune_params)
        params_keep.append(best_params)
        final_params = vars(default_params)
        final_params.update(best_params)
        final_params = SimpleNamespace(**final_params)
        print('Tuning is done. Best hyper parameter set is {}'.format(best_params))
        model = setup_architecture(final_params)
        model = maybe_cuda(model, final_params.cuda)
        opt = setup_opt(final_params.optimizer, model, final_params.learning_rate, final_params.weight_decay)
        agent = agents[final_params.agent](model, opt, final_params)
        print('Training Start')
        # One-shot training over the full (non-tuning) data.
        x_train_offline = np.concatenate(x_train_offline, axis=0)
        y_train_offline = np.concatenate(y_train_offline, axis=0)
        print('run {} training'.format(run))
        print('size: {}, {}'.format(x_train_offline.shape, y_train_offline.shape))
        agent.train_learner(x_train_offline, y_train_offline)
        acc_array = agent.evaluate(test_loaders)
        tmp_acc.append(acc_array)
class NERTransformer(BaseTransformer):
    """Lightning module fine-tuning a transformer for token classification
    (NER/POS). The concrete task class is resolved dynamically from the
    `tasks` module by `hparams.task_type`."""

    mode = 'token-classification'

    def __init__(self, hparams):
        # Accept either a dict or a Namespace for hparams.
        if (type(hparams) == dict):
            hparams = Namespace(**hparams)
        module = import_module('tasks')
        try:
            # Look up the TokenClassificationTask subclass by name.
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # Label id used to mask padded positions out of the loss/metrics.
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        """Delegate directly to the wrapped transformer model."""
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """One optimization step; batch layout: (input_ids, attention_mask,
        token_type_ids, label_ids)."""
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if (self.config.model_type != 'distilbert'):
            # Only BERT/XLNet actually consume token_type_ids.
            inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet']) else None)
        outputs = self(**inputs)
        loss = outputs[0]
        return {'loss': loss}

    def prepare_data(self):
        """Build (or load cached) features for all three splits."""
        args = self.hparams
        for mode in ['train', 'dev', 'test']:
            cached_features_file = self._feature_file(mode)
            if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
                logger.info('Loading features from cached file %s', cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                # XLNet uses CLS-at-end, left padding and segment id 2 for CLS.
                features = self.token_classification_task.convert_examples_to_features(examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool((self.config.model_type in ['xlnet'])), cls_token=self.tokenizer.cls_token, cls_token_segment_id=(2 if (self.config.model_type in ['xlnet']) else 0), sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool((self.config.model_type in ['xlnet'])), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool=False) -> DataLoader:
        """Load cached features for `mode` and wrap them in a DataLoader.

        NOTE(review): `shuffle` is accepted but never forwarded to DataLoader;
        and when token_type_ids are absent the fallback builds a 1-D zeros
        tensor (one scalar per example) rather than a per-token tensor —
        presumably that path is unused for the supported models; confirm.
        """
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if (features[0].token_type_ids is not None):
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)

    def validation_step(self, batch, batch_nb):
        """Compute loss plus raw predictions/targets for epoch-end metrics."""
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if (self.config.model_type != 'distilbert'):
            inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet']) else None)
        outputs = self(**inputs)
        (tmp_eval_loss, logits) = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into seqeval-style metrics.

        Returns (result dict, predicted label sequences, gold label sequences);
        padded positions (pad_token_label_id) are dropped from both.
        """
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        label_map = {i: label for (i, label) in enumerate(self.labels)}
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if (out_label_ids[(i, j)] != self.pad_token_label_id):
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(out_label_list, preds_list), 'precision': precision_score(out_label_list, preds_list), 'recall': recall_score(out_label_list, preds_list), 'f1': f1_score(out_label_list, preds_list)}
        ret = {k: v for (k, v) in results.items()}
        ret['log'] = results
        return (ret, preds_list, out_label_list)

    def validation_epoch_end(self, outputs):
        """Lightning hook: expose validation metrics for logging/progress bar."""
        (ret, preds, targets) = self._eval_end(outputs)
        logs = ret['log']
        return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}

    def test_epoch_end(self, outputs):
        """Lightning hook: same metrics as validation, keyed for the test phase."""
        (ret, predictions, targets) = self._eval_end(outputs)
        logs = ret['log']
        return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}

    # NOTE(review): defined without `self` or @staticmethod — callable only as
    # NERTransformer.add_model_specific_args(parser, root_dir); confirm callers.
    def add_model_specific_args(parser, root_dir):
        """Extend an argparse parser with NER-specific CLI options."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument('--task_type', default='NER', type=str, help='Task type to fine tune in training (e.g. NER, POS, etc)')
        parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--labels', default='', type=str, help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.')
        parser.add_argument('--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none')
        parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        return parser
def train_self_play(results_dir, scenario_name, print_train_results=True):
    """Run self-play RL training for a PSRO scenario, checkpointing both
    players' best-response policies every `checkpoint_every_n_iters` iters.

    NOTE: this loops forever (`while True` with no break) — intended to be
    terminated externally.
    """
    scenario: PSROScenario = scenario_catalog.get(scenario_name=scenario_name)
    env_class = scenario.env_class
    env_config = scenario.env_config
    trainer_class = scenario.trainer_class
    policy_classes: Dict[(str, Type[Policy])] = scenario.policy_classes
    single_agent_symmetric_game = scenario.single_agent_symmetric_game
    if single_agent_symmetric_game:
        raise NotImplementedError
    get_trainer_config = scenario.get_trainer_config
    should_log_result_fn = scenario.ray_should_log_result_filter
    # Policy checkpoints are written every N training iterations (and on iter 1).
    checkpoint_every_n_iters = 500

    class PreAndPostEpisodeCallbacks(DefaultCallbacks):
        """RLlib callback that tags results and periodically snapshots both
        best-response policies plus their StrategySpec JSON metadata."""

        def on_train_result(self, *, trainer, result: dict, **kwargs):
            result['scenario_name'] = trainer.scenario_name
            training_iteration = result['training_iteration']
            super().on_train_result(trainer=trainer, result=result, **kwargs)
            if (((training_iteration % checkpoint_every_n_iters) == 0) or (training_iteration == 1)):
                for player in range(2):
                    checkpoint_metadata = create_metadata_with_new_checkpoint(policy_id_to_save=f'best_response_{player}', br_trainer=trainer, policy_player=player, save_dir=checkpoint_dir(trainer=trainer), timesteps_training=result['timesteps_total'], episodes_training=result['episodes_total'], checkpoint_name=f'best_response_player_{player}_iter_{training_iteration}.h5')
                    joint_pol_checkpoint_spec = StrategySpec(strategy_id=f'best_response_player_{player}_iter_{training_iteration}', metadata=checkpoint_metadata)
                    checkpoint_path = os.path.join(spec_checkpoint_dir(trainer), f'best_response_player_{player}_iter_{training_iteration}.json')
                    ensure_dir(checkpoint_path)
                    with open(checkpoint_path, '+w') as checkpoint_spec_file:
                        checkpoint_spec_file.write(joint_pol_checkpoint_spec.to_json())

    def select_policy(agent_id):
        # Two-player mapping: agent 0 -> best_response_0, agent 1 -> best_response_1.
        if (agent_id == 0):
            return 'best_response_0'
        elif (agent_id == 1):
            return 'best_response_1'
        else:
            raise ValueError(f'Unknown agent id: {agent_id}')
    init_ray_for_scenario(scenario=scenario, head_address=None, logging_level=logging.INFO)
    # Temporary env instance only used to read observation/action spaces.
    tmp_env = env_class(env_config=env_config)
    trainer_config = {'callbacks': PreAndPostEpisodeCallbacks, 'env': env_class, 'env_config': env_config, 'gamma': 1.0, 'num_gpus': 0, 'num_workers': 0, 'num_envs_per_worker': 1, 'multiagent': {'policies_to_train': [f'best_response_0', 'best_response_1'], 'policies': {f'best_response_0': (policy_classes['best_response'], tmp_env.observation_space, tmp_env.action_space, {}), f'best_response_1': (policy_classes['best_response'], tmp_env.observation_space, tmp_env.action_space, {})}, 'policy_mapping_fn': select_policy}}
    # Scenario-specific settings take precedence over the base config above.
    trainer_config = merge_dicts(trainer_config, get_trainer_config(tmp_env))
    trainer = trainer_class(config=trainer_config, logger_creator=get_trainer_logger_creator(base_dir=results_dir, scenario_name=scenario_name, should_log_result_fn=should_log_result_fn))
    trainer.scenario_name = scenario_name
    while True:
        train_iter_results = trainer.train()
        if print_train_results:
            # Strip bulky per-iteration stats before pretty-printing.
            if ('hist_stats' in train_iter_results):
                del train_iter_results['hist_stats']
            for key in ['best_response_0', 'best_response_1']:
                if ('td_error' in train_iter_results['info']['learner'][key]):
                    del train_iter_results['info']['learner'][key]['td_error']
            print(pretty_dict_str(train_iter_results))
class CTViTTrainer(nn.Module):
    """Trainer for a CT-ViT VAE with adversarial discriminator, built on
    HuggingFace Accelerate. Alternates generator and discriminator updates,
    keeps an optional EMA copy, and periodically saves reconstructions and
    model checkpoints.

    NOTE(review): `device`, `is_distributed`, `is_main`, `is_local_main` are
    plain methods (no @property). Expressions like `self.is_main and use_ema`
    therefore test a bound method (always truthy), and `self.device` yields a
    method, not a device — the code below works around this by reassigning
    `device = torch.device('cuda')` explicitly. Likely missing @property
    decorators; confirm against the upstream implementation.
    """

    # NOTE(review): `accelerate_kwargs: dict=dict()` is a shared mutable
    # default; ema_beta is accepted but never used below.
    def __init__(self, vae: CTViT, *, num_train_steps, batch_size, folder, train_on_images=False, num_frames=17, lr=3e-05, grad_accum_every=1, wd=0.0, max_grad_norm=0.5, discr_max_grad_norm=None, save_results_every=50, save_model_every=250, results_folder='./results', valid_frac=0.05, random_split_seed=42, use_ema=True, ema_beta=0.995, ema_update_after_step=0, ema_update_every=1, apply_grad_penalty_every=4, accelerate_kwargs: dict=dict()):
        super().__init__()
        image_size = vae.image_size
        self.accelerator = Accelerator(**accelerate_kwargs)
        self.vae = vae
        self.use_ema = use_ema
        # NOTE(review): self.is_main is a bound method here (always truthy),
        # so this effectively reduces to `if use_ema:`.
        if (self.is_main and use_ema):
            self.ema_vae = EMA(vae, update_after_step=ema_update_after_step, update_every=ema_update_every)
        # Step counter kept as a buffer so it is saved with the module state.
        self.register_buffer('steps', torch.Tensor([0]))
        self.num_train_steps = num_train_steps
        self.batch_size = batch_size
        self.grad_accum_every = grad_accum_every
        # Generator parameters = all VAE params minus the discriminator's.
        all_parameters = set(vae.parameters())
        discr_parameters = set(vae.discr.parameters())
        vae_parameters = (all_parameters - discr_parameters)
        self.vae_parameters = vae_parameters
        self.optim = get_optimizer(vae_parameters, lr=lr, wd=wd)
        # Discriminator learns at 1% of the generator LR.
        self.discr_optim = get_optimizer(discr_parameters, lr=(lr * 0.01), wd=wd)
        self.max_grad_norm = max_grad_norm
        self.discr_max_grad_norm = discr_max_grad_norm
        print('This is a test.')
        # NOTE(review): dataset_klass is computed but unused — the branch
        # below instantiates the classes directly.
        dataset_klass = (ImageDataset if train_on_images else VideoDataset)
        if train_on_images:
            self.ds = ImageDataset(folder, image_size)
        else:
            self.ds = VideoDataset(folder, image_size, num_frames=num_frames)
        # NOTE(review): both lines below clobber the constructor arguments
        # with hardcoded values (attribute self.valid_frac is set to 0.05
        # regardless of `valid_frac`; the split seed is forced to 42).
        self.valid_frac = 0.05
        random_split_seed = 42
        if (valid_frac > 0):
            # Deterministic train/valid split.
            train_size = int(((1 - valid_frac) * len(self.ds)))
            valid_size = (len(self.ds) - train_size)
            (self.ds, self.valid_ds) = random_split(self.ds, [train_size, valid_size], generator=torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly splitted {len(self.valid_ds)} samples')
        else:
            self.valid_ds = self.ds
            self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
        # NOTE(review): these samplers are built but never passed to the
        # DataLoaders below.
        batch_sampler_train = CustomBatchSampler(self.ds, batch_size=batch_size, drop_last=False)
        batch_sampler_val = CustomBatchSampler(self.valid_ds, batch_size=batch_size, drop_last=False)
        # Record the file paths of the split for reproducibility.
        # NOTE(review): `.dataset`/`.indices` exist only on random_split
        # Subsets — this breaks when valid_frac <= 0; confirm.
        list_train = []
        list_val = []
        for i in range(len(self.ds)):
            list_train.append(self.ds.dataset.paths[self.ds.indices[i]])
        for i in range(len(self.valid_ds)):
            list_val.append(self.valid_ds.dataset.paths[self.valid_ds.indices[i]])
        with open('train.txt', 'w') as f:
            for item in list_train:
                f.write((str(item) + '\n'))
        with open('valid.txt', 'w') as f:
            for item in list_val:
                f.write((str(item) + '\n'))
        self.dl = DataLoader(self.ds, batch_size=batch_size, shuffle=True, num_workers=1)
        self.valid_dl = DataLoader(self.valid_ds, batch_size=batch_size, shuffle=True, num_workers=1)
        # Infinite iterators so train_step can pull batches on demand.
        self.dl_iter = cycle(self.dl)
        self.valid_dl_iter = cycle(self.valid_dl)
        (self.vae, self.optim, self.discr_optim, self.dl_iter, self.valid_dl_iter) = self.accelerator.prepare(self.vae, self.optim, self.discr_optim, self.dl_iter, self.valid_dl_iter)
        self.save_model_every = save_model_every
        self.save_results_every = save_results_every
        self.apply_grad_penalty_every = apply_grad_penalty_every
        self.results_folder = Path(results_folder)
        # Interactive prompt before wiping a previous experiment's outputs.
        if ((len([*self.results_folder.glob('**/*')]) > 0) and yes_or_no('do you want to clear previous experiment checkpoints and results?')):
            rmtree(str(self.results_folder))
        self.results_folder.mkdir(parents=True, exist_ok=True)

    def save(self, path):
        """Save model + both optimizer states (main process only)."""
        if (not self.accelerator.is_local_main_process):
            return
        pkg = dict(model=self.accelerator.get_state_dict(self.vae), optim=self.optim.state_dict(), discr_optim=self.discr_optim.state_dict())
        torch.save(pkg, path)

    def load(self, path):
        """Restore model + optimizer states from a checkpoint written by save()."""
        path = Path(path)
        assert path.exists()
        pkg = torch.load(path)
        vae = self.accelerator.unwrap_model(self.vae)
        vae.load_state_dict(pkg['model'])
        self.optim.load_state_dict(pkg['optim'])
        self.discr_optim.load_state_dict(pkg['discr_optim'])

    def print(self, msg):
        """Print via Accelerate (main process only in distributed runs)."""
        self.accelerator.print(msg)

    # NOTE(review): the four accessors below look like they should be
    # @property (see class docstring).
    def device(self):
        return self.accelerator.device

    def is_distributed(self):
        return (not ((self.accelerator.distributed_type == DistributedType.NO) and (self.accelerator.num_processes == 1)))

    def is_main(self):
        return self.accelerator.is_main_process

    def is_local_main(self):
        return self.accelerator.is_local_main_process

    def train_step(self):
        """One training step: several generator update cycles, one
        discriminator update, optional EMA update, periodic sampling and
        checkpointing. Returns a dict of accumulated loss logs."""
        device = self.device
        # Works around `self.device` being a method (see class docstring);
        # also hardcodes CUDA.
        device = torch.device('cuda')
        steps = int(self.steps.item())
        # Gradient penalty applied every apply_grad_penalty_every steps.
        apply_grad_penalty = (not (steps % self.apply_grad_penalty_every))
        self.vae.train()
        logs = {}
        # Three generator update cycles per train_step.
        for i in range(3):
            for _ in range(self.grad_accum_every):
                img = next(self.dl_iter)
                device = torch.device('cuda')
                img = img.to(device)
                with self.accelerator.autocast():
                    loss = self.vae(img, apply_grad_penalty=apply_grad_penalty)
                self.accelerator.backward((loss / self.grad_accum_every))
                accum_log(logs, {'loss': (loss.item() / self.grad_accum_every)})
            if exists(self.max_grad_norm):
                self.accelerator.clip_grad_norm_(self.vae.parameters(), self.max_grad_norm)
            self.optim.step()
            self.optim.zero_grad()
        # Discriminator update (only when the VAE has a discriminator).
        if exists(self.vae.discr):
            self.discr_optim.zero_grad()
            for _ in range(self.grad_accum_every):
                img = next(self.dl_iter)
                device = torch.device('cuda')
                img = img.to(device)
                with self.accelerator.autocast():
                    loss = self.vae(img, return_discr_loss=True)
                self.accelerator.backward((loss / self.grad_accum_every))
                accum_log(logs, {'discr_loss': (loss.item() / self.grad_accum_every)})
            if exists(self.discr_max_grad_norm):
                self.accelerator.clip_grad_norm_(self.vae.discr.parameters(), self.discr_max_grad_norm)
            self.discr_optim.step()
            self.print(f"{steps}: vae loss: {logs['loss']} - discr loss: {logs['discr_loss']}")
        # NOTE(review): `self.is_main` is a bound method here (always truthy).
        if (self.is_main and self.use_ema):
            self.ema_vae.update()
        # Periodically reconstruct a validation batch and save samples.
        if (self.is_main and (not (steps % self.save_results_every))):
            vaes_to_evaluate = ((self.vae, str(steps)),)
            if self.use_ema:
                vaes_to_evaluate = (((self.ema_vae.ema_model, f'{steps}.ema'),) + vaes_to_evaluate)
            for (model, filename) in vaes_to_evaluate:
                model.eval()
                valid_data = next(self.valid_dl_iter)
                # 5-D tensors are treated as video (batch, frames, C, H, W).
                is_video = (valid_data.ndim == 5)
                device = torch.device('cuda')
                valid_data = valid_data.to(device)
                recons = model(valid_data, return_recons_only=True)
                if is_video:
                    sampled_videos_path = (self.results_folder / f'samples.(unknown)')
                    sampled_videos_path.mkdir(parents=True, exist_ok=True)
                    i = 0
                    for tensor in recons.unbind(dim=0):
                        tensor_to_nifti(tensor, str((sampled_videos_path / f'(unknown)_{i}.nii.gz')))
                        i = (i + 1)
                else:
                    # Interleave originals and reconstructions in one grid.
                    imgs_and_recons = torch.stack((valid_data, recons), dim=0)
                    imgs_and_recons = rearrange(imgs_and_recons, 'r b ... -> (b r) ...')
                    imgs_and_recons = imgs_and_recons.detach().cpu().float().clamp(0.0, 1.0)
                    grid = make_grid(imgs_and_recons, nrow=2, normalize=True, value_range=(0, 1))
                    logs['reconstructions'] = grid
                    save_image(grid, str((self.results_folder / f'(unknown).png')))
            self.print(f'{steps}: saving to {str(self.results_folder)}')
        # Periodically checkpoint raw (and EMA) model weights.
        if (self.is_main and (not (steps % self.save_model_every))):
            state_dict = self.vae.state_dict()
            model_path = str((self.results_folder / f'vae.{steps}.pt'))
            torch.save(state_dict, model_path)
            if self.use_ema:
                ema_state_dict = self.ema_vae.state_dict()
                model_path = str((self.results_folder / f'vae.{steps}.ema.pt'))
                torch.save(ema_state_dict, model_path)
            self.print(f'{steps}: saving model to {str(self.results_folder)}')
        self.steps += 1
        return logs

    def train(self, log_fn=noop):
        """Run train_step until num_train_steps, passing logs to log_fn."""
        device = next(self.vae.parameters()).device
        device = torch.device('cuda')
        while (self.steps < self.num_train_steps):
            logs = self.train_step()
            log_fn(logs)
        self.print('training complete')
def generate_hash(n_points, d, b, h):
    """Fill *h* with hash codes for `n_points` random `d`-dim points on the GPU.

    A fixed manual seed makes repeated calls produce identical hashes.
    `b` hyperplanes are drawn in homogeneous coordinates (d weights + bias).
    Returns *h* for convenience.
    """
    torch.manual_seed(0)
    points = torch.rand(n_points, d).cuda()
    hyperplanes = torch.randn(b, d + 1).cuda()
    compute_hashes(points, hyperplanes, h)
    return h
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train, num_epochs):
    """Fine-tune the module-level DeepSets transductive model.

    Loads the pretrained weights for the current dataset, fits on the given
    data, and returns the Keras ``History`` object (the original computed it
    but dropped it).

    NOTE(review): relies on module-level globals `deepsets_transductive_model`,
    `dataset_name` and `batch_size` — confirm they are defined before calling.
    """
    deepsets_transductive_model.load_weights('models/' + dataset_name + '/deepsets_transductive_model.h5')
    history = deepsets_transductive_model.fit(X_deepset_transductive_train, Y_deepset_transductive_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
    return history
def prepare_static_timestepping():
    """Set up static time-stepping, returning a function mapping a -> Δt (or None).

    Behaviour depends on the (module-level) `static_timestepping` setting:
    None (disabled), a path (read/write a tabulated stepping file), or a
    callable (user-supplied stepping function).

    NOTE(review): this block appears textually corrupted — several distinct
    variable names seem collapsed into `static_timestepping_a`, and a literal
    (probably the codebase's infinity constant) is missing on a few lines
    (marked below). Compare against the original file before relying on it.
    """
    static_timestepping_func = None
    if (not master):
        # Non-master processes only mirror the master's decision via broadcasts.
        if bcast():
            static_timestepping_func = (lambda a=(- 1): bcast())
        return static_timestepping_func
    apply_static_timestepping = False
    if (static_timestepping is None):
        pass
    elif isinstance(static_timestepping, str):
        if os.path.exists(static_timestepping):
            if os.path.isdir(static_timestepping):
                abort(f'Supplied static_timestepping = "{static_timestepping}" is a directory, not a file')
            apply_static_timestepping = True
            # NOTE(review): both unpack targets share one name here — almost
            # certainly a second variable (e.g. Δt values) was lost; verify.
            (static_timestepping_a, static_timestepping_a) = np.loadtxt(static_timestepping, unpack=True)
            static_timestepping_a = static_timestepping_a.copy()
            static_timestepping_a = static_timestepping_a.copy()
            # Map each (rounded) scale-factor value to its list of table entries.
            static_timestepping_data = collections.defaultdict(list)
            for (a, a) in zip(static_timestepping_a, static_timestepping_a):
                static_timestepping_data[a].append(a)
            for a_list in static_timestepping_data.values():
                # Reversed so that .pop() later yields entries in file order.
                a_list.reverse()
            (static_timestepping_a, static_timestepping_a) = remove_doppelgangers(static_timestepping_a, static_timestepping_a, rel_tol=t_reltol)
            # A decrease in a marks the start of a new monotonic interval
            # (the file may contain several concatenated runs).
            mask = (np.diff(static_timestepping_a) < 0)
            for index in range(1, len(mask)):
                # Forbid two consecutive interval breaks.
                mask[index] &= (not mask[(index - 1)])
            mask[(- 1)] = False
            interval_indices = list((np.where(mask)[0] + 1))
            a_intervals = []
            a_right = 0
            for index in interval_indices:
                (a_left, a_right) = (a_right, static_timestepping_a[index])
                a_intervals.append((a_left, a_right))
            interval_indices.append(static_timestepping_a.shape[0])
            # NOTE(review): right-hand tuple is truncated — a final value
            # (likely the infinity constant) is missing; syntax as written is broken.
            (a_left, a_right) = (a_right, )
            a_intervals.append((a_left, a_right))
            # One log-log linear interpolant per monotonic interval.
            static_timestepping_interps = []
            index_left = 0
            import scipy.interpolate
            for index_right in interval_indices:
                static_timestepping_interps.append((lambda a, *, f=scipy.interpolate.interp1d(np.log(static_timestepping_a[index_left:index_right]), np.log(static_timestepping_a[index_left:index_right]), 'linear', fill_value='extrapolate'): exp(float(f(log(a))))))
                index_left = index_right
            def static_timestepping_func(a=(- 1)):
                # a == -1 means "use the current universal scale factor".
                if (a == (- 1)):
                    (a, t) = (universals.a, universals.t)
                else:
                    t = cosmic_time(a)
                # Round a to the table's precision to look up an exact entry.
                n = int(ceil((log10((1 / t_reltol)) + 0.5)))
                a_list = static_timestepping_data.get(float(f'{{:.{n}e}}'.format(a)))
                if a_list:
                    a = a_list.pop()
                else:
                    # Fall back to interpolation within the enclosing interval.
                    for ((a_left, a_right), static_timestepping_interp) in zip(a_intervals, static_timestepping_interps):
                        # NOTE(review): comparison operand missing after `!=`
                        # (likely the infinity constant); broken as written.
                        if ((a_right != ) and isclose(float(a), float(a_right))):
                            continue
                        # NOTE(review): `machine_` looks truncated (machine epsilon?).
                        if isclose(float(a), float((a_left + machine_))):
                            a = a_left
                        if (a_left <= a < a_right):
                            break
                    else:
                        abort(f'static_timestepping_func(): a = {a} not in any interval')
                    a = static_timestepping_interp(a)
                a_next = (a + a)
                # NOTE(review): the `else` value is missing (likely infinity).
                t = ((cosmic_time(a_next) - t) if (a_next <= 1) else )
                return bcast(t)
            masterprint(f'Static time-stepping information will be read from "{static_timestepping}"')
        else:
            # File absent: prepare to record time-stepping there instead.
            static_timestepping_dir = os.path.dirname(static_timestepping)
            if static_timestepping_dir:
                os.makedirs(static_timestepping_dir, exist_ok=True)
            masterprint(f'Static time-stepping information will be written to "{static_timestepping}"')
    elif callable(static_timestepping):
        apply_static_timestepping = True
        def static_timestepping_func(a=(- 1)):
            if (a == (- 1)):
                (a, t) = (universals.a, universals.t)
            else:
                t = cosmic_time(a)
            a = static_timestepping(a)
            a_next = (a + a)
            # NOTE(review): the `else` value is missing (likely infinity).
            t = ((cosmic_time(a_next) - t) if (a_next <= 1) else )
            return bcast(t)
        masterprint('Static time-stepping configured using supplied function')
    else:
        abort(f'Could not interpret static_timestepping = {static_timestepping} of type {type(static_timestepping)}')
    # Tell the other processes whether static stepping is in effect.
    bcast(apply_static_timestepping)
    return static_timestepping_func
def make_conf_nll_loss_evaluator(cfg):
    """Instantiate a ConfidenceNllLoss from cfg.model.cmn.losses.nll_loss.

    The data sparsity flag is forwarded from cfg.data.sparse, and the
    'weight' entry is removed before instantiation.
    """
    loss_kwargs = cfg.model.cmn.losses.nll_loss.copy()
    loss_kwargs.update({'sparse': cfg.data.sparse})
    loss_kwargs.pop('weight')
    return ConfidenceNllLoss(**loss_kwargs)
def _tensor_to_tensorinfo(tensor):
    """Describe *tensor* as a plain dict, handling sparse and dense cases.

    Sparse tensors are decomposed into values/indices/dense_shape maps;
    dense tensors are flattened directly into the info dict.
    """
    if isinstance(tensor, sparse_tensor.SparseTensor):
        return {
            'is_dense': False,
            'values': _tensor_to_map(tensor.values),
            'indices': _tensor_to_map(tensor.indices),
            'dense_shape': _tensor_to_map(tensor.dense_shape),
        }
    info = {'is_dense': True}
    info.update(_tensor_to_map(tensor))
    return info
class StopWatch(object):
    """Accumulates named wall-clock timings across repeated start/stop cycles."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all recorded timings and any running timers."""
        # name -> list of elapsed durations; insertion-ordered for stable reports.
        self.timings = OrderedDict()
        # name -> start timestamp of each currently running timer.
        self.starts = {}

    def toggle(self, name):
        """Start the named timer if stopped, stop it if running."""
        if name in self.starts:
            self.stop(name)
        else:
            self.start(name)

    # Backward-compatible alias for the original (misspelled) method name.
    toogle = toggle

    def start(self, name):
        """Begin (or restart) timing *name*."""
        self.starts[name] = time.time()

    def stop(self, name):
        """Stop timing *name*, record the elapsed time, and return it.

        Stopping a timer that was never started records a duration of 0.
        """
        tic = time.time()
        if name not in self.timings:
            self.timings[name] = []
        diff = tic - self.starts.pop(name, tic)
        self.timings[name].append(diff)
        return diff

    def get(self, name=None, reduce=np.sum):
        """Return the reduced timing for *name*, or a dict of all reductions."""
        if name is not None:
            return reduce(self.timings[name])
        ret = {}
        for k in self.timings:
            ret[k] = reduce(self.timings[k])
        return ret

    def format_str(self, reduce=np.sum):
        """Human-readable 'name: duration' summary of all timers."""
        return ', '.join([f'{k}: {format_seconds(v)}' for (k, v) in self.get(reduce=reduce).items()])

    def __repr__(self):
        return self.format_str()

    def __str__(self):
        return self.format_str()
def read_MR(path, seed=1234):
    """Load the MR polarity corpus from *path* and shuffle it deterministically.

    Returns a (data, labels) pair shuffled with the given seed so repeated
    loads yield the same ordering.
    """
    corpus_file = os.path.join(path, 'rt-polarity.all')
    (data, labels) = read_corpus(corpus_file, encoding='latin-1')
    order = list(range(len(data)))
    random.seed(seed)
    random.shuffle(order)
    shuffled_data = [data[idx] for idx in order]
    shuffled_labels = [labels[idx] for idx in order]
    return (shuffled_data, shuffled_labels)
def euclidean_squared_distance(input1, input2):
    """Pairwise squared Euclidean distances between two sets of row vectors.

    Args:
        input1: tensor of shape (m, d).
        input2: tensor of shape (n, d).

    Returns:
        Tensor of shape (m, n) where entry (i, j) = ||input1[i] - input2[j]||^2,
        computed via the expansion ||a||^2 + ||b||^2 - 2 a.b.
    """
    m, n = input1.size(0), input2.size(0)
    mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)
    mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat = mat1 + mat2
    # The legacy positional signature addmm_(beta, alpha, mat1, mat2) was
    # deprecated and removed from PyTorch; use keyword beta/alpha instead.
    distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)
    return distmat
def test_array(doc):
    """Round-trip a fixed-size array through the binding layer and check docs."""
    arr = m.cast_array()
    assert arr == [1, 2]
    assert m.load_array(arr)
    expected_cast_sig = 'cast_array() -> List[int[2]]'
    expected_load_sig = 'load_array(arg0: List[int[2]]) -> bool'
    assert doc(m.cast_array) == expected_cast_sig
    assert doc(m.load_array) == expected_load_sig
def get_closest(code_line, project_type):
    """Return the retrieval-index line closest to *code_line*.

    Args:
        code_line: the query source line.
        project_type: project name used to pick the Lucene index directory.

    Returns:
        The closest line found, or '' for empty input / no match.
    """
    # Guard empty (and falsy) queries instead of only `== ''`.
    if not code_line:
        return ''
    idx_path = './retrieval/%s/lucene_index_bline2fline' % project_type.lower()
    closest_line = find_top(code_line, idx_path)
    # The original guarded against a None result from find_top; keep that.
    return '' if closest_line is None else closest_line
class CG(torch.nn.Module):
    """GNN for link prediction built from CGConv layers with optional
    jumping-knowledge (JK) concatenation and DRNL-style z-label embeddings.
    """
    def __init__(self, hidden_channels, num_layers, max_z, train_dataset, use_feature=False, node_embedding=None, dropout=0.5, jk=True, train_eps=False):
        """
        Args:
            hidden_channels: width of the hidden node representations.
            num_layers: number of CGConv layers.
            max_z: number of distinct structural z-labels to embed.
            train_dataset: dataset used only to read feature/edge-attr dims.
            use_feature: if True, concatenate raw node features to z embeddings.
            node_embedding: optional pre-built node-id embedding module.
            dropout: dropout probability before the final linear layer.
            jk: if True, concatenate all layer outputs before pooling.
            train_eps: unused here — TODO confirm it was meant for a GIN variant.
        """
        super(CG, self).__init__()
        self.use_feature = use_feature
        self.node_embedding = node_embedding
        self.max_z = max_z
        self.z_embedding = Embedding(self.max_z, hidden_channels)
        self.jk = jk
        self.num_edge_features = train_dataset.data.edge_attr.shape[1]
        # NOTE(review): debug print left in — consider removing/logging.
        print(self.num_edge_features)
        initial_channels = hidden_channels
        if self.use_feature:
            initial_channels += train_dataset.num_features
        if (self.node_embedding is not None):
            initial_channels += node_embedding.embedding_dim
        self.conv1 = CGConv((initial_channels, hidden_channels), self.num_edge_features)
        self.convs = torch.nn.ModuleList()
        for i in range((num_layers - 1)):
            self.convs.append(CGConv((hidden_channels, hidden_channels), self.num_edge_features))
        self.dropout = dropout
        # JK mode pools the concatenation of all layer outputs, hence the
        # wider input to lin1.
        if self.jk:
            self.lin1 = Linear((num_layers * hidden_channels), hidden_channels)
        else:
            self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, 1)
    def forward(self, z, edge_index, batch, x=None, edge_weight=None, node_id=None, edge_attr=None):
        """Score each graph in the batch; returns a (num_graphs, 1) tensor."""
        z_emb = self.z_embedding(z)
        # Multi-label z inputs come as (N, k) -> sum the k embeddings.
        if (z_emb.ndim == 3):
            z_emb = z_emb.sum(dim=1)
        if (self.use_feature and (x is not None)):
            x = torch.cat([z_emb, x.to(torch.float)], 1)
        else:
            x = z_emb
        if ((self.node_embedding is not None) and (node_id is not None)):
            n_emb = self.node_embedding(node_id)
            x = torch.cat([x, n_emb], 1)
        x = self.conv1(x, edge_index, edge_attr)
        xs = [x]
        for conv in self.convs:
            x = conv(x, edge_index, edge_attr)
            xs += [x]
        # JK: mean-pool the per-layer concatenation; otherwise pool the last layer.
        if self.jk:
            x = global_mean_pool(torch.cat(xs, dim=1), batch)
        else:
            x = global_mean_pool(xs[(- 1)], batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        return x
def softmax_dropout(x: torch.Tensor, p: float, mask: Optional[torch.Tensor]=None, causal: bool=False, mask_type: str='qk') -> torch.Tensor:
    """Apply masked softmax to *x* followed by dropout with probability *p*.

    Dropout with p == 0 is the identity, so that case falls back to the plain
    softmax path; otherwise the fused dispatch handles softmax + dropout.
    """
    if p != 0.0:
        return _softmax_dropout_dispatch(x, p, mask, causal, mask_type=mask_type)
    return softmax(x, mask=mask, mask_type=mask_type)
class Optimizer(object):
    """Wraps a torch optimizer, accumulating gradients across several backward
    passes and applying their average on :meth:`step`.
    """

    def __init__(self, opt_name, parameters, lr, clip_grad_norm=None):
        """
        Args:
            opt_name: 'sgd', 'rmsprop' or 'adam' (case/underscore-insensitive).
            parameters: iterable of parameters to optimize (may be empty).
            lr: learning rate.
            clip_grad_norm: if truthy, clip the gradient norm to this value
                before each optimizer step.
        """
        opt_name = opt_name.lower().replace('_', '').strip()
        if opt_name == 'sgd':
            optimizer = opt.SGD
        elif opt_name == 'rmsprop':
            optimizer = opt.RMSprop
        elif opt_name == 'adam':
            optimizer = opt.Adam
        else:
            # Previously an unknown name left `optimizer` unbound and crashed
            # later with UnboundLocalError; fail fast with a clear message.
            raise ValueError('Unsupported optimizer: {!r}'.format(opt_name))
        self.parameters = list(parameters)
        if not self.parameters:
            # torch optimizers reject an empty parameter list; use a dummy.
            self.parameters = [Variable(torch.zeros(1), requires_grad=True)]
        self.opt = optimizer(self.parameters, lr=lr)
        self.clip_grad_norm = clip_grad_norm
        self.stored_grads = None
        self.zero_stored_grad()
        self._n_iter = 0

    def collect(self):
        """Accumulate each parameter's current .grad into the stored buffers."""
        for (ind, param) in enumerate(self.parameters):
            if (param.grad is not None):
                self.stored_grads[ind] += param.grad

    def step(self):
        """Apply the averaged accumulated gradients, then reset all buffers."""
        assert (self._n_iter > 0), 'The optimizer does not have gradients to apply.'
        for (ind, param) in enumerate(self.parameters):
            param.grad = (self.stored_grads[ind] / self._n_iter)
        if self.clip_grad_norm:
            # clip_grad_norm (no trailing underscore) was deprecated and later
            # removed from PyTorch; use the in-place variant.
            torch.nn.utils.clip_grad_norm_(self.parameters, self.clip_grad_norm)
        self.opt.step()
        self._n_iter = 0
        self.zero_stored_grad()
        self.zero_current_grad()

    def step_iter(self, n_steps=1):
        """Record that *n_steps* backward passes have been accumulated."""
        self._n_iter += n_steps

    def zero_stored_grad(self):
        """Reset the accumulated-gradient buffers to zeros."""
        self.stored_grads = [Variable(param.data.new(param.size()).zero_()) for param in self.parameters]

    def zero_current_grad(self):
        """Zero the live .grad buffers via the wrapped optimizer."""
        self.opt.zero_grad()
class _Transition(nn.Module):
def __init__(self):
super(_Transition, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.pool(x)
return x |
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-RoBERTa.

    Token ids are shifted by ``fairseq_offset`` relative to the underlying
    SentencePiece model so that the fairseq special tokens occupy ids 0-3,
    with '<mask>' appended at the very end of the vocabulary.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLMRobertaTokenizer: install sentencepiece')
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Mimic fairseq's fixed ids for the first four special tokens.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # Offset between SentencePiece ids and fairseq ids.
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) + self.fairseq_offset)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from self.vocab_file on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLMRobertaTokenizer: install sentencepiece')
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add <s> ... </s> (single) or <s> A </s></s> B </s> (pair) framing."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """XLM-R does not use token types; always return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    @property
    def vocab_size(self):
        """Full vocabulary size (SentencePiece + offset + the added '<mask>').

        BUGFIX: must be a property — get_vocab() calls range(self.vocab_size),
        which raised TypeError when this was a plain method.
        """
        return ((len(self.sp_model) + self.fairseq_offset) + 1)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """Map a token string to an id, honoring the fairseq special tokens."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces; map that to unk.
        return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)

    def _convert_id_to_token(self, index):
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline back into spaces."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory):
        """Copy the SentencePiece model file into *save_directory*."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def _Graph_fromSDString(s: str, options: Optional[MDLOptions]=None, add: bool=True) -> List[Graph]:
    """Parse graphs from an SD string and load them via _graphsLoad.

    BUGFIX: the original used a mutable default `options=MDLOptions()`,
    evaluated once and shared across every call; create a fresh instance
    per call instead.
    """
    if options is None:
        options = MDLOptions()
    return _graphsLoad(_Graph_fromSDString_orig(s, options), add)
class DatasetCatalog(object):
    """Static registry mapping dataset names to their construction kwargs."""

    # `cfg.human` selects which ZJU-MoCap subject the generic
    # 'Human{}_0001_*' entries point at.
    human = cfg.human
    dataset_attrs = {
        'Human{}_0001_Train'.format(human): {'data_root': 'data/zju_mocap/CoreView_{}'.format(human), 'human': 'CoreView_{}'.format(human), 'ann_file': 'data/zju_mocap/CoreView_{}/annots.npy'.format(human), 'split': 'train'},
        'Human{}_0001_Test'.format(human): {'data_root': 'data/zju_mocap/CoreView_{}'.format(human), 'human': 'CoreView_{}'.format(human), 'ann_file': 'data/zju_mocap/CoreView_{}/annots.npy'.format(human), 'split': 'test'},
        'Human362_0001_Train': {'data_root': 'data/zju_mocap/CoreView_362', 'human': 'CoreView_362', 'ann_file': 'data/zju_mocap/CoreView_362/annots.npy', 'split': 'train'},
        'Human362_0001_Test': {'data_root': 'data/zju_mocap/CoreView_362', 'human': 'CoreView_362', 'ann_file': 'data/zju_mocap/CoreView_362/annots.npy', 'split': 'test'},
        'Human326_0001_Train': {'data_root': 'data/zju_mocap/CoreView_326', 'human': 'CoreView_326', 'ann_file': 'data/zju_mocap/CoreView_326/annots.npy', 'split': 'train'},
        'Human326_0001_Test': {'data_root': 'data/zju_mocap/CoreView_326', 'human': 'CoreView_326', 'ann_file': 'data/zju_mocap/CoreView_326/annots.npy', 'split': 'test'},
        'Human302_0001_Train': {'data_root': 'data/zju_mocap/CoreView_302', 'human': 'CoreView_302', 'ann_file': 'data/zju_mocap/CoreView_302/annots.npy', 'split': 'train'},
        'Human302_0001_Test': {'data_root': 'data/zju_mocap/CoreView_302', 'human': 'CoreView_302', 'ann_file': 'data/zju_mocap/CoreView_302/annots.npy', 'split': 'test'},
        'Human329_0001_Train': {'data_root': 'data/zju_mocap/CoreView_329', 'human': 'CoreView_329', 'ann_file': 'data/zju_mocap/CoreView_329/annots.npy', 'split': 'train'},
        'Human329_0001_Test': {'data_root': 'data/zju_mocap/CoreView_329', 'human': 'CoreView_329', 'ann_file': 'data/zju_mocap/CoreView_329/annots.npy', 'split': 'test'},
        'Female_1_casual_Train': {'data_root': 'data/people_snapshot/female-1-casual', 'split': 'train'},
        'Female_1_casual_Test': {'data_root': 'data/people_snapshot/female-1-casual', 'split': 'test'},
        'Female_3_casual_Train': {'data_root': 'data/people_snapshot/female-3-casual', 'split': 'train'},
        'Female_3_casual_Test': {'data_root': 'data/people_snapshot/female-3-casual', 'split': 'test'},
        'Male_2_casual_Train': {'data_root': 'data/people_snapshot/male-2-casual', 'split': 'train'},
        'Male_2_casual_Test': {'data_root': 'data/people_snapshot/male-2-casual', 'split': 'test'},
        'Female_4_casual_Train': {'data_root': 'data/people_snapshot/female-4-casual', 'split': 'train'},
        'Female_4_casual_Test': {'data_root': 'data/people_snapshot/female-4-casual', 'split': 'test'},
        'Male_3_casual_Train': {'data_root': 'data/people_snapshot/male-3-casual', 'split': 'train'},
        'Male_3_casual_Test': {'data_root': 'data/people_snapshot/male-3-casual', 'split': 'test'},
        'Male_5_outdoor_Train': {'data_root': 'data/people_snapshot/male-5-outdoor', 'split': 'train'},
        'Male_5_outdoor_Test': {'data_root': 'data/people_snapshot/male-5-outdoor', 'split': 'test'},
        'Male_2_outdoor_Train': {'data_root': 'data/people_snapshot/male-2-outdoor', 'split': 'train'},
        'Male_2_outdoor_Test': {'data_root': 'data/people_snapshot/male-2-outdoor', 'split': 'test'},
        'Female_8_plaza_Train': {'data_root': 'data/people_snapshot/female-8-plaza', 'split': 'train'},
        'Female_8_plaza_Test': {'data_root': 'data/people_snapshot/female-8-plaza', 'split': 'test'},
        'Female_6_plaza_Train': {'data_root': 'data/people_snapshot/female-6-plaza', 'split': 'train'},
        'Female_6_plaza_Test': {'data_root': 'data/people_snapshot/female-6-plaza', 'split': 'test'},
        'Female_7_plaza_Train': {'data_root': 'data/people_snapshot/female-7-plaza', 'split': 'train'},
        'Female_7_plaza_Test': {'data_root': 'data/people_snapshot/female-7-plaza', 'split': 'test'},
        'H36M_S9P_Train': {'data_root': 'data/h36m/S9/Posing', 'split': 'train'},
        'H36M_S9P_Test': {'data_root': 'data/h36m/S9/Posing', 'split': 'test'},
        'H36M_S11G_Train': {'data_root': 'data/h36m/S11/Greeting', 'split': 'train'},
        'H36M_S11G_Test': {'data_root': 'data/h36m/S11/Greeting', 'split': 'test'},
    }

    @staticmethod
    def get(name):
        """Return a copy of the attribute dict registered under *name*.

        (Marked @staticmethod — it takes no self and is called on the class.)
        """
        attrs = DatasetCatalog.dataset_attrs[name]
        return attrs.copy()
# NOTE(review): bare call that looks like the remnant of a stripped
# registration decorator (e.g. `@BACKBONES.register_module()` in mmpose) —
# confirm against the original file; as written this raises NameError unless
# `_module` is defined earlier in the file.
_module()
class CPM(BaseBackbone):
    """Convolutional Pose Machine backbone: a stem stage followed by
    (num_stages - 1) refinement stages, each consuming the previous stage's
    heatmaps concatenated with shared mid-level image features.
    """
    def __init__(self, in_channels, out_channels, feat_channels=128, middle_channels=32, num_stages=6, norm_cfg=dict(type='BN', requires_grad=True)):
        """
        Args:
            in_channels: input image channels (must be 3).
            out_channels: number of output heatmap channels per stage.
            feat_channels: width of the refinement-stage features.
            middle_channels: width of the per-stage projection of shared features.
            num_stages: total number of stages (>= 1).
            norm_cfg: mmcv norm layer config (deep-copied to avoid shared state).
        """
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__()
        assert (in_channels == 3)
        self.num_stages = num_stages
        assert (self.num_stages >= 1)
        # Stage 1: full stem that predicts the first set of heatmaps.
        self.stem = nn.Sequential(ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ConvModule(128, 32, 5, padding=2, norm_cfg=norm_cfg), ConvModule(32, 512, 9, padding=4, norm_cfg=norm_cfg), ConvModule(512, 512, 1, padding=0, norm_cfg=norm_cfg), ConvModule(512, out_channels, 1, padding=0, act_cfg=None))
        # Shared mid-level feature extractor reused by every refinement stage.
        self.middle = nn.Sequential(ConvModule(in_channels, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ConvModule(128, 128, 9, padding=4, norm_cfg=norm_cfg), nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        # Refinement stages operate on [previous heatmaps ++ projected features].
        self.cpm_stages = nn.ModuleList([CpmBlock((middle_channels + out_channels), feat_channels, norm_cfg) for _ in range((num_stages - 1))])
        self.middle_conv = nn.ModuleList([nn.Sequential(ConvModule(128, middle_channels, 5, padding=2, norm_cfg=norm_cfg)) for _ in range((num_stages - 1))])
        self.out_convs = nn.ModuleList([nn.Sequential(ConvModule(feat_channels, feat_channels, 1, padding=0, norm_cfg=norm_cfg), ConvModule(feat_channels, out_channels, 1, act_cfg=None)) for _ in range((num_stages - 1))])
    def init_weights(self, pretrained=None):
        """Load pretrained weights from a checkpoint path, or initialize
        convs with small normal noise and norm layers with constants."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        """Return a list with one heatmap tensor per stage."""
        stage1_out = self.stem(x)
        middle_out = self.middle(x)
        out_feats = []
        out_feats.append(stage1_out)
        for ind in range((self.num_stages - 1)):
            single_stage = self.cpm_stages[ind]
            out_conv = self.out_convs[ind]
            # Feed the previous stage's heatmaps plus projected shared features.
            inp_feat = torch.cat([out_feats[(- 1)], self.middle_conv[ind](middle_out)], 1)
            cpm_feat = single_stage(inp_feat)
            out_feat = out_conv(cpm_feat)
            out_feats.append(out_feat)
        return out_feats
def _get_config_module(fname):
    """Load config file *fname* from the repo's config directory as an mmcv Config."""
    from mmcv import Config
    config_path = join(_get_config_directory(), fname)
    return Config.fromfile(config_path)
def main(rank, device_count, world_size, cfg):
    """Distributed tracking worker: loads the shape model on this rank's GPU
    and runs deep_sot over its assigned tracklets/objects.

    Args:
        rank: global process rank.
        device_count: number of GPUs per node (rank is mapped modulo this).
        world_size: total number of processes.
        cfg: experiment config node.
    """
    setup(rank, world_size)
    device_id = (rank % device_count)
    device = torch.device(f'cuda:{device_id}')
    torch.cuda.set_device(device_id)
    if ('kitti' in cfg.DATASET):
        # Balance tracklets across ranks by (descending) length so each rank
        # gets roughly equal total work.
        tracklet_anns = KittiLoader.load_all_annotations(cfg.DATA_DIR, 'test')
        tracklet_lengths = [len(ann) for ann in tracklet_anns]
        sorted_tracklet_idx = sorted(range(len(tracklet_lengths)), key=(lambda k: tracklet_lengths[k]), reverse=True)
        assign_idx = get_balanced_index(sorted_tracklet_idx, tracklet_lengths, world_size)
        # For kitti, "id_dataloader" is just this rank's list of tracklet ids.
        id_dataloader = assign_idx[rank]
    else:
        id_dataset = ObjectListDataset(cfg.OBJECT_LIST_PATH, cfg.OUTPUT_DIR)
        sampler = DistributedSampler(id_dataset)
        id_dataloader = DataLoader(id_dataset, batch_size=1, num_workers=0, sampler=sampler)
    model = PointSDFModel(cfg.SHAPE_MODEL.CODE_DIM, cfg.SHAPE_MODEL.HIDDEN_DIM, cfg.SHAPE_MODEL.POINT_FEAT_DIMS, cfg.SHAPE_MODEL.DECODER_DIMS, cfg.SHAPE_MODEL.USE_RES_DECODER)
    # NOTE(review): "stade_dict" is a typo for "state_dict" (harmless local).
    stade_dict = torch.load(cfg.CKPT_PATH, map_location=f'cuda:{device_id}')['model']
    # Strip the DistributedDataParallel 'module.' prefix from parameter names.
    stade_dict = {k.replace('module.', ''): stade_dict[k] for k in stade_dict}
    model.load_state_dict(stade_dict)
    model = model.to(device)
    for id in id_dataloader:
        begin = time.time()
        try:
            seed_torch()
            if ('kitti' in cfg.DATASET):
                # Wrap the bare tracklet id so id[0] works in both branches.
                id = [id]
            result_list = deep_sot(id[0], model, cfg, device=device)
            if (len(result_list) > 0):
                iou_list = [r['iou_3d'] for r in result_list]
                print('id:', id[0], 'mean iou:', np.stack(iou_list).mean(), 'cost time:', (time.time() - begin))
        except Exception as e:
            # Broad catch keeps the worker alive across per-object failures;
            # errors are only printed, not re-raised.
            print('error id:', id[0], '')
            print(e)
    cleanup()
class Compose(object):
    """Compose (image, label) transforms.

    SegTransform instances receive both image and label jointly; any other
    transform is applied recursively to the image only.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, *args):
        assert ((len(args) == 2) or (isinstance(args[0], (list, tuple)) and (len(args[0]) == 2))), 'Two arguments must be specified, an image and a corresponding label'
        pair = list(args) if len(args) > 1 else args[0]
        for transform in self.transforms:
            if isinstance(transform, SegTransform):
                pair = list(transform(*pair))
            else:
                pair[0] = call_recursive(transform, pair[0])
        return tuple(pair)

    def __repr__(self):
        body = ''.join('\n' + ' {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
class ConvSeq3x3Branch(nn.Module):
    """A chain of InceptConv layers followed by parallel 1x3 and 3x1 convs
    whose outputs are concatenated along the channel axis (Inception-style
    factorized 3x3 branch).
    """

    def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list):
        super(ConvSeq3x3Branch, self).__init__()
        self.conv_list = nn.Sequential()
        branch_channels = in_channels
        layer_specs = zip(out_channels_list, kernel_size_list, strides_list, padding_list)
        for idx, (out_ch, ksize, stride, pad) in enumerate(layer_specs, start=1):
            self.conv_list.add_module('conv{}'.format(idx), InceptConv(in_channels=branch_channels, out_channels=out_ch, kernel_size=ksize, stride=stride, padding=pad))
            branch_channels = out_ch
        # Factorized pair: 1x3 and 3x1 applied in parallel to the chain output.
        self.conv1x3 = InceptConv(in_channels=branch_channels, out_channels=branch_channels, kernel_size=(1, 3), stride=1, padding=(0, 1))
        self.conv3x1 = InceptConv(in_channels=branch_channels, out_channels=branch_channels, kernel_size=(3, 1), stride=1, padding=(1, 0))

    def forward(self, x):
        feats = self.conv_list(x)
        return torch.cat((self.conv1x3(feats), self.conv3x1(feats)), dim=1)
def load_tf_model_weights(mdl, layer_lookup, tf_mdl_dir, is_resnet=True, arg_num=None):
    """Copy weights from a TF1 checkpoint directory into PyTorch model *mdl*.

    Args:
        mdl: target PyTorch module whose attributes are named in layer_lookup.
        layer_lookup: mapping from torch layer name to (tf scope, copier fn, ...)
            info used to locate and transfer each layer's weights.
        tf_mdl_dir: directory containing the TF model/checkpoint.
        is_resnet: if True, additionally compare model outputs as a sanity check.
        arg_num: unused here — TODO confirm it is needed by callers.
    """
    tf.reset_default_graph()
    with tf.Session() as sess:
        (tf_layers, tf_params, tf_shapes) = import_tf_params(tf_mdl_dir, sess)
        layer_info = get_layer_indices(layer_lookup, tf_layers)
        for (layer_name, info) in layer_info.items():
            print(f'Loading {info[0]}/* into {layer_name}')
            # info[2] holds the tf parameter indices, info[1] the copier fn.
            weights = [tf_params[i] for i in info[2]]
            layer = getattr(mdl, layer_name)
            info[1](weights, layer)
        test_loaded_params(mdl, tf_params, tf_layers)
        if is_resnet:
            # Sanity-check forward pass with a random NHWC batch.
            compare_model_outputs(mdl, sess, torch.randn(5, 160, 160, 3).detach())
def extract_features(data_loader, attr_file, attr2idx_file, device, image_model, attribute_topk=8, batch_size=128):
    """Extract EfficientNet-b7 image features and top-k attribute indices.

    Args:
        data_loader: dataset wrapper exposing get_items(batch_ids) ->
            (images, meta_info) — presumably meta_info rows start with an
            ASIN id; verify against caller.
        attr_file: JSON of per-ASIN predicted attributes.
        attr2idx_file: JSON mapping attribute string -> index.
        device: torch device for inference.
        image_model: checkpoint path for the EfficientNet weights.
        attribute_topk: number of predicted attributes kept per item.
        batch_size: inference batch size.

    Returns:
        Dict with keys 'asins' (list), 'image' (CPU float tensor) and
        'attribute' (LongTensor of top-k attribute indices).
    """
    model = EfficientNet.from_pretrained('efficientnet-b7')
    ckpt = torch.load(image_model, map_location='cpu')
    print('[INFO] Loading weights from {}'.format(image_model))
    # Checkpoints may either wrap the weights under 'model_state' or be raw.
    if ('model_state' in ckpt):
        model.load_state_dict(ckpt['model_state'])
    else:
        model.load_state_dict(ckpt)
    model = model.to(device)
    model = model.eval()
    with open(attr_file, 'r') as f:
        predicted_attr = json.load(f)
    with open(attr2idx_file, 'r') as f:
        attr2idx = json.load(f)
    num_data = len(data_loader)
    num_batch = math.floor((num_data / batch_size))
    asins = []
    image_ft = []
    attributes = []
    def compute_features(data):
        # Pooled, flattened EfficientNet features (dropout is a no-op in eval).
        with torch.no_grad():
            outs = model.extract_features(data)
            outs = model._avg_pooling(outs)
            outs = outs.flatten(start_dim=1)
            image_ft_batch = model._dropout(outs)
        return image_ft_batch
    def compute_attribute_idx(asin_batch):
        # Top-k predicted attribute strings per item, mapped to indices.
        labels = [predicted_attr[asin[0]]['predict'][:attribute_topk] for asin in asin_batch]
        attribute_idx = [[attr2idx[attr] for attr in label] for label in labels]
        return attribute_idx
    def append_batch(first, last):
        # Process items [first, last) and append results to the accumulators.
        batch_ids = torch.tensor([j for j in range(first, last)], dtype=torch.long, device=device)
        [data, meta_info] = data_loader.get_items(batch_ids)
        data = data.to(device)
        image_ft_batch = compute_features(data)
        image_ft.append(image_ft_batch)
        asins.extend(meta_info)
        attribute_idx = compute_attribute_idx(meta_info)
        attributes.extend(attribute_idx)
    for i in tqdm.tqdm(range(num_batch), ascii=True):
        append_batch((i * batch_size), ((i + 1) * batch_size))
    # Handle the final partial batch, if any.
    if ((num_batch * batch_size) < num_data):
        append_batch((num_batch * batch_size), num_data)
    image_ft = torch.cat(image_ft, dim=0).to('cpu')
    attributes = torch.from_numpy(np.asarray(attributes, dtype=int))
    features = {'asins': asins, 'image': image_ft, 'attribute': attributes}
    return features
class VoltageControlEnv(BaseEnvironment):
    """Multi-agent wrapper around the VoltageControl environment, exposing a
    per-agent dict API (observations, rewards, terminals) with a shared reward.
    """
    def __init__(self):
        self._environment = VoltageControl()
        self.possible_agents = [f'agent_{id}' for id in range(self._environment.get_num_of_agents())]
        self.num_agents = len(self.possible_agents)
        self._num_actions = self._environment.get_total_actions()
        # Continuous actions in [-1, 1]; observations are unbounded 50-dim vectors.
        self.action_spaces = {agent: Box((- 1), 1, (self._num_actions,), 'float32') for agent in self.possible_agents}
        self.observation_spaces = {agent: Box((- np.inf), np.inf, (50,), 'float32') for agent in self.possible_agents}
        # NOTE(review): info_spec declares 'legals' as a per-agent dict, but
        # reset()/step() put a single array there — confirm which is intended.
        self.info_spec = {'state': np.zeros((144,), 'float32'), 'legals': {agent: np.zeros((1,), 'float32') for agent in self.possible_agents}}
    def reset(self):
        """Reset the underlying env; return (per-agent observations, info)."""
        (observations, state) = self._environment.reset()
        info = {'state': state.astype('float32'), 'legals': np.zeros((1,), 'float32')}
        self._done = False
        observations = self._convert_observations(observations, self._done)
        return (observations, info)
    def step(self, actions: Dict[(str, np.ndarray)]):
        """Apply the joint action; return (obs, rewards, terminals, truncations, info)."""
        actions = self._preprocess_actions(actions)
        (reward, done, _) = self._environment.step(actions)
        # The underlying env yields one shared team reward; mirror it per agent.
        rewards = {}
        for agent in self.possible_agents:
            rewards[agent] = np.array(reward, 'float32')
        self._done = done
        next_observations = self._environment.get_obs()
        next_observations = self._convert_observations(next_observations, self._done)
        state = self._environment.get_state().astype('float32')
        info = {'state': state, 'legals': np.zeros((1,), 'float32')}
        terminals = {agent: done for agent in self.possible_agents}
        # This wrapper never truncates episodes itself.
        truncations = {agent: False for agent in self.possible_agents}
        return (next_observations, rewards, terminals, truncations, info)
    def _preprocess_actions(self, actions):
        """Concatenate per-agent action arrays in fixed agent order."""
        concat_action = []
        for agent in self.possible_agents:
            concat_action.append(actions[agent])
        concat_action = np.concatenate(concat_action)
        return concat_action
    def _convert_observations(self, observations: List, done: bool):
        """Convert the list of observations into a per-agent float32 dict."""
        dict_observations = {}
        for (i, agent) in enumerate(self.possible_agents):
            obs = np.array(observations[i], 'float32')
            dict_observations[agent] = obs
        return dict_observations
def translate_y_abs(img, pixels, **kwargs):
    """Shift *img* vertically by an absolute number of *pixels* via an affine warp."""
    _check_args_tf(kwargs)
    # Affine matrix (a, b, c, d, e, f): identity plus a vertical offset f.
    affine = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
class ZoeDepthNK(DepthModel):
    def __init__(self, core, bin_conf, bin_centers_type='softplus', bin_embedding_dim=128, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, memory_efficient=False, train_midas=True, is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
        """Multi-domain (e.g. NYU/KITTI) ZoeDepth head on top of a MiDaS core.

        One set of bin regressors / attractors / log-binomial heads is built
        per entry of *bin_conf*, and a small patch-transformer classifier
        routes each input to one of them at inference time.

        NOTE(review): `n_attractors=[16, 8, 4, 1]` is a mutable default
        argument — harmless as long as it is never mutated, but worth fixing.
        """
        super().__init__()
        self.core = core
        self.bin_conf = bin_conf
        self.min_temp = min_temp
        self.max_temp = max_temp
        self.memory_efficient = memory_efficient
        self.train_midas = train_midas
        self.is_midas_pretrained = is_midas_pretrained
        self.midas_lr_factor = midas_lr_factor
        self.encoder_lr_factor = encoder_lr_factor
        self.pos_enc_lr_factor = pos_enc_lr_factor
        self.inverse_midas = inverse_midas
        # Channel count of the MiDaS output conv activation fed to the heads.
        N_MIDAS_OUT = 32
        btlnck_features = self.core.output_channels[0]
        num_out_features = self.core.output_channels[1:]
        self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0)
        # Patch transformer + MLP form the binary domain classifier.
        self.patch_transformer = PatchTransformerEncoder(btlnck_features, 1, 128, use_class_token=True)
        self.mlp_classifier = nn.Sequential(nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 2))
        # Select bin-regressor/attractor flavors by normalization scheme.
        if (bin_centers_type == 'normed'):
            SeedBinRegressorLayer = SeedBinRegressor
            Attractor = AttractorLayer
        elif (bin_centers_type == 'softplus'):
            SeedBinRegressorLayer = SeedBinRegressorUnnormed
            Attractor = AttractorLayerUnnormed
        elif (bin_centers_type == 'hybrid1'):
            SeedBinRegressorLayer = SeedBinRegressor
            Attractor = AttractorLayerUnnormed
        elif (bin_centers_type == 'hybrid2'):
            SeedBinRegressorLayer = SeedBinRegressorUnnormed
            Attractor = AttractorLayer
        else:
            raise ValueError("bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
        self.bin_centers_type = bin_centers_type
        # One regressor / attractor stack / output head per domain config.
        self.seed_bin_regressors = nn.ModuleDict({conf['name']: SeedBinRegressorLayer(btlnck_features, conf['n_bins'], mlp_dim=(bin_embedding_dim // 2), min_depth=conf['min_depth'], max_depth=conf['max_depth']) for conf in bin_conf})
        self.seed_projector = Projector(btlnck_features, bin_embedding_dim, mlp_dim=(bin_embedding_dim // 2))
        self.projectors = nn.ModuleList([Projector(num_out, bin_embedding_dim, mlp_dim=(bin_embedding_dim // 2)) for num_out in num_out_features])
        self.attractors = nn.ModuleDict({conf['name']: nn.ModuleList([Attractor(bin_embedding_dim, n_attractors[i], mlp_dim=bin_embedding_dim, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type, memory_efficient=memory_efficient, min_depth=conf['min_depth'], max_depth=conf['max_depth']) for i in range(len(n_attractors))]) for conf in bin_conf})
        last_in = N_MIDAS_OUT
        self.conditional_log_binomial = nn.ModuleDict({conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) for conf in bin_conf})
    def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
        """Predict metric depth for ``x`` after routing to a per-domain head.

        A patch-transformer classifier votes on which domain ('nyu' or
        'kitti') the batch belongs to; the matching bin regressor /
        attractor / log-binomial head is then applied to the whole batch.

        Returns a dict with ``domain_logits`` and ``metric_depth`` (plus
        ``bin_centers`` / ``probs`` when requested).
        """
        (b, c, h, w) = x.shape
        self.orig_input_width = w
        self.orig_input_height = h
        # Relative depth map plus multi-scale decoder features from MiDaS.
        (rel_depth, out) = self.core(x, denorm=denorm, return_rel_depth=True)
        outconv_activation = out[0]
        btlnck = out[1]
        x_blocks = out[2:]
        x_d0 = self.conv2(btlnck)
        x = x_d0
        # Class-token embedding feeds the domain classifier; votes are summed
        # over the batch so a single domain is chosen for all samples.
        embedding = self.patch_transformer(x)[0]
        domain_logits = self.mlp_classifier(embedding)
        domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=(- 1))
        bin_conf_name = ['nyu', 'kitti'][torch.argmax(domain_vote, dim=(- 1)).squeeze().item()]
        try:
            # NOTE(review): attribute access (c.name) while dict-style access
            # is used below (conf['min_depth']) — bin_conf entries presumably
            # support both (e.g. EasyDict); confirm against the config loader.
            conf = [c for c in self.bin_conf if (c.name == bin_conf_name)][0]
        except IndexError:
            raise ValueError(f'bin_conf_name {bin_conf_name} not found in bin_confs')
        min_depth = conf['min_depth']
        max_depth = conf['max_depth']
        # Seed bin centers, then refine them through the attractor stack.
        seed_bin_regressor = self.seed_bin_regressors[bin_conf_name]
        (_, seed_b_centers) = seed_bin_regressor(x)
        if ((self.bin_centers_type == 'normed') or (self.bin_centers_type == 'hybrid2')):
            # Normalized attractors expect centers scaled into [0, 1].
            b_prev = ((seed_b_centers - min_depth) / (max_depth - min_depth))
        else:
            b_prev = seed_b_centers
        prev_b_embedding = self.seed_projector(x)
        attractors = self.attractors[bin_conf_name]
        for (projector, attractor, x) in zip(self.projectors, attractors, x_blocks):
            b_embedding = projector(x)
            (b, b_centers) = attractor(b_embedding, b_prev, prev_b_embedding, interpolate=True)
            b_prev = b
            prev_b_embedding = b_embedding
        last = outconv_activation
        # Upsample centers/embedding to the resolution of the final decoder map.
        b_centers = nn.functional.interpolate(b_centers, last.shape[(- 2):], mode='bilinear', align_corners=True)
        b_embedding = nn.functional.interpolate(b_embedding, last.shape[(- 2):], mode='bilinear', align_corners=True)
        clb = self.conditional_log_binomial[bin_conf_name]
        x = clb(last, b_embedding)
        # Expected depth = probability-weighted sum over the bin centers.
        out = torch.sum((x * b_centers), dim=1, keepdim=True)
        output = dict(domain_logits=domain_logits, metric_depth=out)
        if (return_final_centers or return_probs):
            output['bin_centers'] = b_centers
        if return_probs:
            output['probs'] = x
        return output
def get_lr_params(self, lr):
param_conf = []
if self.train_midas:
def get_rel_pos_params():
for (name, p) in self.core.core.pretrained.named_parameters():
if ('relative_position' in name):
(yield p)
def get_enc_params_except_rel_pos():
for (name, p) in self.core.core.pretrained.named_parameters():
if ('relative_position' not in name):
(yield p)
encoder_params = get_enc_params_except_rel_pos()
rel_pos_params = get_rel_pos_params()
midas_params = self.core.core.scratch.parameters()
midas_lr_factor = (self.midas_lr_factor if self.is_midas_pretrained else 1.0)
param_conf.extend([{'params': encoder_params, 'lr': (lr / self.encoder_lr_factor)}, {'params': rel_pos_params, 'lr': (lr / self.pos_enc_lr_factor)}, {'params': midas_params, 'lr': (lr / midas_lr_factor)}])
remaining_modules = []
for (name, child) in self.named_children():
if (name != 'core'):
remaining_modules.append(child)
remaining_params = itertools.chain(*[child.parameters() for child in remaining_modules])
param_conf.append({'params': remaining_params, 'lr': lr})
return param_conf
def get_conf_parameters(self, conf_name):
params = []
for (name, child) in self.named_children():
if isinstance(child, nn.ModuleDict):
for (bin_conf_name, module) in child.items():
if (bin_conf_name == conf_name):
params += list(module.parameters())
return params
def freeze_conf(self, conf_name):
for p in self.get_conf_parameters(conf_name):
p.requires_grad = False
def unfreeze_conf(self, conf_name):
for p in self.get_conf_parameters(conf_name):
p.requires_grad = True
def freeze_all_confs(self):
for (name, child) in self.named_children():
if isinstance(child, nn.ModuleDict):
for (bin_conf_name, module) in child.items():
for p in module.parameters():
p.requires_grad = False
def build(midas_model_type='DPT_BEiT_L_384', pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
model = ZoeDepthNK(core, **kwargs)
if pretrained_resource:
assert isinstance(pretrained_resource, str), 'pretrained_resource must be a string'
model = load_state_from_resource(model, pretrained_resource)
return model
    def build_from_config(config):
        """Build a ZoeDepthNK model from a config dict (unpacked into build)."""
        return ZoeDepthNK.build(**config)
def make_network_cnn(num_outputs: int, mlp_units: Sequence[int], conv_n_channels: int) -> FeedForwardNetwork:
    """Build a convolutional torso + MLP head Haiku network.

    For ``num_outputs == 1`` the head output is squeezed to a scalar per
    example (value head); otherwise outputs are treated as action logits
    and invalid actions are masked to the minimum float32 value.
    """
    def network_fn(observation: Observation) -> chex.Array:
        # Add a trailing channel axis so the board can go through Conv2D.
        board = observation.board.astype(float)[..., None]
        torso = hk.Sequential([
            hk.Conv2D(conv_n_channels, (2, 2), 1, padding='VALID'),
            jax.nn.relu,
            hk.Conv2D(conv_n_channels, (2, 2), 1, padding='VALID'),
            jax.nn.relu,
            hk.Conv2D(conv_n_channels, (2, 2), 1),
            jax.nn.relu,
            hk.Flatten(),
        ])
        features = torso(board)
        head = hk.nets.MLP((*mlp_units, num_outputs), activate_final=False)
        if num_outputs == 1:
            # Value head: drop the singleton output axis.
            return jnp.squeeze(head(features), axis=-1)
        # Policy head: mask invalid actions before returning logits.
        logits = head(features)
        return jnp.where(observation.action_mask, logits, jnp.finfo(jnp.float32).min)

    init, apply = hk.without_apply_rng(hk.transform(network_fn))
    return FeedForwardNetwork(init=init, apply=apply)
class Parser(_Parser):
    """Parser that adapts POS tags to the requested tagset before tagging."""

    def find_tags(self, tokens, **kwargs):
        # Choose a default tag-mapping function based on the tagset:
        # identity for Penn Treebank (or unspecified), Penn->Universal
        # conversion otherwise. An explicit 'map' kwarg always wins.
        tagset = kwargs.get('tagset')
        if tagset in (PENN, None):
            kwargs.setdefault('map', lambda token, tag: (token, tag))
        if tagset == UNIVERSAL:
            kwargs.setdefault('map', lambda token, tag: penntreebank2universal(token, tag))
        return _Parser.find_tags(self, tokens, **kwargs)
class SegmentationDecoder(nn.Module):
    """PSPNet-style segmentation decoder (pyramid pooling + final conv).

    Args:
        num_class: number of output channels (classes for task_type 'C').
        fc_dim: channel count of the last encoder feature map.
        pool_scales: output sizes of the adaptive-average-pool pyramid.
        task_type: 'C' applies log-softmax over classes; any other value
            returns the raw map (e.g. for regression).
    """

    def __init__(self, num_class=21, fc_dim=2048, pool_scales=(1, 2, 3, 6), task_type='C'):
        super(SegmentationDecoder, self).__init__()
        self.task_type = task_type
        # One pooling branch per scale: pool -> 1x1 conv -> BN -> ReLU.
        self.ppm = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                nn.BatchNorm2d(512),
                nn.ReLU(inplace=True),
            )
            for scale in pool_scales
        ])
        # Fuse the original features with all upsampled pyramid branches.
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512, 512, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, num_class, kernel_size=1),
        )

    def forward(self, conv_out, mask):
        """Decode the last encoder feature map; ``mask`` is passed through."""
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            # nn.functional.upsample is deprecated; interpolate is the exact
            # replacement (same defaults) and matches usage elsewhere in
            # this file.
            ppm_out.append(nn.functional.interpolate(pool_scale(conv5), (input_size[2], input_size[3]), mode='bilinear'))
        ppm_out = torch.cat(ppm_out, 1)
        x = self.conv_last(ppm_out)
        if self.task_type == 'C':
            x = nn.functional.log_softmax(x, dim=1)
        return (x, mask)
class MicroConverter():
    """Generates MACE Micro C++ sources and a packed binary for one model.

    Converts a NetDef + weights into generated headers (net def data, ops
    list, graph data, engine config, model data), engine interface code and
    a CMake file under ``micro/codegen/<model_name>/``, plus one
    ``.model/<model_name>.bin`` blob for file-based loading.
    """

    def __init__(self, model_conf, net_def, model_weights, model_name, offset16=False, write_magic=False):
        self.model_conf = model_conf
        data_type = model_conf.get(ModelKeys.data_type, mace_pb2.DT_FLOAT)
        # int8 quantization overrides the configured data type.
        if model_conf.get(ModelKeys.quantize_schema) == 'int8':
            data_type = mace_pb2.DT_INT8
        self.net_def = MicroIoConverter.convert(net_def, data_type)
        self.model_weights = model_weights
        self.model_name = model_name
        self.offset16 = offset16
        self.write_magic = write_magic
        self.code_gen = MicroCodeGen()
        self.np_data_type = data_type_to_np_dt(data_type, np.float32)
        self.model_dir = 'micro/codegen/' + model_name + '/'
        util.mkdir_p(self.model_dir)
        self.op_resolver = OpResolver(self.net_def, self.model_conf)

    def gen_code_from_model(self, model_name, pb_model, model_weights):
        """Generate the per-model data headers and the packed ``.bin`` blob."""
        net_def = pb_model
        # Runtime tensor memory requirement, stored in the engine config
        # and the binary header.
        mem_computer = MemComputer(net_def, self.np_data_type)
        tensor_mem_size = mem_computer.compute()
        net_def_converter = ProtoConverter(self.offset16, self.write_magic, NetDefExcludeFields)
        net_def_bytes = net_def_converter.proto_to_bytes(net_def)
        mace_check((net_def_bytes is not None), 'proto_to_bytes failed.')
        self.code_gen.gen_net_def_data(model_name, net_def_bytes, self.model_dir + 'micro_net_def_data.h')
        (op_src_path_list, op_class_name_list, scratch_buffer_size) = self.op_resolver.get_op_desc_list_from_model()
        self.code_gen.gen_ops_data(model_name, op_src_path_list, op_class_name_list, self.model_dir + 'micro_ops_list.h')
        graph = GraphBuilder(net_def, self.op_resolver).build()
        graph_converter = ProtoConverter(self.offset16, self.write_magic)
        graph_bytes = graph_converter.proto_to_bytes(graph)
        self.code_gen.gen_graph_data(model_name, graph_bytes, self.model_dir + 'micro_graph_data.h')
        engine_data = {}
        engine_data['tensor_mem_size'] = tensor_mem_size
        engine_data['input_size'] = len(net_def.input_info)
        engine_data['scratch_buffer_size'] = scratch_buffer_size
        self.code_gen.gen_engin_config(model_name, engine_data, self.model_dir + 'micro_engine_config.cc')
        tensor_bytes = bytearray(model_weights)
        self.code_gen.gen_model_data(model_name, tensor_bytes, self.model_dir + 'micro_model_data.h')
        # Pack [offset table | net def | graph | weights] into one blob.
        net_def_bytes = bytearray(net_def_bytes)
        graph_bytes = bytearray(graph_bytes)
        model_bytes = tensor_bytes
        offsets = np.zeros(6, dtype=np.int64)
        offsets[0] = offsets.size * 8  # header size: six int64 entries
        offsets[1] = offsets[0] + len(net_def_bytes)
        offsets[2] = offsets[1] + len(graph_bytes)
        offsets[3] = offsets[2] + len(model_bytes)
        offsets[4] = tensor_mem_size
        offsets[5] = scratch_buffer_size
        offset_bytes = bytearray(offsets.tobytes())
        const_mem_bytes = offset_bytes + net_def_bytes + graph_bytes + model_bytes
        if not path.exists('.model'):
            os.mkdir('.model')
        # Use a context manager so the file is flushed and closed reliably
        # (the handle was previously left open).
        with open(path.join('.model', model_name + '.bin'), 'wb') as model_bin:
            model_bin.write(const_mem_bytes)

    def gen_engine_interface_code(self, model_name):
        """Generate the C++ engine factory and the C interface wrappers."""
        self.code_gen.gen_engine_factory(model_name, self.model_dir + 'micro_engine_factory.h', self.model_dir + 'micro_engine_factory.cc')
        self.code_gen.gen_engine_c_interface(model_name, self.model_dir + 'micro_engine_c_interface.h', self.model_dir + 'micro_engine_c_interface.cc')

    def gen_cmake_file(self, model_name):
        """Generate the CMakeLists.txt for the generated sources."""
        self.code_gen.gen_cmake_file(model_name, self.model_dir + 'CMakeLists.txt')

    def gen_code(self):
        """Run the full code-generation pipeline for this model."""
        MicroOpConverter(self.net_def, self.model_weights, self.np_data_type).convert_op_params()
        self.gen_code_from_model(self.model_name, self.net_def, self.model_weights)
        self.gen_engine_interface_code(self.model_name)
        self.gen_cmake_file(self.model_name)

    def package(self, tar_package_path):
        """Create a tar.gz of the micro sources plus an empty WORKSPACE file."""
        tmp_dir = '/tmp/micro'
        tmp_workspace_file = 'WORKSPACE'
        # NOTE(review): shell commands built via os.system; the paths are
        # internal constants, but subprocess.run with a list would be safer.
        os.system('mkdir -p %s && touch %s/%s' % (tmp_dir, tmp_dir, tmp_workspace_file))
        tar_command = 'tar --exclude=micro/tools'
        tar_command += ' --exclude=micro/test'
        tar_command += ' --exclude=micro/build'
        tar_command += ' --exclude=micro/cmake'
        tar_command += ' --exclude=micro/dockerfiles'
        tar_command += ' --exclude=micro/examples'
        tar_command += ' --exclude=micro/third_party'
        tar_command += ' --exclude=micro/pretrained_models'
        tar_command += ' -zcf ' + tar_package_path
        tar_command += ' micro -C %s %s' % (tmp_dir, tmp_workspace_file)
        os.system(tar_command)
class LombScargleAsyncProcess(GPUAsyncProcess):
    """Asynchronous GPU Lomb-Scargle periodogram computation.

    Delegates NFFT-related settings and kernels to an NFFTAsyncProcess and
    adds the Lomb-Scargle CUDA kernels ('lomb' and 'lomb_dirsum') on top.
    Currently limited to a single harmonic.
    """

    def __init__(self, *args, **kwargs):
        super(LombScargleAsyncProcess, self).__init__(*args, **kwargs)
        self.nfft_proc = NFFTAsyncProcess(*args, **kwargs)
        # Share compile definitions and dtypes with the NFFT process.
        self._cpp_defs = self.nfft_proc._cpp_defs
        self.real_type = self.nfft_proc.real_type
        self.complex_type = self.nfft_proc.complex_type
        self.block_size = self.nfft_proc.block_size
        self.module_options = self.nfft_proc.module_options
        self.use_double = self.nfft_proc.use_double
        self.memory = None
        self.nharmonics = kwargs.get('nharmonics', 1)
        if (self.nharmonics > 1):
            raise Exception('Only 1 harmonic is supported right now')

    def _compile_and_prepare_functions(self, **kwargs):
        """Compile the 'lomb' CUDA module and prepare both kernels."""
        module_text = _module_reader(find_kernel('lomb'), self._cpp_defs)
        self.module = SourceModule(module_text, options=self.module_options)
        # Kernel argument signatures for pycuda's func.prepare().
        self.dtypes = dict(lomb=[np.intp, np.intp, np.intp, np.intp, np.int32, self.real_type, self.real_type, np.int32, np.int32], lomb_dirsum=[np.intp, np.intp, np.intp, np.intp, np.intp, np.int32, np.int32, self.real_type, self.real_type, self.real_type, self.real_type, np.int32])
        self.nfft_proc._compile_and_prepare_functions(**kwargs)
        for (fname, dtype) in self.dtypes.items():
            func = self.module.get_function(fname)
            self.prepared_functions[fname] = func.prepare(dtype)
        # Deterministic ordering: kernels sorted by name.
        self.function_tuple = tuple((self.prepared_functions[fname] for fname in sorted(self.dtypes.keys())))

    def memory_requirement(self, n0, nf, k0, nbatch=1, autoadjust_sigma=False, **kwargs):
        """Rough GPU memory requirement in bytes for one computation.

        n0: number of observations; nf: number of frequencies; k0: index of
        the first frequency; nbatch: number of simultaneous lightcurves.
        """
        H = self.nharmonics
        sigma = self.nfft_proc.sigma
        m = self.nfft_proc.get_m(nf)
        if autoadjust_sigma:
            sigma = int(np.round((float((sigma * (nf + k0))) / nf)))
        fft_size = (H * (nf + k0))
        # BUG FIX: 'mem' was used before being initialized below, raising
        # UnboundLocalError on every call.
        mem = 0
        mem += (3 * n0)
        mem += nf
        rsize = self.real_type(1).nbytes
        csize = self.complex_type(1).nbytes
        c = int(np.ceil((float(csize) / rsize)))
        if kwargs.get('use_fft', True):
            # NOTE(review): this assignment overwrites the counts accumulated
            # above; preserved as-is — confirm the intended accounting.
            mem = ((c * sigma) * (fft_size - k0))
            mem += ((c * sigma) * ((2 * fft_size) - k0))
            mem += (((2 * n0) + (2 * m)) + 1)
        if (H > 1):
            mem += (((2 * H) ** 2) * nbatch)
        mem += nbatch
        # Convert from real-element counts to bytes.
        mem *= rsize
        return mem

    def allocate_for_single_lc(self, t, y, dy, nf, k0=0, stream=None, **kwargs):
        """Allocate and fill GPU memory for one lightcurve (t, y, dy)."""
        m = self.nfft_proc.get_m(nf)
        sigma = self.nfft_proc.sigma
        kwargs_lsmem = dict(use_double=self.use_double, nharmonics=self.nharmonics)
        kwargs_lsmem.update(kwargs)
        mem = LombScargleMemory(sigma, stream, m, k0=k0, **kwargs_lsmem)
        mem.fromdata(t=t, y=y, dy=dy, nf=nf, allocate=True, **kwargs)
        return mem

    def preallocate(self, max_nobs, nlcs=1, nf=None, k0=None, freqs=None, streams=None, **kwargs):
        """Preallocate buffered memory containers for ``nlcs`` lightcurves.

        Either ``freqs`` or both ``nf`` and ``k0`` must be provided.
        """
        if (freqs is not None):
            k0 = get_k0(freqs)
            nf = len(freqs)
        if (nf is not None):
            assert (k0 is not None)
        m = self.nfft_proc.get_m(nf)
        sigma = self.nfft_proc.sigma
        self.memory = []
        for i in range(nlcs):
            stream = (None if (streams is None) else streams[i])
            mem = LombScargleMemory(sigma, stream, m, k0=k0, buffered_transfer=True, n0_buffer=max_nobs, nf=nf, use_double=self.use_double, nharmonics=self.nharmonics, **kwargs)
            mem.allocate(**kwargs)
            self.memory.append(mem)
        return self.memory

    def autofrequency(self, *args, **kwargs):
        """Delegate automatic frequency-grid selection to the utility helper."""
        return utils_autofreq(*args, **kwargs)

    def _nfreqs(self, *args, **kwargs):
        # Number of frequencies the automatic grid would produce.
        return len(self.autofrequency(*args, **kwargs))

    def allocate(self, data, nfreqs=None, k0s=None, **kwargs):
        """Allocate one memory container per lightcurve in ``data``."""
        if (len(data) > len(self.streams)):
            self._create_streams((len(data) - len(self.streams)))
        allocated_memory = []
        nfrqs = nfreqs
        if (nfrqs is None):
            nfrqs = [self._nfreqs(t, **kwargs) for (t, y, dy) in data]
        elif isinstance(nfreqs, int):
            # A single integer means the same grid size for every lightcurve.
            nfrqs = (nfrqs * np.ones(len(data)))
        if (k0s is None):
            k0s = ([1] * len(nfrqs))
        elif isinstance(k0s, float):
            k0s = ([k0s] * len(nfrqs))
        for (i, ((t, y, dy), nf, k0)) in enumerate(zip(data, nfrqs, k0s)):
            mem = self.allocate_for_single_lc(t, y, dy, nf, k0=k0, stream=self.streams[i], **kwargs)
            allocated_memory.append(mem)
        return allocated_memory

    def run(self, data, use_fft=True, memory=None, freqs=None, **kwargs):
        """Compute periodograms for each (t, y, dy) tuple in ``data``.

        Returns a list of (freqs, async_result) pairs, one per lightcurve.
        """
        if ((not hasattr(self, 'prepared_functions')) or (not all([(func in self.prepared_functions) for func in ['lomb', 'lomb_dirsum']]))):
            self._compile_and_prepare_functions(**kwargs)
        frqs = freqs
        if (frqs is None):
            frqs = [self.autofrequency(d[0], **kwargs) for d in data]
        elif isinstance(frqs[0], float):
            # A single frequency grid is shared by all lightcurves.
            frqs = ([frqs] * len(data))
        assert (len(frqs) == len(data))
        dfs = [(frq[1] - frq[0]) for frq in frqs]
        k0s = [get_k0(frq) for frq in frqs]
        # Validate each grid against its starting index (side effect only).
        [check_k0(frq, k0=k0) for (frq, k0) in zip(frqs, k0s)]
        if (memory is None):
            memory = self.memory
        if (memory is None):
            nfreqs = [len(frq) for frq in frqs]
            memory = self.allocate(data, nfreqs=nfreqs, k0s=k0s, use_fft=use_fft, **kwargs)
        else:
            # Reuse preallocated containers: reset and refill them.
            for (i, (t, y, dy)) in enumerate(data):
                memory[i].set_gpu_arrays_to_zero(**kwargs)
                memory[i].setdata(t=t, y=y, dy=dy, **kwargs)
        ls_kwargs = dict(block_size=self.block_size, use_fft=use_fft)
        ls_kwargs.update(kwargs)
        funcs = (self.function_tuple, self.nfft_proc.function_tuple)
        results = [lomb_scargle_async(memory[i], funcs, frqs[i], **ls_kwargs) for i in range(len(data))]
        results = [(f, r) for (f, r) in zip(frqs, results)]
        return results

    def batched_run_const_nfreq(self, data, batch_size=10, use_fft=True, freqs=None, only_return_best_freqs=False, ignore_freq_mask=None, **kwargs):
        """Run many lightcurves on one shared frequency grid, in batches.

        Returns a list of (freqs, power) pairs, or — when
        ``only_return_best_freqs`` — the best frequency and its Baluev
        significance for each lightcurve.
        """
        if ((not hasattr(self, 'prepared_functions')) or (not all([(func in self.prepared_functions) for func in ['lomb', 'lomb_dirsum']]))):
            self._compile_and_prepare_functions(**kwargs)
        bsize = min([len(data), batch_size])
        if (len(self.streams) < bsize):
            self._create_streams((bsize - len(self.streams)))
        streams = [self.streams[i] for i in range(bsize)]
        max_ndata = max([len(t) for (t, y, dy) in data])
        if (freqs is None):
            # Derive the shared grid from the longest-baseline lightcurve.
            data_with_max_baseline = max(data, key=(lambda d: (max(d[0]) - min(d[0]))))
            freqs = self.autofrequency(data_with_max_baseline[0], **kwargs)
            df = (freqs[1] - freqs[0])
            k0 = get_k0(freqs)
            nf = (int(round((max(freqs) / df))) - k0)
            freqs = (df * (k0 + np.arange(nf)))
        df = (freqs[1] - freqs[0])
        k0 = get_k0(freqs)
        nf = len(freqs)
        check_k0(freqs, k0=k0)
        lsps = []
        batches = []
        while ((len(batches) * batch_size) < len(data)):
            start = (len(batches) * batch_size)
            finish = (start + min([batch_size, (len(data) - start)]))
            batches.append([data[i] for i in range(start, finish)])
        m = self.nfft_proc.get_m(nf)
        sigma = self.nfft_proc.sigma
        kwargs_lsmem = dict(buffered_transfer=True, n0_buffer=max_ndata, use_double=self.use_double, nharmonics=self.nharmonics, use_fft=use_fft)
        kwargs_lsmem.update(kwargs)
        # One reusable memory container per stream, shared across batches.
        memory = [LombScargleMemory(sigma, stream, m, k0=k0, **kwargs_lsmem) for stream in streams]
        [mem.allocate(nf=nf, **kwargs) for mem in memory]
        funcs = (self.function_tuple, self.nfft_proc.function_tuple)
        (best_freqs, best_freq_significances) = ([], [])
        default_mask = np.array(([True] * len(freqs)))
        mask = (default_mask if (ignore_freq_mask is None) else (~ np.asarray(ignore_freq_mask)))
        for (b, batch) in enumerate(batches):
            results = self.run(batch, memory=memory, freqs=freqs, use_fft=use_fft, **kwargs)
            self.finish()
            for (i, (f, p)) in enumerate(results):
                if only_return_best_freqs:
                    best_index = np.argmax(p[mask])
                    # Baluev false-alarm probability -> significance.
                    fap = fap_baluev(batch[i][0], batch[i][2], p[mask], np.max(freqs[mask]))
                    significance = (1.0 - fap[best_index])
                    best_freqs.append(freqs[mask][best_index])
                    best_freq_significances.append(significance)
                else:
                    lsps.append(np.copy(p))
        if only_return_best_freqs:
            return (best_freqs, best_freq_significances)
        else:
            return [(freqs, lsp) for lsp in lsps]
class Inertial(xmlr.Object):
    """Inertial properties of a link: mass, inertia tensor and origin pose."""

    def __init__(self, mass=0.0, inertia=None, origin=None):
        # Plain data holder; fields mirror the URDF <inertial> element.
        self.mass = mass
        self.inertia = inertia
        self.origin = origin
class InteractionNet(pyg.nn.MessagePassing):
    """Graph interaction-network layer on a fixed edge set.

    Sender node indices are offset past the receiver range so both can live
    in one concatenated node tensor. Messages are an MLP of
    (edge, sender, receiver) features; the aggregated messages drive a
    residual receiver update, and optionally a residual edge update.
    """

    def __init__(self, edge_index, input_dim, update_edges=True, hidden_layers=1, hidden_dim=None, edge_chunk_sizes=None, aggr_chunk_sizes=None, aggr='sum'):
        assert (aggr in ('sum', 'mean')), f'Unknown aggregation method: {aggr}'
        super().__init__(aggr=aggr)
        if (hidden_dim is None):
            # Default hidden width matches the input dimensionality.
            hidden_dim = input_dim
        # Re-index so senders and receivers each start at 0 ...
        edge_index = (edge_index - edge_index.min(dim=1, keepdim=True)[0])
        self.num_rec = (edge_index[1].max() + 1)
        # ... then shift sender indices past the receiver range, matching the
        # cat((rec_rep, send_rep)) layout used in forward().
        edge_index[0] = (edge_index[0] + self.num_rec)
        self.register_buffer('edge_index', edge_index, persistent=False)
        # Edge MLP consumes [edge_attr, sender, receiver] -> 3 * input_dim;
        # aggregation MLP consumes [receiver, aggregated messages].
        edge_mlp_recipe = ([(3 * input_dim)] + ([hidden_dim] * (hidden_layers + 1)))
        aggr_mlp_recipe = ([(2 * input_dim)] + ([hidden_dim] * (hidden_layers + 1)))
        if (edge_chunk_sizes is None):
            self.edge_mlp = utils.make_mlp(edge_mlp_recipe)
        else:
            # Separate MLPs per chunk of edges (e.g. per edge type).
            self.edge_mlp = SplitMLPs([utils.make_mlp(edge_mlp_recipe) for _ in edge_chunk_sizes], edge_chunk_sizes)
        if (aggr_chunk_sizes is None):
            self.aggr_mlp = utils.make_mlp(aggr_mlp_recipe)
        else:
            self.aggr_mlp = SplitMLPs([utils.make_mlp(aggr_mlp_recipe) for _ in aggr_chunk_sizes], aggr_chunk_sizes)
        self.update_edges = update_edges

    def forward(self, send_rep, rec_rep, edge_rep):
        """Apply the layer; returns updated receivers (and edges if enabled)."""
        # Receivers first, then senders — consistent with the index offset
        # applied to edge_index[0] in __init__.
        node_reps = torch.cat((rec_rep, send_rep), dim=1)
        (edge_rep_aggr, edge_diff) = self.propagate(self.edge_index, x=node_reps, edge_attr=edge_rep)
        rec_diff = self.aggr_mlp(torch.cat((rec_rep, edge_rep_aggr), dim=(- 1)))
        # Residual updates for node (and optionally edge) representations.
        rec_rep = (rec_rep + rec_diff)
        if self.update_edges:
            edge_rep = (edge_rep + edge_diff)
            return (rec_rep, edge_rep)
        return rec_rep

    def message(self, x_j, x_i, edge_attr):
        # x_j: sender features, x_i: receiver features (pyg convention).
        return self.edge_mlp(torch.cat((edge_attr, x_j, x_i), dim=(- 1)))

    def aggregate(self, messages, index, ptr, dim_size):
        # Aggregate into exactly num_rec receiver slots, and also pass the
        # raw per-edge messages through so propagate() can return them as
        # the edge updates.
        aggr = super().aggregate(messages, index, ptr, self.num_rec)
        return (aggr, messages)
class StatsCollectorTest(unittest.TestCase):
    """Tests for JobMetricCollector runtime-stats collection."""

    def test_job_metric_collector(self):
        collector = JobMetricCollector('1111', 'default', 'local', 'dlrover')
        collector.collect_dataset_metric('test', 1000)
        monitor = SpeedMonitor()
        now = int(time.time())
        monitor.set_target_worker_num(1)
        # 1000 global steps over 10 seconds -> a speed of 100 steps/s.
        monitor.collect_global_step(100, now)
        monitor.collect_global_step(1100, now + 10)
        monitor.add_running_worker(NodeType.WORKER, 0)
        node = Node(NodeType.WORKER, 0, None)
        collector._stats_reporter._runtime_stats = []
        collector.collect_runtime_stats(monitor, [node])
        self.assertEqual(len(collector._runtime_metric.running_nodes), 1)
        self.assertEqual(collector._runtime_metric.speed, 100)
        self.assertEqual(len(collector._stats_reporter._runtime_stats), 1)
def eval(args, epoch, dataset, dataloader, flownmt):
    """Evaluate the model: reconstruction, translation BLEU and ELBO terms.

    Note: the function name shadows the builtin ``eval`` (kept for the
    existing callers). Returns (bleu, nll, recon, kl, length, ppl).
    """
    flownmt.eval()
    flownmt.sync()
    reconstruct(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
    bleu = translate(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
    test_k = 3
    # Accumulate loss terms over the whole evaluation set.
    totals = {'recon': 0.0, 'kl': 0.0, 'len': 0.0}
    num_insts = 0
    num_words = 0
    for src, tgt, src_masks, tgt_masks in dataloader:
        recon, kl, llen = flownmt.loss(src, tgt, src_masks, tgt_masks, nsamples=test_k, eval=True)
        totals['recon'] += recon.sum().item()
        totals['kl'] += kl.sum().item()
        totals['len'] += llen.sum().item()
        num_insts += src.size(0)
        num_words += tgt_masks.sum().item()
    recon_loss = totals['recon'] / num_insts
    kl_loss = totals['kl'] / num_insts
    length_loss = totals['len'] / num_insts
    nll = kl_loss + recon_loss
    # Perplexity is per word, so rescale the per-instance NLL.
    ppl = np.exp(nll * num_insts / num_words)
    logging('Ave NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}'.format(nll, recon_loss, kl_loss, length_loss, ppl, bleu), args.log)
    logging(('-' * 100), args.log)
    return (bleu, nll, recon_loss, kl_loss, length_loss, ppl)
class _CommonSchemaConstants():
    """String keys shared across explanation serialization schemas."""
    # Key for per-instance feature importances.
    LOCAL_IMPORTANCE = 'local_importance'
    # Key for aggregated (global) feature importances.
    SUMMARY_IMPORTANCE = 'summary_importance'
    # Key for serialization metadata.
    METADATA = 'metadata'
class SteerControllerParam(PIDParam):
    """PID gains and limits for the steering-angle controller.

    NOTE(review): annotated like a dataclass and ``from_vehicle_params``
    takes ``cls`` without a visible @classmethod — decorators appear to have
    been stripped from this source; confirm against the original file.
    """
    kP: float = 4
    kI: float = 0.1
    kD: float = 0.2
    # Integral anti-windup clamp (lower, upper).
    antiwindup: tuple[(float, float)] = ((- 0.5), 0.5)
    # Setpoint limited to +/- 30 degrees (pi/6 radians).
    setpoint_minmax: tuple[(float, float)] = (((- math.pi) / 6), (math.pi / 6))
    output_minmax: tuple[(float, float)] = ((- 1), 1)
    def from_vehicle_params(cls, vehicle_param: VehicleParameters) -> 'SteerControllerParam':
        """Derive limits from a vehicle's max steering angle/rate."""
        return SteerControllerParam(setpoint_minmax=((- vehicle_param.delta_max), vehicle_param.delta_max), output_minmax=((- vehicle_param.ddelta_max), vehicle_param.ddelta_max))
def forwardXXreverse(args, cpc_model, device, data_loader, output_ark, output_scp):
    """Forward a bidirectional CPC model and write features to Kaldi ark/scp.

    Each batch yields (utt_id, data, data_r) where ``data_r`` is the
    time-reversed signal; the predicted features are written per utterance
    through a piped copy-feats command.
    """
    logger.info('Starting Forward Passing')
    cpc_model.eval()
    ark_scp_output = ((('ark:| copy-feats --compress=true ark:- ark,scp:' + output_ark) + ',') + output_scp)
    with torch.no_grad():
        with ko.open_or_fd(ark_scp_output, 'wb') as f:
            for [utt_id, data, data_r] in data_loader:
                data = data.float().unsqueeze(1).to(device)
                data_r = data_r.float().unsqueeze(1).to(device)
                data = data.contiguous()
                # BUG FIX: previously assigned data.contiguous() to data_r,
                # silently replacing the reversed input with the forward one.
                data_r = data_r.contiguous()
                hidden1 = cpc_model.init_hidden1(len(data))
                hidden2 = cpc_model.init_hidden2(len(data))
                output = cpc_model.predict(data, data_r, hidden1, hidden2)
                mat = output.squeeze(0).cpu().numpy()
                ko.write_mat(f, mat, key=utt_id[0])
def get_imagenet_label_wid_pairs():
    """Return (primary label, WordNet id) pairs for the ImageNet val split."""
    root = get_imagenet_path()
    dataset = datasets.ImageNet(root, split='val', transform='none')
    # Each entry of dataset.classes is a tuple of synonyms; keep the first.
    return [(names[0], wid) for names, wid in zip(dataset.classes, dataset.wnids)]
class Token_transformer(nn.Module):
    """Transformer block used in token-to-token (T2T) stages.

    Attention maps ``dim`` -> ``in_dim`` channels; the MLP then refines the
    ``in_dim`` features with a residual connection (no residual around the
    attention itself, since it changes the channel dimension).
    """

    def __init__(self, dim, in_dim, num_heads, mlp_ratio=1.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, in_dim=in_dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth; identity when drop_path is disabled.
        self.drop_path = nn.Identity() if drop_path <= 0.0 else DropPath(drop_path)
        self.norm2 = norm_layer(in_dim)
        hidden = int(in_dim * mlp_ratio)
        self.mlp = Mlp(in_features=in_dim, hidden_features=hidden, out_features=in_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        attended = self.attn(self.norm1(x))
        return attended + self.drop_path(self.mlp(self.norm2(attended)))
class SimulatedDynamics(AbstractDynamics):
    """Kinematic forward model for simulated actors."""

    def __init__(self):
        # Stateless: everything comes from the (state, action) pair.
        pass

    def apply(self, state, action, dt):
        """Integrate ``state`` by ``action`` over ``dt``; return a new state."""
        # Sequence counter: restart on reset / reference change / missing
        # reference, zero when the sequence finishes, otherwise advance.
        if action.reset_seq or action.reference is not state.reference or action.reference is None:
            seq = 1
        elif action.finish_sequence:
            seq = 0
        else:
            seq = state.seq + 1
        # Gripper: an explicit command overrides the current state.
        gripper_closed = state.gripper_closed
        if action.gripper_cmd is not None:
            if action.gripper_cmd == 'close':
                gripper_closed = True
            elif action.gripper_cmd == 'open':
                gripper_closed = False
            else:
                raise RuntimeError('Unrecognized gripper command: "%s"' % str(action.gripper_cmd))
        # Joints: take the commanded pose when shapes match, otherwise
        # integrate the commanded joint velocity.
        if state.q.shape == action.q.shape:
            q = action.q
        else:
            q = state.q + action.dq * dt
        traj = state.traj if action.traj is None else action.traj
        return CostarState(state.actor_id, q=q, dq=action.dq, seq=seq, gripper_closed=gripper_closed, finished_last_sequence=action.finish_sequence, traj=traj, reference=action.reference, code=action.code)
def test_sz_zero_gaussian_spin_overlap():
    """Spin-exchange on a two-particle Gaussian matches the closed form."""
    exchange_op = physics.spin.create_local_spin_exchange(slog_psi_apply=_gaussian_two_particle_wavefn, nelec=jnp.array([1, 1]))
    _, samples = _get_random_samples(seed=6, nelec_total=2)
    actual = exchange_op(None, samples)
    # For the Gaussian wavefunction the exchange reduces to a ratio of
    # Gaussians, i.e. -exp(2 * (|r2|^2 - |r1|^2)).
    radii = jnp.linalg.norm(samples, axis=-1)
    expected = -jnp.exp(2 * (jnp.square(radii[..., 1]) - jnp.square(radii[..., 0])))
    np.testing.assert_allclose(actual, expected, rtol=1e-05)
def test_cls_and_dtype_conversion(simple_dtype):
    """Round-trip SimpleStruct between Python objects, tuples and numpy
    record arrays, and verify which forms the C++ bindings accept."""
    s = m.SimpleStruct()
    assert (s.astuple() == (False, 0, 0.0, 0.0))
    assert (m.SimpleStruct.fromtuple(s.astuple()).astuple() == s.astuple())
    s.uint_ = 2
    assert (m.f_simple(s) == 20)
    # A one-element record array works with the vectorized binding.
    s_recarray = np.array([(False, 2, 0.0, 0.0)], dtype=simple_dtype)
    np.testing.assert_array_equal(m.f_simple_vectorized(s_recarray), [20])
    # Indexing a recarray yields an np.void scalar, which is accepted.
    s_scalar = s_recarray[0]
    assert isinstance(s_scalar, np.void)
    assert (m.f_simple(s_scalar) == 20)
    # A 0-d recarray, however, is still an ndarray and is rejected ...
    s_recarray_scalar = s_recarray.reshape(())
    assert isinstance(s_recarray_scalar, np.ndarray)
    assert (s_recarray_scalar.dtype == simple_dtype)
    with pytest.raises(TypeError) as excinfo:
        m.f_simple(s_recarray_scalar)
    assert ('incompatible function arguments' in str(excinfo.value))
    # ... unless explicitly converted back to a SimpleStruct first.
    assert (m.f_simple(m.SimpleStruct.fromtuple(s_recarray_scalar.item())) == 20)
    # Object-dtype arrays of structs are rejected by the vectorized binding.
    s_array_object = np.array([s])
    assert (s_array_object.dtype == object)
    with pytest.raises(TypeError) as excinfo:
        m.f_simple_vectorized(s_array_object)
    assert ('incompatible function arguments' in str(excinfo.value))
    # A properly-typed record array built from the tuple form works again.
    s_array = np.array([s.astuple()], dtype=simple_dtype)
    np.testing.assert_array_equal(m.f_simple_vectorized(s_array), [20])
# NOTE(review): the three bare tuples below are no-op expressions; they look
# like @patch(...)/setting decorators whose wrappers were stripped during
# preprocessing — confirm against the original test module.
('pybaseball.cache.config.enabled', True)
('glob.glob', MagicMock(return_value=['1.cache_record.json']))
('pybaseball.cache.file_utils.load_json', MagicMock(return_value={'expires': '3000-01-01', 'func': 'df_func', 'args': [1, 2], 'kwargs': {'val1': 'a'}, 'dataframe': 'cachefile.csv'}))
def test_call_cache_enabled_loads_cache(mock_data_1: pd.DataFrame, load_mock: MagicMock, save_mock: MagicMock, save_json_mock: MagicMock) -> None:
    """A cache hit (matching func/args/kwargs, unexpired record) returns the
    stored dataframe without calling the wrapped function or re-saving."""
    df_func = MagicMock()
    df_func.__name__ = 'df_func'
    df_cache = cache.df_cache()
    assert df_cache.cache_config.enabled
    wrapper = df_cache.__call__(df_func)
    result = wrapper(*(1, 2), **{'val1': 'a'})
    # The cached frame was loaded; nothing was recomputed or saved.
    load_mock.assert_called_once()
    df_func.assert_not_called()
    save_mock.assert_not_called()
    assert isinstance(result, pd.DataFrame)
    pd.testing.assert_frame_equal(result, mock_data_1)
class TestBasicTuningStrategy(unittest.TestCase):
    """End-to-end tests of the basic post-training-quantization strategy.

    NOTE(review): setUpClass/tearDownClass take ``self`` and lack a visible
    @classmethod — decorators appear stripped from this source; confirm
    against the original test module.
    """

    def setUpClass(self):
        # Fake TF graph shared by the quantization tests.
        self.constant_graph = build_fake_model()
        self.workspace = os.path.abspath(options.workspace)

    def tearDownClass(self):
        shutil.rmtree('saved', ignore_errors=True)
        shutil.rmtree(self.workspace)

    def test_run_basic_one_trial_new_api(self):
        """Quantization with an always-passing eval_func yields a model."""
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        dataset = Datasets('tensorflow')['dummy']((100, 3, 3, 1))
        dataloader = DATALOADERS['tensorflow'](dataset)
        def fake_eval(model):
            return 1
        conf = PostTrainingQuantConfig()
        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=fake_eval)
        self.assertIsNotNone(q_model)

    def test_diagnosis(self):
        """diagnosis=True writes fp32 and quantized inspect results."""
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        dataset = Datasets('tensorflow')['dummy']((100, 3, 3, 1))
        dataloader = DATALOADERS['tensorflow'](dataset)
        conf = PostTrainingQuantConfig(diagnosis=True)
        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=(lambda model: 1))
        self.assertEqual(os.path.exists(os.path.join(self.workspace, 'inspect_saved/fp32/inspect_result.pkl')), True)
        self.assertEqual(os.path.exists(os.path.join(self.workspace, 'inspect_saved/quan/inspect_result.pkl')), True)

    def test_run_create_eval_from_metric_and_dataloader(self):
        """fit() can build its evaluation from a metric + dataloader pair."""
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        dataset = Datasets('tensorflow')['dummy']((100, 3, 3, 1))
        dataloader = DATALOADERS['tensorflow'](dataset)
        from neural_compressor.metric import METRICS
        metrics = METRICS('tensorflow')
        top1 = metrics['topk']()
        conf = PostTrainingQuantConfig()
        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_dataloader=dataloader, eval_metric=top1)

    def test_no_tuning(self):
        """Without an eval function, fit() quantizes a PyTorch model directly."""
        import torchvision
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data import DATALOADERS, Datasets
        from neural_compressor.quantization import fit
        conf = PostTrainingQuantConfig()
        conf.performance_only = True
        dataset = Datasets('pytorch')['dummy']((1, 3, 224, 224))
        dataloader = DATALOADERS['pytorch'](dataset)
        model = torchvision.models.resnet18()
        # NOTE(review): conf is rebuilt here, so performance_only above is
        # discarded — confirm that is intended.
        conf = PostTrainingQuantConfig(quant_level=1)
        q_model = fit(model=model, conf=conf, calib_dataloader=dataloader)
        self.assertIsNotNone(q_model)

    def test_block_wise_tuining_stock_pt(self):
        """Block-wise tuning on BERT with a scripted accuracy sequence."""
        from transformers import BertModel, BertTokenizer
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.quantization import fit
        for backend in ['default']:
            model_name = 'bert-base-uncased'
            model = BertModel.from_pretrained(model_name)
            model.eval()
            # Accuracy sequence: baseline 1.0, seven failing trials (0.9),
            # then three passing trials (1.1).
            acc_res_lst = (([1.0] + ([0.9] * 7)) + ([1.1] * 3))
            def fake_eval(model):
                res = acc_res_lst.pop(0)
                return res
            class DummyNLPDataloader(object):
                # Minimal single-sample dataloader yielding tokenized input.
                def __init__(self, model_name):
                    self.tokenizer = BertTokenizer.from_pretrained(model_name)
                    self.sequence_a = 'intel-extension-for-transformers is based in SH'
                    self.sequence_b = 'Where is intel-extension-for-transformers based? NYC or SH'
                    self.encoded_dict = self.tokenizer(self.sequence_a, self.sequence_b, return_tensors='pt')
                    self.batch_size = 1
                def __iter__(self):
                    (yield self.encoded_dict)
                def __next__(self):
                    return self.encoded_dict
            dataloader = DummyNLPDataloader(model_name)
            conf = PostTrainingQuantConfig(backend=backend)
            q_model = fit(model=model, conf=conf, calib_dataloader=dataloader, eval_func=fake_eval)
            assert (q_model is not None)
def save_pil(I, out_dir, pair_id, img_id):
    """Save PIL image ``I`` as ``<out_dir>/<pair_id>_<img_id>.jpg``."""
    filename = '{}_{}.jpg'.format(pair_id, img_id)
    I.save(os.path.join(out_dir, filename))
def val():
    """Validate the stereo disparity model on ``testing_data_loader``.

    Uses module-level globals: ``model``, ``testing_data_loader``, ``opt``
    and ``cuda``. Returns the average of the per-iteration 3-pixel metric.
    """
    epoch_error = 0
    valid_iteration = 0
    three_px_acc_all = 0
    model.eval()
    for (iteration, batch) in enumerate(testing_data_loader):
        (input1, input2, target) = (Variable(batch[0], requires_grad=False), Variable(batch[1], requires_grad=False), Variable(batch[2], requires_grad=False))
        if cuda:
            input1 = input1.cuda()
            input2 = input2.cuda()
            target = target.cuda()
        target = torch.squeeze(target, 1)
        # Only evaluate pixels whose ground-truth disparity is in range.
        mask = (target < opt.maxdisp)
        mask.detach_()
        valid = target[mask].size()[0]
        if (valid > 0):
            with torch.no_grad():
                disp = model(input1, input2)
                # Mean absolute (end-point) error over the masked pixels.
                error = torch.mean(torch.abs((disp[mask] - target[mask])))
                valid_iteration += 1
                epoch_error += error.item()
                pred_disp = disp.cpu().detach()
                true_disp = target.cpu().detach()
                # NOTE(review): disp_true aliases true_disp, so the write
                # below also mutates true_disp; the 5% comparison then uses
                # mutated values on its right-hand side — confirm this
                # matches the intended KITTI 3-px/5% metric.
                disp_true = true_disp
                index = np.argwhere((true_disp < opt.maxdisp))
                disp_true[(index[0][:], index[1][:], index[2][:])] = np.abs((true_disp[(index[0][:], index[1][:], index[2][:])] - pred_disp[(index[0][:], index[1][:], index[2][:])]))
                correct = ((disp_true[(index[0][:], index[1][:], index[2][:])] < 1) | (disp_true[(index[0][:], index[1][:], index[2][:])] < (true_disp[(index[0][:], index[1][:], index[2][:])] * 0.05)))
                # NOTE(review): despite the name, this is 1 - accuracy
                # (an error rate); the printed header also says 'Error'.
                three_px_acc = (1 - (float(torch.sum(correct)) / float(len(index[0]))))
                three_px_acc_all += three_px_acc
            print('===> Test({}/{}): Error: ({:.4f} {:.4f})'.format(iteration, len(testing_data_loader), error.item(), three_px_acc))
            sys.stdout.flush()
    print('===> Test: Avg. Error: ({:.4f} {:.4f})'.format((epoch_error / valid_iteration), (three_px_acc_all / valid_iteration)))
    return (three_px_acc_all / valid_iteration)
def resolve_schubert_conditions(ndim, kdim, brackets, verbose=True):
    """Return the formal root count for the given Schubert intersection
    conditions via the PHCpack C interface.

    ndim     : dimension n of the ambient space.
    kdim     : dimension k of the solution planes.
    brackets : sequence of brackets (sequences of integers) encoding the
               intersection conditions.
    verbose  : forwarded to the solver as an int flag.
    """
    from phcpy.phcpy2c3 import py2c_schubert_resolve_conditions as resolve
    # Serialize every bracket entry as a space-prefixed token, exactly as
    # the C interface expects.
    condition_string = ''.join(
        (' ' + str(entry)) for bracket in brackets for entry in bracket
    )
    return resolve(ndim, kdim, len(brackets), len(condition_string), condition_string, int(verbose))
def replace_unk_e2e_(beam_lst, lst_src, int_order):
    """Replace ``<unk>`` tokens in rank-0 beam outputs with source-field
    words selected by the model's copy indices.

    beam_lst  : beam strings of the form
                ``text ||| _ ||| _ ||| rank ||| copy-indices``.
    lst_src   : raw source strings, indexed via the entries of int_order.
    int_order : mapping from beam order to source order.
    Returns the de-unked rank-0 hypotheses, truncated at ``<eos>``.
    """
    result = []
    for (idx, num) in enumerate(int_order):
        fields = get_e2e_poswrds(lst_src[num])
        # Flatten the (key, position) -> word mapping into a word list.
        fields = [wrd for ((k, idx), wrd) in fields.items()]
        result.append(fields)
    result_2 = []
    x_idx = 0
    for ii in range(len(beam_lst)):
        try:
            x = result[x_idx]
            y = beam_lst[ii]
        except:
            # NOTE(review): bare except; when x_idx runs past result, x
            # keeps its previous value and processing continues with
            # stale data -- confirm this is intended.
            print('x_idx is out of range for x:', x_idx, ii)
        try:
            # Split the '|||'-delimited beam record.
            (y, _, _, rank, copy) = y.split('|||')
        except:
            # Malformed records are skipped (bare except kept as-is).
            continue
        if (int(rank) == 0):
            # copy is a literal Python list of per-token source indices.
            copy = ast.literal_eval(copy)
            y = y.split()
            for (idx, elem) in enumerate(y):
                if (elem == '<unk>'):
                    # Substitute the source word the copy index points at,
                    # if the index is in range.
                    if ((copy[idx] >= 0) and (copy[idx] < len(x))):
                        y[idx] = x[copy[idx]]
            if ('<eos>' in y):
                temp_id = y.index('<eos>')
                y = y[:temp_id]
            result_2.append(' '.join(y))
            # Advance to the next source only after a rank-0 beam is consumed.
            x_idx += 1
    return result_2
class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is unavailable; any attempt to
    instantiate it raises via :func:`requires_backends`."""
    # Backends whose absence this dummy object reports.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class DriveValue():
    """A clamped scalar control value in [MIN, MAX] with step updates.

    The value can be nudged up or down by DELTA (or an explicit amount),
    pegged to either limit, overwritten directly, or read back rounded
    to three decimal places.
    """
    MAX = 1.0
    MIN = (- 1.0)
    DELTA = 0.05
    value = 0.0

    def reset(self):
        """Return the value to its neutral (0.0) position."""
        self.value = 0.0
        return self.value

    def incr(self, by_value=0):
        """Raise the value by *by_value* (DELTA when 0), clamped at MAX."""
        step = by_value if (by_value != 0) else self.DELTA
        self.value = min(self.MAX, self.value + step)
        return round(self.value, 3)

    def decr(self, by_value=0):
        """Lower the value by *by_value* (DELTA when 0), clamped at MIN."""
        step = by_value if (by_value != 0) else self.DELTA
        self.value = max(self.MIN, self.value - step)
        return round(self.value, 3)

    def max(self):
        """Peg the value to MAX and return it."""
        self.value = self.MAX
        return self.value

    def min(self):
        """Peg the value to MIN and return it."""
        self.value = self.MIN
        return self.value

    def write(self, value):
        """Overwrite the value directly (no clamping) and return it."""
        self.value = value
        return self.value

    def read(self):
        """Return the current value rounded to three decimals."""
        return round(self.value, 3)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token='[CLS]', cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=(- 100), sequence_a_segment_id=0, mask_padding_with_zero=True):
    """Convert token-classification examples into padded ``InputFeatures``.

    Each example's words are sub-tokenized; only a word's first sub-token
    keeps its label, the rest receive ``pad_token_label_id`` so the loss
    ignores them.  Sequences are truncated to leave room for the special
    tokens, then [SEP]/[CLS] are placed (layout differs for XLNet-style
    ``cls_token_at_end`` and RoBERTa-style ``sep_token_extra``) and
    everything is padded to ``max_seq_length``.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info('Writing example %d of %d', ex_index, len(examples))
        tokens = []
        label_ids = []
        for (word, label) in zip(example.words, example.labels):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            # Label only the first sub-token of each word.
            label_ids.extend(([label_map[label]] + ([pad_token_label_id] * (len(word_tokens) - 1))))
        # Reserve room for [CLS]/[SEP] (RoBERTa uses an extra separator).
        special_tokens_count = (3 if sep_token_extra else 2)
        if (len(tokens) > (max_seq_length - special_tokens_count)):
            tokens = tokens[:(max_seq_length - special_tokens_count)]
            label_ids = label_ids[:(max_seq_length - special_tokens_count)]
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # RoBERTa-style double [SEP].
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
        segment_ids = ([sequence_a_segment_id] * len(tokens))
        if cls_token_at_end:
            # XLNet places [CLS] at the end with its own segment id.
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = ([cls_token] + tokens)
            label_ids = ([pad_token_label_id] + label_ids)
            segment_ids = ([cls_token_segment_id] + segment_ids)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Attention mask: 1 for real tokens when mask_padding_with_zero.
        input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_seq_length - len(input_ids))
        if pad_on_left:
            # XLNet-style left padding.
            input_ids = (([pad_token] * padding_length) + input_ids)
            input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
            segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
            label_ids = (([pad_token_label_id] * padding_length) + label_ids)
        else:
            input_ids += ([pad_token] * padding_length)
            input_mask += ([(0 if mask_padding_with_zero else 1)] * padding_length)
            segment_ids += ([pad_token_segment_id] * padding_length)
            label_ids += ([pad_token_label_id] * padding_length)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(label_ids) == max_seq_length)
        if (ex_index < 5):
            # Log the first few fully-processed examples for debugging.
            logger.info('*** Example ***')
            logger.info('guid: %s', example.guid)
            logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
            logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
            logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
            logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
            logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids))
    return features
class JpegNoise(ImageAugmentor):
    """Augmentor that simulates JPEG compression artifacts by re-encoding
    the image at a random quality level."""
    def __init__(self, quality_range=(40, 100)):
        """
        Args:
            quality_range (tuple): lower/upper bound of the JPEG quality.
                NOTE(review): ``rng.randint`` typically has an exclusive
                upper bound, so quality 100 itself may never be drawn --
                confirm intended.
        """
        super(JpegNoise, self).__init__()
        self._init(locals())  # stores quality_range as self.quality_range
    def _get_augment_params(self, img):
        # Draw this image's JPEG quality setting.
        return self.rng.randint(*self.quality_range)
    def _augment(self, img, q):
        # Encode to an in-memory JPEG at quality q, then decode back.
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1)
def check_train_all(raw_data, directions, all_test_data):
    """For every translation direction, scan its training split for
    sentences that also occur in *all_test_data*.

    Returns (mess_up_train, data_sizes), where data_sizes maps each
    direction to (corpus size, overlap count including duplicates).
    """
    mess_up_train = {}
    data_sizes = {}
    print(f'checking training data againsts # {len(all_test_data)} sentences')
    print(f'example test data: ', [s for (i, s) in enumerate(all_test_data.keys()) if (i < 10)])
    for direction in directions:
        src, tgt = direction.split('-')
        prefix = f'{raw_data}/en_XX/{direction}/all'
        src_file = f'{prefix}.{src}'
        tgt_file = f'{prefix}.{tgt}'
        print(f'checking {src_file} {tgt_file}')
        # mess_up_train is filled in-place by the checker.
        _, corpus_size, dup_overlap = check_train_sentences(src_file, tgt_file, direction, all_test_data, mess_up_train)
        data_sizes[direction] = (corpus_size, dup_overlap)
    return (mess_up_train, data_sizes)
class ResNet(nn.Module):
    """Configurable ResNet backbone assembled from registry-selected stem,
    stage specs and residual transformation modules.

    ``forward`` returns the feature maps of every stage whose
    ``return_features`` flag is set in its spec.
    """
    def __init__(self, cfg):
        super(ResNet, self).__init__()
        # Resolve the concrete modules named in the config registries.
        stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
        stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
        transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
        self.stem = stem_module(cfg)
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        stage2_bottleneck_channels = (num_groups * width_per_group)
        stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
        self.stages = []
        self.return_features = {}
        for stage_spec in stage_specs:
            name = ('layer' + str(stage_spec.index))
            # Channel counts double at each successive stage.
            stage2_relative_factor = (2 ** (stage_spec.index - 1))
            bottleneck_channels = (stage2_bottleneck_channels * stage2_relative_factor)
            out_channels = (stage2_out_channels * stage2_relative_factor)
            stage_with_dcn = cfg.MODEL.RESNETS.STAGE_WITH_DCN[(stage_spec.index - 1)]
            # first_stride: stage 1 keeps stride 1, later stages downsample by 2.
            module = _make_stage(transformation_module, in_channels, bottleneck_channels, out_channels, stage_spec.block_count, num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=(int((stage_spec.index > 1)) + 1), dcn_config={'stage_with_dcn': stage_with_dcn, 'with_modulated_dcn': cfg.MODEL.RESNETS.WITH_MODULATED_DCN, 'deformable_groups': cfg.MODEL.RESNETS.DEFORMABLE_GROUPS})
            in_channels = out_channels
            self.add_module(name, module)
            self.stages.append(name)
            self.return_features[name] = stage_spec.return_features
        # Optionally freeze the stem and the earliest stages.
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
    def _freeze_backbone(self, freeze_at):
        """Disable gradients for the stem (index 0) and stages 1..freeze_at-1."""
        if (freeze_at < 0):
            return
        for stage_index in range(freeze_at):
            if (stage_index == 0):
                m = self.stem
            else:
                m = getattr(self, ('layer' + str(stage_index)))
            for p in m.parameters():
                p.requires_grad = False
    def forward(self, x):
        """Return the list of feature maps flagged via ``return_features``."""
        outputs = []
        x = self.stem(x)
        for stage_name in self.stages:
            x = getattr(self, stage_name)(x)
            if self.return_features[stage_name]:
                outputs.append(x)
        return outputs
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias kept for backward compatibility; use the L2Norm
    in ``mmdet/models/necks/ssd_neck.py`` instead."""
    def __init__(self, **kwargs):
        super(L2Norm, self).__init__(**kwargs)
        # Deprecation is signalled only via the message text (default
        # UserWarning category), so it stays visible without -W flags.
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py is deprecated, please use L2Norm in mmdet/models/necks/ssd_neck.py instead')
def test_mse():
    """Exercise ``mse`` on 1-D input, channel-index selection and masking."""
    # Plain 1-D case: mean of squared element-wise differences.
    model_input = np.asarray([0.5, 0.75])
    model_output = np.asarray([0.2, 0.5])
    expected = (((0.3 ** 2) + (0.25 ** 2)) / 2)
    actual = mse(model_input, model_output)
    assert np.isclose(actual, expected)
    # Cross-check against the direct numpy formulation.
    actual = np.square(np.subtract(model_input, model_output)).mean(axis=0)
    assert np.isclose(actual, expected)
    # indices=[0, 2]: only channels 0 and 2 contribute (4 values total).
    model_input = np.asarray([[[[0.5, 0.75, 0.25]], [[0.5, 0.75, 0.25]]]])
    assert (model_input.shape == (1, 2, 1, 3))
    model_output = np.asarray([[[[0.2, 0.75, 0.5]], [[0.4, 0.75, 0.75]]]])
    assert (model_output.shape == model_input.shape)
    expected = (((((0.3 ** 2) + (0.25 ** 2)) + (0.1 ** 2)) + (0.5 ** 2)) / 4)
    actual = mse(model_input, model_output, indices=[0, 2])
    assert np.isclose(actual, expected)
    # Masked case: zeros in the mask null out those positions' errors.
    model_input = np.asarray([[[[0.5, 0.75, 0.25]], [[0.5, 0.75, 0.25]]]])
    assert (model_input.shape == (1, 2, 1, 3))
    model_output = np.asarray([[[[0.2, 0.75, 0.5]], [[0.4, 0.75, 0.75]]]])
    assert (model_output.shape == model_input.shape)
    mask = np.asarray([[[0, 1, 1]], [[1, 0, 0]]])
    assert (mask.shape == (2, 1, 3))
    # mask_norm=False: divide by the total element count (6).
    expected = (((((((0.0 ** 2) + (0.0 ** 2)) + (0.25 ** 2)) + (0.1 ** 2)) + (0.0 ** 2)) + (0.0 ** 2)) / 6)
    actual = mse(model_input, model_output, mask=mask, mask_norm=False)
    assert np.isclose(actual, expected)
    # Default mask_norm: divide by the number of unmasked elements (3).
    expected = ((((0.0 ** 2) + (0.25 ** 2)) + (0.1 ** 2)) / 3)
    actual = mse(model_input, model_output, mask=mask)
    assert np.isclose(actual, expected)
class ResNet(nn.Module):
    """Standard ImageNet-style ResNet: 7x7 stem, four residual stages,
    global average pooling and a linear classifier.

    Args:
        block: residual block class exposing an ``expansion`` attribute.
        layers: number of blocks in each of the four stages.
        num_classes: size of the final classification layer.
    """
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve resolution with stride 2.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He initialization for convs; unit scale / zero shift for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = ((module.kernel_size[0] * module.kernel_size[1]) * module.out_channels)
                module.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* residual blocks ending at planes*expansion channels."""
        downsample = None
        needs_projection = (stride != 1) or (self.inplanes != (planes * block.expansion))
        if needs_projection:
            # 1x1 projection so the identity branch matches the new shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d((planes * block.expansion)),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = (planes * block.expansion)
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
class TerminalGraphics(Graphics):
    """Curses-based renderer for the grid world and its actors."""
    def __init__(self):
        self.stdscr = curses.initscr()
        curses.start_color()
        # Color pair 1 (red on black) is used to highlight actors.
        curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
        # Row index of the last drawn world row; prompts go below it.
        self.bottom_row = 0
    def wait(self):
        """Block until the user presses any key."""
        self.stdscr.addstr((self.bottom_row + 2), 0, 'Press any key...')
        self.stdscr.getch()
    def getChar(self):
        """Prompt for and return a single command keypress."""
        self.stdscr.addstr((self.bottom_row + 2), 0, 'Input command: ')
        return self.stdscr.getch()
    def write(self, x, y, string, settings=None):
        # NOTE(review): the first argument is passed as the curses *row*
        # (addstr takes (y, x)), so `x` here is effectively the row index.
        if (not (settings is None)):
            self.stdscr.addstr(x, y, string, settings)
        else:
            self.stdscr.addstr(x, y, string)
    def writeLine(self, y, string, settings=None):
        """Write *string* at line ``bottom_row + y``, column 0."""
        if (not (settings is None)):
            self.stdscr.addstr((self.bottom_row + y), 0, string, settings)
        else:
            self.stdscr.addstr((self.bottom_row + y), 0, string)
    def drawWorld(self, world, draw_actors=True):
        """Render the world map tile-by-tile, then overlay the actors."""
        for i in range(world.worldmap.shape[0]):
            for j in range(world.worldmap.shape[1]):
                # Map each tile type to its display glyph.
                if (world.worldmap[(i, j)] == W.DirectionEast):
                    self.stdscr.addstr(i, j, '>', curses.color_pair(0))
                elif (world.worldmap[(i, j)] == W.DirectionWest):
                    self.stdscr.addstr(i, j, '<', curses.color_pair(0))
                elif (world.worldmap[(i, j)] == W.DirectionNorth):
                    self.stdscr.addstr(i, j, '^', curses.color_pair(0))
                elif (world.worldmap[(i, j)] == W.DirectionSouth):
                    self.stdscr.addstr(i, j, 'v', curses.color_pair(0))
                elif (world.worldmap[(i, j)] == W.Sidewalk):
                    self.stdscr.addstr(i, j, '#', curses.color_pair(0))
                elif (world.worldmap[(i, j)] == W.Intersection):
                    self.stdscr.addstr(i, j, 'X', curses.color_pair(0))
                else:
                    self.stdscr.addstr(i, j, ' ')
        if draw_actors:
            for actor in world.actors:
                # Actors with negative coordinates are off-map; skip them.
                if ((actor.state.x >= 0) and (actor.state.y >= 0)):
                    self.stdscr.addstr(actor.state.y, actor.state.x, actor.name, ((curses.color_pair(1) + curses.A_BOLD) + curses.A_UNDERLINE))
        # Remember the last world row; prompts are drawn beneath it.
        # NOTE(review): relies on loop variable `i` surviving the loop.
        self.bottom_row = i
    def close(self):
        """Restore the terminal to its normal state."""
        curses.endwin()
def parse_einsum_input(args, shapes=False, tuples=False, constants=None):
    """Normalize einsum-style call arguments into (inputs, output, arrays).

    Accepts both the string form ``(eq, *arrays)`` and the interleaved
    numpy form ``(op0, sub0, op1, sub1, ..., [out_sub])``.  When
    ``shapes`` is true the operands are interpreted as shape tuples
    instead of arrays; entries listed in ``constants`` are still real
    arrays whose shapes are taken via ``ar.shape``.
    """
    if (not isinstance(args[0], str)):
        # Interleaved (numpy-style) call: rebuild an equation string.
        (eq, arrays) = convert_from_interleaved(args)
    else:
        (eq, *arrays) = args
    if shapes:
        if (constants is not None):
            # Constant entries are concrete arrays; take their shapes.
            _shapes = tuple(((ar.shape(s) if (i in constants) else s) for (i, s) in enumerate(arrays)))
        else:
            _shapes = arrays
        # Normalize to a tuple of int tuples (dims may be e.g. numpy ints).
        if (not isinstance(next((d for s in _shapes for d in s), 1), int)):
            _shapes = tuple((tuple((int(d) for d in s)) for s in _shapes))
        elif (not isinstance(_shapes[0], tuple)):
            _shapes = tuple((tuple(s) for s in _shapes))
        else:
            _shapes = tuple(_shapes)
    else:
        # Operands are actual arrays; query their shapes directly.
        _shapes = tuple(map(ar.shape, arrays))
    # Expand any ellipses in the equation against the concrete shapes.
    (inputs, output) = parse_equation_ellipses(eq, _shapes, tuples=tuples)
    return (inputs, output, arrays)
class GNMTGlobalScorer(object):
    """Re-ranks beam hypotheses with GNMT-style length and coverage
    penalties.

    alpha controls the length-penalty strength, beta the coverage-penalty
    strength; the concrete penalty functions are selected by name.
    """
    def __init__(self, alpha, beta, cov_penalty, length_penalty):
        self.alpha = alpha
        self.beta = beta
        # Build the concrete penalty callables from their names.
        penalty_builder = penalties.PenaltyBuilder(cov_penalty, length_penalty)
        self.cov_penalty = penalty_builder.coverage_penalty()
        self.length_penalty = penalty_builder.length_penalty()
    def score(self, beam, logprobs):
        """Return log-probs rescaled by the length penalty; when the beam
        does not apply the coverage penalty stepwise, subtract it here."""
        normalized_probs = self.length_penalty(beam, logprobs, self.alpha)
        if (not beam.stepwise_penalty):
            penalty = self.cov_penalty(beam, beam.global_state['coverage'], self.beta)
            normalized_probs -= penalty
        return normalized_probs
    def update_score(self, beam, attn):
        """Apply the incremental coverage penalty for the current step:
        restore last step's penalty, then subtract the updated one."""
        if ('prev_penalty' in beam.global_state.keys()):
            beam.scores.add_(beam.global_state['prev_penalty'])
            penalty = self.cov_penalty(beam, (beam.global_state['coverage'] + attn), self.beta)
            beam.scores.sub_(penalty)
    def update_global_state(self, beam):
        """Advance the accumulated attention coverage after a beam step."""
        if (len(beam.prev_ks) == 1):
            # First step: initialize penalty/coverage state.
            beam.global_state['prev_penalty'] = beam.scores.clone().fill_(0.0)
            beam.global_state['coverage'] = beam.attn[(- 1)]
            self.cov_total = beam.attn[(- 1)].sum(1)
        else:
            self.cov_total += torch.min(beam.attn[(- 1)], beam.global_state['coverage']).sum(1)
            # Re-order coverage by the surviving hypotheses, then add the
            # newest attention step.
            beam.global_state['coverage'] = beam.global_state['coverage'].index_select(0, beam.prev_ks[(- 1)]).add(beam.attn[(- 1)])
            prev_penalty = self.cov_penalty(beam, beam.global_state['coverage'], self.beta)
            beam.global_state['prev_penalty'] = prev_penalty
def set_seed(seed):
    """Seed Python, NumPy and TensorFlow RNGs for reproducibility.

    Args:
        seed: integer seed applied to every RNG.
    """
    random.seed(seed)
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)
    try:
        # Best-effort: setting PYTHONHASHSEED at runtime does not change
        # the current interpreter's hash randomization, but it propagates
        # to child processes.
        os.environ['PYTHONHASHSEED'] = str(seed)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); still best-effort.
        pass
def get_config_updates(updates):
    """Split command-line update strings into config assignments and
    named configs.

    Entries containing ``=`` are parsed as dotted-path assignments into
    the returned ``config_updates`` dict; every other non-empty entry is
    collected as a named-config identifier.

    Returns:
        (config_updates, named_configs) tuple.
    """
    config_updates = {}
    named_configs = []
    if not updates:
        return (config_updates, named_configs)
    for entry in updates:
        if entry == '':
            continue
        path, sep, value = entry.partition('=')
        if sep == '=':
            # "a.b.c=value" -> nested assignment with a converted value.
            set_by_dotted_path(config_updates, path.strip(), _convert_value(value.strip()))
        else:
            named_configs.append(path)
    return (config_updates, named_configs)
class OptimizationParams(ParamGroup):
    """Hyper-parameter group holding every optimization-related setting.

    NOTE(review): unlike a typical ParamGroup subclass, ``parser`` is not
    forwarded to ``super().__init__`` here -- confirm intentional.
    """
    def __init__(self, parser):
        # Global multiplier applied to most learning rates.
        lr_scale = 4
        self.dataloader = False
        self.coarse_iterations = 0
        # --- learning-rate schedules -----------------------------------
        self.position_lr_init = (0.00016 * lr_scale)
        self.position_lr_final = (1.6e-06 * lr_scale)
        self.position_lr_delay_mult = 0.01
        self.position_lr_max_steps = 20000
        self.deformation_lr_init = (0.00016 * lr_scale)
        self.deformation_lr_final = (1.6e-05 * lr_scale)
        self.deformation_lr_delay_mult = 0.01
        self.grid_lr_init = (0.0016 * lr_scale)
        self.grid_lr_final = (0.00016 * lr_scale)
        self.feature_lr = (0.0025 * lr_scale)
        self.opacity_lr = (0.05 * lr_scale)
        self.scaling_lr = (0.005 * lr_scale)
        self.rotation_lr = (0.001 * lr_scale)
        # --- loss weights ----------------------------------------------
        self.percent_dense = 0.01
        self.lambda_dssim = 0
        self.lambda_lpips = 0
        self.weight_constraint_init = 1
        self.weight_constraint_after = 0.2
        self.weight_decay_iteration = 5000
        # --- densification / pruning schedule --------------------------
        self.opacity_reset_interval = 3000
        self.densification_interval = 100
        self.densify_from_iter = 500
        self.densify_until_iter = 15000
        self.densify_grad_threshold_coarse = 0.0002
        self.densify_grad_threshold_fine_init = 0.0002
        self.densify_grad_threshold_after = 0.0002
        self.pruning_from_iter = 500
        self.pruning_interval = 100
        self.opacity_threshold_coarse = 0.005
        self.opacity_threshold_fine_init = 0.005
        self.opacity_threshold_fine_after = 0.005
def get_driving_stereo_images(base_path, start_sample=0):
    """Collect sorted left/right/depth image paths from a DrivingStereo
    directory layout, skipping the first *start_sample* frames of each.

    Returns:
        (left_paths, right_paths, depth_paths) lists of file paths.
    """
    def _collect(subdir):
        # Sort for a deterministic, frame-aligned ordering across streams.
        return sorted(glob.glob(f'{base_path}/{subdir}/*.png'))[start_sample:]
    return (_collect('left'), _collect('right'), _collect('depth'))
def run():
    """Entry point: wire up data providers/iterators, metrics and the
    network symbol, then launch training via ``train_GOCD.start_train``.

    All ``param_*`` names are module-level configuration globals.
    """
    logging_GOCD.init_logging(log_file_path=param_log_file_path, log_file_mode=param_log_mode)
    logging.info('Preparing before training.')
    sys.path.append('..')
    # Network symbol and the names of its data/label inputs.
    from symbol_farm import symbol_10_320_20L_5scales_v2 as net
    (net_symbol, data_names, label_names) = net.get_net_symbol()
    net_initializer = mxnet.initializer.Xavier()
    logging.info('Get net symbol successfully.')
    from data_provider_farm.pickle_provider import PickleProvider
    from data_iterator_farm.multithread_dataiter_for_cross_entropy_v2 import Multithread_DataIter_for_CrossEntropy as DataIter
    # Training pipeline with the full augmentation configuration.
    train_data_provider = PickleProvider(param_trainset_pickle_file_path)
    train_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_train_dataiter, data_provider=train_data_provider, batch_size=param_train_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    # Optional validation pipeline: built only when every val setting is set.
    val_dataiter = None
    if ((param_valset_pickle_file_path != '') and (param_val_batch_size != 0) and (param_num_val_loops != 0) and (param_num_thread_val_dataiter != 0)):
        val_data_provider = PickleProvider(param_valset_pickle_file_path)
        val_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_val_dataiter, data_provider=val_data_provider, batch_size=param_val_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    from metric_farm.metric_default import Metric
    train_metric = Metric(param_num_output_scales)
    val_metric = None
    if (val_dataiter is not None):
        val_metric = Metric(param_num_output_scales)
    # Launch training on the configured GPUs.
    train_GOCD.start_train(param_dict=param_dict, mxnet_module=mxnet, context=[mxnet.gpu(i) for i in param_GPU_idx_list], train_dataiter=train_dataiter, train_metric=train_metric, train_metric_update_frequency=param_train_metric_update_frequency, num_train_loops=param_num_train_loops, val_dataiter=val_dataiter, val_metric=val_metric, num_val_loops=param_num_val_loops, validation_interval=param_validation_interval, optimizer_name=param_optimizer_name, optimizer_params=param_optimizer_params, net_symbol=net_symbol, net_initializer=net_initializer, net_data_names=data_names, net_label_names=label_names, pretrained_model_param_path=param_pretrained_model_param_path, display_interval=param_display_interval, save_prefix=param_save_prefix, model_save_interval=param_model_save_interval, start_index=param_start_index)
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings):
    """Build a BERT sequence-classification graph.

    Places a dropout + linear + softmax classifier on the pooled [CLS]
    output and computes the cross-entropy loss against *labels*.
    Returns (loss, per_example_loss, logits, probabilities).
    """
    model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
    # Pooled [CLS] representation, shape [batch, hidden].
    output_layer = model.get_pooled_output()
    hidden_size = output_layer.shape[(- 1)].value
    output_weights = tf.compat.v1.get_variable('output_weights', [num_labels, hidden_size], initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.compat.v1.get_variable('output_bias', [num_labels], initializer=tf.compat.v1.zeros_initializer())
    with tf.compat.v1.variable_scope('loss'):
        if is_training:
            # 10% dropout on the pooled output during training only.
            output_layer = tf.nn.dropout(output_layer, rate=0.1)
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=(- 1))
        log_probs = tf.nn.log_softmax(logits, axis=(- 1))
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
        # Softmax cross entropy written out explicitly.
        per_example_loss = (- tf.reduce_sum((one_hot_labels * log_probs), axis=(- 1)))
        loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)
def _interpolate(img, class_info, magnitude):
    """Interpolate *img* toward a sample drawn from its class pool.

    Samples candidates from ``class_info['pool']`` weighted by
    ``class_info['weights']``, selects the candidate scoring highest
    under ``cosine`` against the mean-centered input, and blends it in
    with strength derived from *magnitude*.

    Returns (augmented image, [chosen pool index]); the input is returned
    unchanged with an empty index list when the class pool is empty.
    """
    # Map the discrete magnitude level to a blend factor.
    m = float_parameter(magnitude, 1)
    x = img
    p = class_info['weights']
    if (len(p) < 1):
        # No candidates for this class: nothing to interpolate with.
        return (img, [])
    # Sample 5% of the pool (at least one candidate).
    k = max(1, int((len(class_info['pool']) * 0.05)))
    idxs = np.random.choice(len(class_info['pool']), k, p=p)
    # NOTE(review): `cosine` comes from an outer import; assumed to score
    # mean-centered candidates against the mean-centered, flattened input
    # -- confirm its exact semantics (similarity vs distance).
    distances = cosine((class_info['pool'][idxs] - class_info['mean']), (x.detach().cpu().view((- 1)) - class_info['mean']))
    idx = idxs[np.argmax(distances)]
    y = class_info['pool'][idx]
    # Convex blend: x_hat = x + m * (y - x).
    x_hat = (((y.cuda() - x) * m) + x)
    return (x_hat, [idx])
class QConv2d(nn.Conv2d):
    """Conv2d with quantization of activations, weights and optionally
    gradients and bias.

    num_bits / num_bits_weight / num_bits_grad select the bit widths for
    input, weight and gradient quantization (gradient quantization is
    disabled when ``num_bits_grad`` is None).  ``perC`` enables
    per-output-channel weight quantization; ``measure`` switches between
    calibration (range-measuring) and learned-threshold quantizers.
    ``equ_scale`` is a learnable per-channel weight equalization factor.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False, measure=False, cal_qparams=False):
        super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        self.num_bits = num_bits
        # Fall back to the activation bit width if not given explicitly.
        self.num_bits_weight = (num_bits_weight or num_bits)
        self.num_bits_grad = num_bits_grad
        self.measure = measure
        # Learnable per-output-channel scale applied to the weights.
        self.equ_scale = nn.Parameter(torch.ones(out_channels, 1, 1, 1))
        if measure:
            # Calibration mode: quantizers that record value ranges.
            self.quantize_input = QuantMeasure(self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, (- 1)), measure=measure, cal_qparams=cal_qparams)
            self.quantize_weight = QuantMeasure(self.num_bits, shape_measure=((out_channels if perC else 1), 1, 1, 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        else:
            # Training/inference mode: quantizers with updatable thresholds.
            self.quantize_input = QuantThUpdate(self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, (- 1)), measure=measure)
            self.quantize_weight = QuantThUpdate(self.num_bits, shape_measure=((out_channels if perC else 1), 1, 1, 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        self.biprecision = biprecision
        self.cal_params = cal_qparams
        # Master switch; when False the layer behaves like a float conv.
        self.quantize = True
    def forward(self, input):
        qinput = (self.quantize_input(input) if self.quantize else input)
        # Weights are equalized by equ_scale before quantization.
        qweight = (self.quantize_weight((self.weight * self.equ_scale)) if (self.quantize and (not self.cal_params)) else self.weight)
        if (self.bias is not None):
            # Bias uses (weight bits + activation bits), except while
            # measuring or when quantization is switched off.
            qbias = (self.bias if (self.measure or (not self.quantize)) else quantize(self.bias, num_bits=(self.num_bits_weight + self.num_bits), flatten_dims=(0, (- 1))))
        else:
            qbias = None
        if ((not self.biprecision) or (self.num_bits_grad is None)):
            output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups)
            if (self.num_bits_grad is not None):
                # Quantize gradients in the backward pass.
                output = quantize_grad(output, num_bits=self.num_bits_grad, flatten_dims=(1, (- 1)))
        else:
            # Bi-precision convolution with built-in gradient quantization.
            output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad)
        return output
def convert_all_sentencepiece_models(model_list=None, repo_path=None):
    """Convert every SentencePiece-based Marian checkpoint in the registry
    to a HuggingFace model under ``marian_converted/``.

    Checkpoints are downloaded into ``marian_ckpt/`` on demand.
    """
    save_dir = Path('marian_ckpt')
    dest_dir = Path('marian_converted')
    dest_dir.mkdir(exist_ok=True)
    if model_list is None:
        model_list: list = make_registry(repo_path=repo_path)
    for (k, prepro, download, test_set_url) in tqdm(model_list):
        # Only SentencePiece-preprocessed models are convertible here.
        if 'SentencePiece' not in prepro:
            continue
        ckpt_dir = save_dir / k
        if not os.path.exists(ckpt_dir / 'pytorch_model.bin'):
            download_and_unzip(download, ckpt_dir)
        pair_name = convert_opus_name_to_hf_name(k)
        convert(ckpt_dir, dest_dir / f'opus-mt-{pair_name}')
def _mean_update(vals, m_vals, t):
outputs = []
if (not isinstance(vals, list)):
vals = [vals]
if (not isinstance(m_vals, list)):
m_vals = [m_vals]
for (val, m_val) in zip(vals, m_vals):
output = (((t / float((t + 1))) * m_val) + ((1 / float((t + 1))) * val))
outputs.append(output)
if (len(outputs) == 1):
outputs = outputs[0]
return outputs |
_criterion('nat_seq_loss')
class SeqCriterion(LabelSmoothedDualImitationCriterion):
def add_args(parser):
    """Register criterion-specific command-line options.

    NOTE(review): defined without ``self``/``@staticmethod`` -- presumably
    invoked through the criterion registry as a static hook; confirm.
    """
    parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')
    # Enable the differentiable n-gram matching loss (_compute_gram_loss).
    parser.add_argument('--use-ngram', action='store_true')
    # Enable the reinforcement-learning reward loss (_compute_reward_loss).
    parser.add_argument('--use-rl', action='store_true')
    parser.add_argument('--rl-type', type=str, choices=['base', 'topk', 'traverse'])
    # n-gram order for the gram loss (1 falls back to bag-of-words).
    parser.add_argument('--n', default=2, type=int)
    # Number of top candidates traversed by the 'topk' RL variant.
    parser.add_argument('--topk', default=5, type=int)
    # Norm order p used by the bag-of-words loss.
    parser.add_argument('--p', default=1, type=float)
def forward(self, model, sample, reduce=True):
    """Run the model on *sample* and assemble the combined training loss.

    For the ``word_ins`` output the word-level loss is replaced by the
    optional n-gram and/or RL losses.  Returns
    (loss, sample_size, logging_output); sample_size is fixed at 1 so
    losses are logged per batch.
    """
    (nsentences, ntokens) = (sample['nsentences'], sample['ntokens'])
    (src_tokens, src_lengths) = (sample['net_input']['src_tokens'], sample['net_input']['src_lengths'])
    (tgt_tokens, prev_output_tokens) = (sample['target'], sample['prev_target'])
    outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
    (losses, nll_loss) = ([], [])
    for obj in outputs:
        if (outputs[obj].get('loss', None) is None):
            _losses = self._compute_loss(outputs[obj].get('out'), outputs[obj].get('tgt'), outputs[obj].get('mask', None), outputs[obj].get('ls', 0.0), name=(obj + '-wordloss'), factor=outputs[obj].get('factor', 1.0))
            # Fixed: compare strings with `==`, not `is` (identity on str
            # literals is implementation-dependent).
            if (obj == 'word_ins'):
                # Word-level CE is replaced by the n-gram / RL objectives.
                _losses['loss'] = 0
                if self.args.use_ngram:
                    gram_losses = self._compute_gram_loss(outputs[obj].get('out'), outputs[obj].get('tgt'), outputs[obj].get('mask', None))
                    _losses['loss'] += gram_losses
                if self.args.use_rl:
                    rl_losses = self._compute_reward_loss(outputs[obj].get('out'), outputs[obj].get('tgt'), outputs[obj].get('mask', None))
                    _losses['loss'] += rl_losses
            losses += [_losses]
            if outputs[obj].get('nll_loss', False):
                nll_loss += [_losses.get('nll_loss', 0.0)]
    loss = sum((l['loss'] for l in losses))
    nll_loss = (sum((l for l in nll_loss)) if (len(nll_loss) > 0) else loss.new_tensor(0))
    sample_size = 1
    logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
    for l in losses:
        # Fixed: the un-reduced branch previously indexed l[['loss']]
        # (a list key), which raises TypeError.
        logging_output[l['name']] = (utils.item((l['loss'].data / l['factor'])) if reduce else (l['loss'].data / l['factor']))
    return (loss, sample_size, logging_output)
def _compute_gram_loss(self, outputs, targets, masks=None):
    """Differentiable n-gram matching loss (negative mean match ratio).

    outputs : logits [batch, len, vocab]; targets : token ids [batch, len];
    masks   : boolean [batch, len] selecting real (non-pad) positions.
    For n == 1 this degenerates to the bag-of-words loss.
    """
    if (self.args.n == 1):
        loss = self._compute_bow_loss(outputs, targets, masks)
        return loss
    (batch_size, length) = targets.size()
    if (masks is not None):
        (outputs, targets) = (outputs[masks], targets[masks])
    else:
        masks = torch.ones(batch_size, length)
    # Fixed: make the softmax axis explicit -- the deprecated implicit-dim
    # form picks the wrong axis for 3-D input (the masks=None path).
    probs = F.softmax(outputs, dim=(- 1))
    target_lens = torch.sum(masks, dim=(- 1)).long().tolist()
    targets = targets.data.tolist()
    # Re-nest the flat target list back into per-sentence lists.
    targets = utils.shape(targets, target_lens)
    matchs = []
    if (self.args.n == 2):
        gram_match = utils.twogram_match(targets, target_lens, probs)
    elif (self.args.n == 3):
        gram_match = utils.threegram_match(targets, target_lens, probs)
    elif (self.args.n == 4):
        gram_match = utils.fourgram_match(targets, target_lens, probs)
    else:
        raise NotImplementedError
    for i in range(batch_size):
        # Per-sentence match ratio: matched n-grams / total n-grams.
        matchs.append((gram_match[i][0] / gram_match[i][1]))
    loss = ((- 1) * sum(matchs).div(batch_size))
    return loss
def _compute_reward_loss(self, outputs, targets, masks=None):
    """Dispatch to the reward-loss variant selected by ``--rl-type``."""
    dispatch = {
        'base': self._compute_reward_loss_rlbase,
        'topk': self._compute_reward_loss_rltopk,
        'traverse': self._compute_reward_loss_rltraverse,
    }
    handler = dispatch.get(self.args.rl_type)
    if handler is None:
        raise NotImplementedError
    return handler(outputs, targets, masks)
def _compute_bow_loss(self, outputs, targets, masks=None):
    """Bag-of-words loss: p-norm distance between the expected predicted
    word counts and the reference word counts, averaged over the batch
    and normalized by sequence length."""
    (batch_size, length, vocab_size) = outputs.size()
    if (masks is None):
        masks = torch.ones(batch_size, length)
    # (kept for parity: the reshaped mask is not used further below)
    masks = masks.float().view(batch_size, length, 1)
    probs = F.softmax(outputs, dim=(- 1))
    # Expected word counts under the model: probabilities summed over time.
    predicted_bow = torch.sum(probs, dim=1)
    device = probs.get_device()
    # Reference counts accumulated by scattering ones at target ids.
    reference_bow = torch.zeros(batch_size, vocab_size).cuda(device)
    ones = torch.ones(batch_size, vocab_size).cuda(device)
    reference_bow.scatter_add_((- 1), targets, ones)
    distance = torch.norm((predicted_bow - reference_bow), p=self.args.p, dim=(- 1))
    return torch.mean(distance).div(length)
def compute_step_reward(self, sample_times, workers, sample_index, sample_prob, targets, target_lens):
    """Monte-Carlo estimate of the per-position reward for one sampled
    output, averaged over *sample_times* completions drawn from
    *sample_prob* and scored in parallel across *workers* processes.
    """
    list_targets = utils.shape(targets, target_lens)
    list_samples = utils.shape(sample_index, target_lens)
    count = len(list_samples)
    # Draw `sample_times` independent completions from the model distribution.
    sample_idxs = [torch.multinomial(sample_prob, 1).data.view((- 1)).tolist() for i in range(sample_times)]
    inputs = [(sample_idxs[i], list_samples, list_targets, count, target_lens) for i in range(sample_times)]
    # Fixed: context-manage the executor so worker processes are always
    # shut down (the original leaked the pool).
    with ProcessPoolExecutor(max_workers=workers) as pool:
        rewards = list(pool.map(utils.parallel_reward, inputs))
    rewards = torch.Tensor(rewards).cuda(sample_prob.get_device())
    rewards = torch.mean(rewards, dim=0)
    return rewards
def compute_traverse_step_reward(self, sample_times, workers, all_sample_index, sample_prob, targets, target_lens):
    """Like ``compute_step_reward`` but scores every candidate sequence in
    *all_sample_index* (e.g. the per-position top-k tokens) jointly,
    averaging the parallel-scored rewards over *sample_times* draws.
    """
    list_targets = utils.shape(targets, target_lens)
    all_list_samples = [utils.shape(sample_index, target_lens) for sample_index in all_sample_index]
    count = len(all_list_samples[0])
    # Draw `sample_times` independent completions from the model distribution.
    sample_idxs = [torch.multinomial(sample_prob, 1).data.view((- 1)).tolist() for i in range(sample_times)]
    inputs = [(sample_idxs[i], all_list_samples, list_targets, count, target_lens) for i in range(sample_times)]
    # Fixed: context-manage the executor so worker processes are always
    # shut down (the original leaked the pool).
    with ProcessPoolExecutor(max_workers=workers) as pool:
        rewards = list(pool.map(utils.parallel_reward_tra, inputs))
    rewards = torch.Tensor(rewards).cuda(sample_prob.get_device())
    rewards = torch.mean(rewards, dim=0)
    return rewards
def compute_sentence_reward(self, sample_index, sample_prob, targets, target_lens):
    """Score each sampled sentence with sentence-level ROUGE and repeat
    that score once per token of the sentence, returning a flat reward
    tensor on ``sample_prob``'s device.
    """
    ref_sentences = utils.shape(targets, target_lens)
    sampled_sentences = utils.shape(sample_index, target_lens)
    rewards = []
    for i, sampled in enumerate(sampled_sentences):
        score = utils.my_sentence_rouge([sampled], ref_sentences[i])
        # Every token position of the sentence receives the same reward.
        rewards.extend([score] * len(sampled))
    return torch.Tensor(rewards).cuda(sample_prob.get_device())
def _compute_reward_loss_rltopk(self, outputs, targets, masks=None):
    """Hybrid RL loss: an exact expectation over the top-k tokens plus a
    single-sample REINFORCE term over the remaining probability mass.
    """
    # Keep only unmasked positions (flattens batch/time into one axis).
    (outputs, targets) = (outputs[masks], targets[masks])
    probs = F.softmax(outputs, dim=(- 1))
    target_lens = torch.sum(masks, dim=(- 1)).long().tolist()
    targets = targets.data.tolist()
    (top_probs, top_index) = torch.topk(probs, self.args.topk, dim=(- 1))
    # Mass covered by the top-k; detached so it acts as a constant weight.
    weight = torch.sum(top_probs, dim=(- 1)).detach()
    # Residual distribution: full probs with the top-k mass subtracted,
    # used to sample one token from outside the top-k.
    res_probs = torch.zeros(probs.size()).cuda(probs.get_device())
    res_probs.data.copy_(probs.data)
    res_probs.scatter_add_(1, top_index, ((- 1) * top_probs))
    sample_index = torch.multinomial(res_probs, 1)
    del res_probs  # free the residual distribution early
    sample_prob = torch.gather(probs, (- 1), sample_index)
    sample_index = sample_index.data.view((- 1)).tolist()
    top_index = top_index.t().data.tolist()
    if (self.args.topk != 0):
        # Enumerated term: reward every top-k candidate at every position.
        rewards = self.compute_traverse_step_reward(10, 10, top_index, probs, targets, target_lens)
        rewards = rewards[0:self.args.topk]
        rewards = torch.t(rewards)
        loss_traverse = ((- 1) * torch.sum((top_probs * rewards)))
    else:
        loss_traverse = 0
    # Sampled term for the non-top-k mass, scaled by (1 - covered mass).
    reward = self.compute_step_reward(10, 10, sample_index, probs, targets, target_lens)
    loss_sample = torch.sum(((((- 1) * (1 - weight)) * torch.log(sample_prob).view((- 1))) * reward), dim=0)
    loss = (loss_sample + loss_traverse).div(len(targets))
    return loss
def _compute_reward_loss_rlbase(self, outputs, targets, masks=None):
    """Plain REINFORCE loss: sample one token per unmasked position and
    weight its negative log-probability by a sentence-level reward.
    """
    # Keep only unmasked positions (flattens batch/time into one axis).
    (outputs, targets) = (outputs[masks], targets[masks])
    # Fix: make the softmax dimension explicit, matching the sibling
    # reward losses; the implicit-dim form is deprecated in PyTorch and
    # ambiguous to readers (same result here since `outputs` is 2-D).
    probs = F.softmax(outputs, dim=(- 1))
    target_lens = torch.sum(masks, dim=(- 1)).long().tolist()
    targets = targets.data.tolist()
    sample_index = torch.multinomial(probs, 1)
    sample_prob = torch.gather(probs, (- 1), sample_index)
    sample_index = sample_index.data.view((- 1)).tolist()
    reward = self.compute_sentence_reward(sample_index, probs, targets, target_lens)
    loss_sample = torch.sum((((- 1) * torch.log(sample_prob).view((- 1))) * reward), dim=0)
    loss = loss_sample.div(len(targets))
    return loss
def _compute_reward_loss_rltraverse(self, outputs, targets, masks=None):
    """Dense-reward loss: builds a reward value for every vocabulary token
    at every position by substituting each reference word in turn, then
    takes the probability-weighted sum.
    """
    (batch_size, length, vocab_size) = outputs.size()
    outputs = outputs.view((batch_size * length), vocab_size)
    targets = targets.view((- 1))
    probs = F.softmax(outputs, dim=(- 1))
    target_lens = ([length] * batch_size)
    targets = targets.data.tolist()
    rewards = torch.zeros((batch_size * length), vocab_size).cuda(probs.get_device())
    # ref_words[i][j]: for flattened position j, the i-th reference word of
    # the example that position belongs to (initialised to 0, then filled
    # slice-by-slice below).
    ref_words = []
    for i in range(length):
        ref_words.append([])
        for j in range((batch_size * length)):
            ref_words[i].append(0)
    for i in range(length):
        for j in range(batch_size):
            ref_words[i][(j * length):((j + 1) * length)] = ([targets[((j * length) + i)]] * length)
    ref_rewards = self.compute_traverse_step_reward(10, 10, ref_words, probs, targets, target_lens)
    # Slice `length` fills every token's reward; the earlier `length`
    # slices then overwrite the entries of the reference words themselves.
    ref_rewards = ref_rewards.view((length + 1), (batch_size * length), 1)
    ref_words = torch.LongTensor(ref_words).view(length, (batch_size * length), 1).cuda(probs.get_device())
    rewards += ref_rewards[length]
    for i in range(length):
        rewards.scatter_(1, ref_words[i], ref_rewards[i])
    loss = (- torch.sum((torch.sum((probs * rewards), dim=(- 1)) * masks.view((- 1)))).div(len(targets)))
    return loss
def convolutional_model_simple(input_shape=(NUM_FRAMES, 64, 1), batch_size=(BATCH_SIZE * TRIPLET_PER_BATCH), num_frames=NUM_FRAMES):
    """Build the simple convolutional speaker-embedding network.

    Three conv/residual stages (64, 128, 256 filters), temporal average
    pooling, a 512-d affine projection and L2 normalization.  Returns a
    Keras ``Model`` producing unit-norm embeddings.
    """
    def conv_stage(tensor, filters, stage_idx):
        # One strided conv (+BN+clipped ReLU) followed by 3 identity blocks.
        layer_name = 'conv{}-s'.format(filters)
        out = Conv2D(filters, kernel_size=5, strides=2, padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=1e-05),
                     name=layer_name)(tensor)
        out = BatchNormalization(name=(layer_name + '_bn'))(out)
        out = clipped_relu(out)
        for block_idx in range(3):
            out = identity_block2(out, kernel_size=3, filters=filters,
                                  stage=stage_idx, block=block_idx)
        return out

    inputs = Input(shape=input_shape)
    x = inputs
    for stage_idx, filters in enumerate([64, 128, 256], start=1):
        x = conv_stage(x, filters, stage_idx)
    # Collapse the spatial grid to (time, 2048), then average over time.
    x = Lambda((lambda y: K.reshape(y, ((- 1), math.ceil((num_frames / 8)), 2048))), name='reshape')(x)
    x = Lambda((lambda y: K.mean(y, axis=1)), name='average')(x)
    x = Dense(512, name='affine')(x)
    x = Lambda((lambda y: K.l2_normalize(y, axis=1)), name='ln')(x)
    return Model(inputs, x, name='convolutional')
def get_history(episode_stats, reward_function):
    """Turn raw episode statistics into a list of ``History`` transitions.

    Each transition pairs consecutive states with the action taken and the
    reward computed from the next state/jerk; the crash/merge outcome is
    only attributed to the final transition of the episode.
    """
    states = episode_stats['state_history']
    controls = episode_stats['control_history']
    jerks = episode_stats['jerk_history']
    crashed = episode_stats['crashed']
    merged = episode_stats['merged']
    transitions = []
    n = len(states)
    for step in range(n - 1):
        # Only the last transition carries the episode outcome flags.
        is_last = (step == (n - 2))
        reward = reward_function(
            states[step + 1],
            jerks[step + 1],
            crashed if is_last else False,
            merged if is_last else False,
        )
        transitions.append(History(states[step], states[step + 1], controls[step], reward))
    return transitions
class TFRecordsConverter(object):
def __init__(self, midi_path, output_dir, num_shards_train=3, num_shards_test=1):
    """Prepare the converter: ensure ``output_dir`` exists and pre-process
    every MIDI file found under ``midi_path``.
    """
    self.output_dir = output_dir
    self.num_shards_train = num_shards_train
    self.num_shards_test = num_shards_test
    # Fix: exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs(self.output_dir, exist_ok=True)
    (self.es_seq_list, self.ctrl_seq_list) = self.process_midi_from_dir(midi_path)
    self.counter = 0
def process_midi_from_dir(self, midi_root):
    """Pre-process every MIDI file below ``midi_root``.

    Returns ``(es_seq_list, ctrl_seq_list)``; files that fail to parse are
    skipped with an 'Error' marker, and Ctrl-C aborts (returning None).
    """
    midi_paths = list(utils.find_files_by_extensions(midi_root, ['.mid', '.midi', '.MID']))
    es_seq_list = []
    ctrl_seq_list = []
    for path in Bar('Processing').iter(midi_paths):
        print(' ', end='[{}]'.format(path), flush=True)
        try:
            data = preprocess_midi(path)
            for (es_seq, ctrl_seq) in data:
                max_len = par.max_seq
                for idx in range((max_len + 1)):
                    # NOTE(review): this appends data[0]/data[1] on every
                    # iteration rather than es_seq/ctrl_seq, so every entry
                    # is the first pair duplicated (max_len+1) times —
                    # looks like a bug; confirm intent before changing.
                    es_seq_list.append(data[0])
                    ctrl_seq_list.append(data[1])
        except KeyboardInterrupt:
            print(' Abort')
            return
        # Fix: a bare `except:` would also swallow SystemExit and
        # GeneratorExit; limit the blanket skip to ordinary errors.
        except Exception:
            print(' Error')
            continue
    return (es_seq_list, ctrl_seq_list)
@staticmethod
def _int64_feature(value):
    """Wrap a single int in a ``tf.train.Feature`` (int64 list).

    Fix: declared ``@staticmethod`` — the original took no ``self``, so an
    instance call would have bound the instance as ``value``; class-level
    calls keep working unchanged.
    """
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
@staticmethod
def _bytes_feature(value):
    """Wrap a bytes value in a ``tf.train.Feature`` (bytes list).

    Fix: declared ``@staticmethod`` — the original took no ``self``, so an
    instance call would have bound the instance as ``value``; class-level
    calls keep working unchanged.
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def __write_to_records(self, output_path, indicies):
    # Serialize the sequences at the given indices into one TFRecord shard.
    # NOTE(review): the visible body opens the writer and fetches the
    # sequences but never serializes or writes an Example — the method
    # appears truncated in this view; the writer is also never closed here.
    writer = tf.io.TFRecordWriter(output_path)
    for i in indicies:
        es_seq = self.es_seq_list[i]
        ctrl_seq = self.ctrl_seq_list[i]
def reverse_transform(inp):
    """Undo ImageNet normalization on a CHW tensor.

    Returns an HWC uint8 numpy image in [0, 255].
    """
    image = inp.numpy().transpose((1, 2, 0))
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    # De-normalize, clamp to valid range, and convert to 8-bit.
    image = (channel_std * image) + channel_mean
    image = np.clip(image, 0, 1)
    return (image * 255).astype(np.uint8)
def test_dict():
    """Config objects loaded from py/json/yaml files behave like dicts."""
    expected = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    for filename in ['a.py', 'b.json', 'c.yaml']:
        cfg = Config.fromfile(osp.join(data_path, 'config', filename))
        # Size and key set match the reference dict.
        assert (len(cfg) == 4)
        assert (set(cfg.keys()) == set(expected.keys()))
        assert (set(cfg._cfg_dict.keys()) == set(expected.keys()))
        for value in cfg.values():
            assert (value in expected.values())
        for (name, value) in cfg.items():
            assert (name in expected)
            assert (value in expected.values())
        # Attribute-style access, including nested attributes.
        assert (cfg.item1 == expected['item1'])
        assert (cfg.item2 == expected['item2'])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 == expected['item3'])
        assert (cfg.item4 == expected['item4'])
        with pytest.raises(AttributeError):
            cfg.not_exist
        # Mapping-style access, get() defaults and membership.
        for name in ['item1', 'item2', 'item3', 'item4']:
            assert (name in cfg)
            assert (cfg[name] == expected[name])
            assert (cfg.get(name) == expected[name])
        assert (cfg.get('not_exist') is None)
        assert (cfg.get('not_exist', 0) == 0)
        with pytest.raises(KeyError):
            cfg['not_exist']
        assert ('item1' in cfg)
        assert ('not_exist' not in cfg)
        # In-place updates, flat and nested.
        cfg.update(dict(item1=0))
        assert (cfg.item1 == 0)
        cfg.update(dict(item2=dict(a=1)))
        assert (cfg.item2.a == 1)
class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject):
    """Import placeholder: raises at instantiation unless torch is available."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Delegates the backend availability check (and error message).
        requires_backends(self, ['torch'])
class TestChainInterDataset(Dataset):
    """Evaluation dataset for chain-intersection queries.

    Each triple packs the query structure in its leading elements, the
    answer entity at index -2, and the query-type tag at index -1.  Every
    item is scored against all ``nentity`` entities.
    """

    def __init__(self, triples, test_ans, test_ans_hard, nentity, nrelation, mode):
        self.len = len(triples)
        self.triples = triples
        self.nentity = nentity
        self.nrelation = nrelation
        self.mode = mode
        self.test_ans = test_ans
        self.test_ans_hard = test_ans_hard
        # All triples in one dataset share a single query type.
        self.qtype = self.triples[0][-1]

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        triple = self.triples[idx]
        query = triple[:-2]
        tail = triple[-2]
        # Candidate set: every entity id.
        negative_sample = torch.LongTensor(range(self.nentity))
        # Flatten the nested query structure plus the answer entity.
        flattened = [query[0][0], query[0][1][0], query[0][1][1],
                     query[1][0], query[1][1][0], tail]
        positive_sample = torch.LongTensor(flattened)
        return (positive_sample, negative_sample, self.mode, query)

    def collate_fn(data):
        # Stack per-item tensors; mode/query are shared across the batch.
        positives = torch.stack([item[0] for item in data], dim=0)
        negatives = torch.stack([item[1] for item in data], dim=0)
        return (positives, negatives, data[0][2], data[0][3])
class RandomDirectionEmitter():
    """CMA-ES-style emitter that, each restart, picks a random direction in
    feature (behavior) space and ranks parents by their projection onto it.

    NOTE(review): relies on module-level names ``num_params``,
    ``DecompMatrix`` and ``Individual`` defined elsewhere in this file.
    """

    def __init__(self, mutation_power, population_size, feature_map):
        self.population_size = population_size
        # Base step size; restored as mutation_power on every reset().
        self.sigma = mutation_power
        self.individuals_disbatched = 0
        self.individuals_evaluated = 0
        self.parents = []
        self.population = []
        self.feature_map = feature_map
        self.num_features = len(self.feature_map.feature_ranges)
        self.reset()

    def reset(self):
        """Restart from a random elite (or the origin when the map is empty)
        with a fresh random direction and identity covariance."""
        self.mutation_power = self.sigma
        if (len(self.feature_map.elite_map) == 0):
            self.mean = np.asarray(([0.0] * num_params))
        else:
            self.mean = self.feature_map.get_random_elite().param_vector
        self.direction = np.asarray([np.random.normal(0.0, 1.0) for _ in range(self.num_features)])
        self.pc = np.zeros((num_params,), dtype=np.float_)  # evolution path (covariance update)
        self.ps = np.zeros((num_params,), dtype=np.float_)  # evolution path (step-size update)
        self.C = DecompMatrix(num_params)
        self.individuals_evaluated = 0

    def check_stop(self, parents):
        """Return True when the emitter should restart (degenerate search)."""
        # NOTE(review): `> .0` is true for any positive condition number, so
        # this triggers almost immediately — it looks like a mangled
        # threshold (CMA-ES conventionally uses ~1e14); confirm before fixing.
        if (self.C.condition_number > .0):
            return True
        area = (self.mutation_power * math.sqrt(max(self.C.eigenvalues)))
        if (area < 1e-11):
            return True
        # Flat fitness across ranked parents: no selection signal left.
        if (abs((parents[0].fitness - parents[(- 1)].fitness)) < 1e-12):
            return True
        return False

    def generate_individual(self):
        """Sample one individual from N(mean, sigma^2 * C) via C's eigendecomposition."""
        unscaled_params = (np.random.normal(0.0, self.mutation_power, num_params) * np.sqrt(self.C.eigenvalues))
        unscaled_params = np.matmul(self.C.eigenbasis, unscaled_params)
        unscaled_params = (self.mean + np.array(unscaled_params))
        ind = Individual()
        ind.param_vector = unscaled_params
        self.individuals_disbatched += 1
        return ind

    def return_evaluated_individual(self, ind):
        """Accept an evaluated individual; once a full population has been
        collected, perform one CMA-ES-style distribution update."""
        self.population.append(ind)
        self.individuals_evaluated += 1
        # Individuals that improved the archive become parents.
        if self.feature_map.add(ind):
            self.parents.append(ind)
        if (len(self.population) < self.population_size):
            return
        num_parents = len(self.parents)
        needs_restart = (num_parents == 0)
        feature_mean = (sum([np.array(ind.features) for ind in self.population]) / self.population_size)
        if (num_parents > 0):
            # NOTE(review): this delta-based ranking is immediately
            # overwritten by the projection-based ranking below — dead code?
            parents = sorted(self.parents, key=(lambda x: x.delta))[::(- 1)]
            # Rank parents by projection onto the random direction.
            for ind in self.parents:
                dv = (np.asarray(ind.features) - feature_mean)
                ind.projection = np.dot(self.direction, dv)
            parents = sorted(self.parents, key=(lambda x: (- x.projection)))
            # Standard CMA-ES log-rank recombination weights.
            weights = [(math.log((num_parents + 0.5)) - math.log((i + 1))) for i in range(num_parents)]
            total_weights = sum(weights)
            weights = np.array([(w / total_weights) for w in weights])
            # Variance-effective selection mass and strategy parameters.
            mueff = ((sum(weights) ** 2) / sum((weights ** 2)))
            cc = ((4 + (mueff / num_params)) / ((num_params + 4) + ((2 * mueff) / num_params)))
            cs = ((mueff + 2) / ((num_params + mueff) + 5))
            c1 = (2 / (((num_params + 1.3) ** 2) + mueff))
            cmu = min((1 - c1), ((2 * ((mueff - 2) + (1 / mueff))) / (((num_params + 2) ** 2) + mueff)))
            damps = ((1 + (2 * max(0, (math.sqrt(((mueff - 1) / (num_params + 1))) - 1)))) + cs)
            # NOTE(review): chiN is computed but never used below.
            chiN = ((num_params ** 0.5) * ((1 - (1 / (4 * num_params))) + (1.0 / (21 * (num_params ** 2)))))
            # Move the mean to the weighted recombination of ranked parents.
            old_mean = self.mean
            self.mean = sum(((ind.param_vector * w) for (ind, w) in zip(parents, weights)))
            y = (self.mean - old_mean)
            z = np.matmul(self.C.invsqrt, y)
            # Step-size evolution path and stall indicator (hsig).
            self.ps = (((1 - cs) * self.ps) + ((math.sqrt(((cs * (2 - cs)) * mueff)) / self.mutation_power) * z))
            left = ((sum(((x ** 2) for x in self.ps)) / num_params) / (1 - ((1 - cs) ** ((2 * self.individuals_evaluated) / self.population_size))))
            right = (2 + (4.0 / (num_params + 1)))
            hsig = (1 if (left < right) else 0)
            # Covariance evolution path and rank-1 / rank-mu updates of C.
            self.pc = (((1 - cc) * self.pc) + ((hsig * math.sqrt(((cc * (2 - cc)) * mueff))) * y))
            c1a = (c1 * (1 - (((1 - (hsig ** 2)) * cc) * (2 - cc))))
            self.C.C *= ((1 - c1a) - cmu)
            self.C.C += (c1 * np.outer(self.pc, self.pc))
            for (k, w) in enumerate(weights):
                dv = (parents[k].param_vector - old_mean)
                self.C.C += (((w * cmu) * np.outer(dv, dv)) / (self.mutation_power ** 2))
            if self.check_stop(parents):
                needs_restart = True
            else:
                # Refresh the eigendecomposition and adapt the step size.
                self.C.update_eigensystem()
                (cn, sum_square_ps) = ((cs / damps), sum(((x ** 2) for x in self.ps)))
                self.mutation_power *= math.exp(min(1, ((cn * ((sum_square_ps / num_params) - 1)) / 2)))
        if needs_restart:
            self.reset()
        # Start collecting the next generation.
        self.population.clear()
        self.parents.clear()
def keep_doc_examples_only(content: str) -> str:
    """Reduce *content* to its fenced code examples.

    Keeps only the segments between triple-backtick fences (re-wrapped in
    fences), strips ``#`` comments, and drops blank/whitespace-only lines.
    """
    pieces = content.split('```')
    # Odd-indexed pieces are the fenced code blocks.
    fenced = ('```' + '```'.join(pieces[1::2])) + '```'
    decommented = (re.sub('#.*$', '', ln) for ln in fenced.split('\n'))
    return '\n'.join(ln for ln in decommented if ln.strip())
class RobotMock():
    """Test double for the robot: wires up mock camera and base subsystems.

    Accepts and ignores any constructor arguments the real robot takes.
    """

    def __init__(self, *args, **kwargs):
        self.camera, self.base = CameraMock(), BaseMock()
class TomOrangesState(AbstractState):
    """Initial state for the Tom-oranges task: nothing grasped yet."""

    def __init__(self, world):
        self.predicates = []
        self.world = world
        # Nothing is grasped at the start.
        self.grasped = False
        self.grasped_name = None
        self.grasped_state = None
        # Quality of the (future) held orange is unknown until inspected.
        self.orange_is_good = None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.