code stringlengths 101 5.91M |
|---|
def generate_model_info():
    """Summarize the default TensorFlow graph's ops and variables.

    Returns:
        ModelInfo wrapping a TensorStats and an OpStats instance.
    """
    op_stats = OpStats()
    tensor_stats = TensorStats()

    graph_ops = tf.get_default_graph().get_operations()
    op_stats.op_count = len(graph_ops)
    for op in graph_ops:
        # Heuristic classification by op-name patterns.
        if 'update_' in op.name:
            op_stats.update_op_count += 1
        if op.name.endswith(('/read', '/Read/ReadVariableOp')):
            op_stats.read_op_count += 1

    global_vars = tf.global_variables()
    tensor_stats.variable_count = len(global_vars)
    for var in global_vars:
        dims = var.get_shape().as_list()
        if hasattr(var, 'key_dtype'):
            # KV-embedding variable: record only its embedding dimension.
            tensor_stats.kv_embedding_dims.append(int(dims[-1]))
        else:
            num_elements = 1
            for dim in dims:
                num_elements *= dim
            tensor_stats.total_variable_size += num_elements
            tensor_stats.max_variable_size = max(
                tensor_stats.max_variable_size, num_elements)

    return ModelInfo(tensor_stats, op_stats)
def get_sources_from_local_dir(globs, base_path):
    """Create a set of Source objects for every Python file under base_path.

    NOTE(review): `globs` is accepted but never used — confirm whether
    glob-based filtering was intended here.
    """
    sources = set()
    for filename in iterate_all_python_files(base_path):
        sources.add(Source.create(filename))
    return sources
def copy_dict(ori_dict: Union[(dict, Generator)]):
    """Shallow-copy a dict, or materialize a (key, value) generator into a dict."""
    pairs = ori_dict.items() if isinstance(ori_dict, dict) else ori_dict
    return {key: value for (key, value) in pairs}
def parse_args():
    """Parse the simulator's command-line options.

    Returns:
        argparse.Namespace with dataset/model selection, training schedule,
        metrics output and checkpointing settings.
    """
    parser = argparse.ArgumentParser()
    # Required experiment selection.
    parser.add_argument('-dataset', help='name of dataset;', type=str, choices=DATASETS, required=True)
    parser.add_argument('-model', help='name of model;', type=str, required=True)
    # Training schedule (-1 means "use the default for this dataset").
    parser.add_argument('--num-rounds', help='number of rounds to simulate;', type=int, default=-1)
    parser.add_argument('--eval-every', help='evaluate every ____ rounds;', type=int, default=-1)
    parser.add_argument('--clients-per-round', help='number of clients trained per round;', type=int, default=-1)
    parser.add_argument('--batch-size', help='batch size when clients train on data;', type=int, default=10)
    parser.add_argument('--seed', help='seed for random client sampling and batch splitting', type=int, default=0)
    # Metrics output.
    parser.add_argument('--metrics-name', help='name for metrics file;', type=str, default='metrics', required=False)
    parser.add_argument('--metrics-dir', help='dir for metrics file;', type=str, default='metrics', required=False)
    parser.add_argument('--use-val-set', help='use validation set;', action='store_true')
    # A minibatch fraction and an epoch count are mutually exclusive ways of
    # specifying the per-client local computation budget.
    local_work = parser.add_mutually_exclusive_group()
    local_work.add_argument('--minibatch', help='None for FedAvg, else fraction;', type=float, default=None)
    local_work.add_argument('--num-epochs', help='number of epochs when clients train on data;', type=int, default=1)
    parser.add_argument('-t', help='simulation time: small, medium, or large;', type=str, choices=SIM_TIMES, default='large')
    parser.add_argument('-lr', help='learning rate for local optimizers;', type=float, default=-1, required=False)
    parser.add_argument('--num-groups', help='num-groups for IFCA', type=int, default=1)
    # Checkpointing.
    parser.add_argument('--save', help='save path', type=str, default='./output/checkpoint.pkl')
    parser.add_argument('--resume', help='save path', type=str, default='')
    parser.add_argument('--checkpoint', help='save path', type=str, default='')
    return parser.parse_args()
class RandomGaussianBlur(object):
    """Blur the sample's image with probability 0.5; the label is untouched."""

    def __call__(self, sample):
        image = sample['image']
        label = sample['label']
        if random.random() < 0.5:
            # Random blur radius drawn uniformly from [0, 1).
            image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
        return {'image': image, 'label': label}
class PriorBox(object):
    """Compute SSD prior (anchor) box coordinates for each feature-map cell.

    All geometry comes from the cfg dict: feature-map sizes, step strides,
    min/max box sizes and per-layer aspect ratios.
    """

    def __init__(self, cfg):
        super(PriorBox, self).__init__()
        self.image_size = cfg['min_dim']
        # Number of prior configurations (one entry per feature-map layer).
        self.num_priors = len(cfg['aspect_ratios'])
        self.variance = (cfg['variance'] or [0.1])
        self.feature_maps = cfg['feature_maps']
        self.min_sizes = cfg['min_sizes']
        self.max_sizes = cfg['max_sizes']
        self.steps = cfg['steps']
        self.aspect_ratios = cfg['aspect_ratios']
        self.clip = cfg['clip']
        self.version = cfg['name']
        for v in self.variance:
            if (v <= 0):
                raise ValueError('Variances must be greater than 0')

    def forward(self):
        """Return an (N, 4) tensor of priors, optionally clipped to [0, 1]."""
        mean = []
        if (self.version == 'v2'):
            # v2 layout: boxes as (cx, cy, w, h), normalized by image size.
            for (k, f) in enumerate(self.feature_maps):
                for (i, j) in product(range(f), repeat=2):
                    f_k = (self.image_size / self.steps[k])
                    # Unit-normalized center of the current cell.
                    cx = ((j + 0.5) / f_k)
                    cy = ((i + 0.5) / f_k)
                    # Aspect-ratio-1 box at scale min_size.
                    s_k = (self.min_sizes[k] / self.image_size)
                    mean += [cx, cy, s_k, s_k]
                    # Extra aspect-ratio-1 box at the geometric mean of this
                    # layer's scale and the next (min*max).
                    s_k_prime = sqrt((s_k * (self.max_sizes[k] / self.image_size)))
                    mean += [cx, cy, s_k_prime, s_k_prime]
                    # One pair of boxes per configured aspect ratio (ar, 1/ar).
                    for ar in self.aspect_ratios[k]:
                        mean += [cx, cy, (s_k * sqrt(ar)), (s_k / sqrt(ar))]
                        mean += [cx, cy, (s_k / sqrt(ar)), (s_k * sqrt(ar))]
        else:
            # v1 layout: boxes as corner coordinates (xmin, ymin, xmax, ymax).
            for (i, k) in enumerate(self.feature_maps):
                step_x = step_y = (self.image_size / k)
                for (h, w) in product(range(k), repeat=2):
                    c_x = ((w + 0.5) * step_x)
                    c_y = ((h + 0.5) * step_y)
                    c_w = c_h = (self.min_sizes[i] / 2)
                    s_k = self.image_size
                    mean += [((c_x - c_w) / s_k), ((c_y - c_h) / s_k), ((c_x + c_w) / s_k), ((c_y + c_h) / s_k)]
                    if (self.max_sizes[i] > 0):
                        # Larger square box at sqrt(min*max) scale.
                        c_w = c_h = (sqrt((self.min_sizes[i] * self.max_sizes[i])) / 2)
                        mean += [((c_x - c_w) / s_k), ((c_y - c_h) / s_k), ((c_x + c_w) / s_k), ((c_y + c_h) / s_k)]
                    for ar in self.aspect_ratios[i]:
                        # Skip aspect ratio 1 (already covered above).
                        if (not (abs((ar - 1)) < 1e-06)):
                            c_w = ((self.min_sizes[i] * sqrt(ar)) / 2)
                            c_h = ((self.min_sizes[i] / sqrt(ar)) / 2)
                            mean += [((c_x - c_w) / s_k), ((c_y - c_h) / s_k), ((c_x + c_w) / s_k), ((c_y + c_h) / s_k)]
        output = torch.Tensor(mean).view((- 1), 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
class CNN_Net(nn.Module):
    """Small tanh-activated CNN for 32x32 single-channel inputs.

    Produces 10-way log-softmax scores (e.g. MNIST-style classification).
    """

    def __init__(self, device=None):
        super(CNN_Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, 3, 1)
        self.conv2 = nn.Conv2d(64, 16, 7, 1)
        self.fc1 = nn.Linear(4 * 4 * 16, 200)
        self.fc2 = nn.Linear(200, 10)

    def forward(self, x):
        # Accept flat inputs; reshape to NCHW.
        out = x.view(-1, 1, 32, 32)
        out = F.max_pool2d(torch.tanh(self.conv1(out)), 2, 2)
        out = F.max_pool2d(torch.tanh(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 16)
        out = torch.tanh(self.fc1(out))
        return F.log_softmax(self.fc2(out), dim=1)
class Identity(torch.nn.Module):
    """Pass-through module: returns its single argument, or the argument tuple."""

    def __init__(self):
        super().__init__()

    def forward(self, *args):
        return args[0] if len(args) == 1 else args
class IGate(Gate):
    """Single-qubit identity gate (leaves the qubit's state unchanged)."""

    def __init__(self, label=None):
        super().__init__('i', 1, [], label=label)

    def _define(self):
        """Define the gate in terms of U3: the identity is U3(0, 0, 0).

        BUG FIX: the previous rule used U3Gate(pi, 0, pi), which is the X
        gate, not the identity.
        """
        definition = []
        q = QuantumRegister(1, 'q')
        rule = [(U3Gate(0, 0, 0), [q[0]], [])]
        for inst in rule:
            definition.append(inst)
        self.definition = definition

    def inverse(self):
        """The identity gate is its own inverse."""
        return IGate()

    def to_matrix(self):
        """Return the 2x2 identity matrix.

        BUG FIX: the previous version referenced an undefined name `i`
        (NameError at call time); the identity matrix is [[1, 0], [0, 1]].
        """
        return numpy.array([[1, 0], [0, 1]], dtype=complex)
class SFT_Net_torch(nn.Module):
    """SFT super-resolution network: conditioned residual trunk + x4 upsampling head."""

    def __init__(self):
        super(SFT_Net_torch, self).__init__()
        self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)
        # Trunk: 16 SFT residual blocks, a final SFT layer, then a conv.
        trunk = [ResBlock_SFT_torch() for _ in range(16)]
        trunk.append(SFTLayer_torch())
        trunk.append(nn.Conv2d(64, 64, 3, 1, 1))
        self.sft_branch = nn.Sequential(*trunk)
        # High-resolution head: two nearest-neighbour x2 upsamples (x4 total).
        self.HR_branch = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.ReLU(True),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 3, 3, 1, 1),
        )
        # Condition network: compresses the 8-channel condition maps to 32 channels.
        self.CondNet = nn.Sequential(
            nn.Conv2d(8, 128, 4, 4),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 128, 1),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(128, 32, 1),
        )

    def forward(self, x):
        # x is a pair: x[0] = input image, x[1] = condition maps.
        cond = self.CondNet(x[1])
        fea = self.conv0(x[0])
        fea = fea + self.sft_branch((fea, cond))
        return self.HR_branch(fea)
class Lamb(torch.optim.Optimizer):
    """LAMB optimizer (layer-wise adaptive moments; see arXiv:1904.00962).

    Args:
        params: iterable of parameters to optimize.
        lr: learning rate.
        betas: coefficients for the running averages of the gradient and its square.
        eps: term added to the denominator for numerical stability.
        weight_decay: L2 penalty coefficient.
        adam: if True, skip the trust-ratio scaling and behave like Adam
            (useful for debugging).
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns
                the loss.
        Returns:
            The closure's loss, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # Typo fix in message: 'instad' -> 'instead'.
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    # BUG FIX: the positional add_(scalar, tensor) overload is
                    # deprecated/removed in modern torch; use the keyword form.
                    grad.add_(p.data, alpha=group['weight_decay'])
                # Exponential moving averages of the gradient and its square.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                adam_step = exp_avg / denom
                # Trust ratio ||w|| / ||update|| (RMS norms), clipped at 10.
                r1 = p.data.pow(2).mean().sqrt()
                r2 = adam_step.pow(2).mean().sqrt()
                if r1 == 0 or r2 == 0:
                    r = 1.0
                else:
                    r = min((r1 / r2).item(), 10)
                state['r1'] = r1
                state['r2'] = r2
                state['r'] = r
                if self.adam:
                    r = 1
                p.data.add_(adam_step, alpha=-step_size * r)
        return loss
class GroupedIterator(object):
    """Wrap an iterator so that it yields fixed-size chunks (lists) of items.

    The wrapped iterable must support both len() and next() (e.g. a counting
    iterator); the final chunk may be shorter than chunk_size.
    """

    def __init__(self, iterable, chunk_size):
        # Total number of chunks this iterator will produce.
        self._len = int(math.ceil(len(iterable) / float(chunk_size)))
        # Chunks already consumed, derived from the iterable's 'count' attribute.
        self.offset = int(math.ceil(getattr(iterable, 'count', 0) / float(chunk_size)))
        self.itr = iterable
        self.chunk_size = chunk_size

    def __len__(self):
        return self._len

    def __iter__(self):
        return self

    def __next__(self):
        chunk = []
        try:
            while len(chunk) < self.chunk_size:
                chunk.append(next(self.itr))
        except StopIteration:
            # A trailing partial chunk is still returned; only an empty
            # chunk terminates iteration.
            if not chunk:
                raise
        return chunk
class LitModel(pl.LightningModule):
    """Lightning wrapper around CLIPScore for retrieval + grammar training.

    NOTE(review): several names used below (`args`, `opt`, `verbose`) are
    module-level globals rather than attributes — confirm they are defined
    in the full file.
    """

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # NOTE(review): `args` is a global here, not a constructor argument.
        self.args = args
        self.model = CLIPScore(use_grammar=opt.use_grammar, joint_out=opt.joint_out)
        # Freeze the CLIP vision tower; only text/grammar parts are trained.
        for p in self.model.clip_model.vision_model.parameters():
            p.requires_grad = False
        for p in self.model.clip_model.visual_projection.parameters():
            p.requires_grad = False

    def forward(self, *args, **kwargs):
        # All computation goes through the wrapped model's train_step.
        raise NotImplementedError

    def train_dataloader(self):
        # NOTE(review): uses globals `opt` and `verbose`, not self.opt.
        train_dataset = COCORetrievalDataset(split='karpathy_train', mode='train', args=opt, verbose=verbose)
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4, collate_fn=train_dataset.collate_fn)
        return train_loader

    def val_dataloader(self, split='karpathy_val'):
        val_dataset = COCORetrievalDataset(split=split, mode='val', args=opt, verbose=verbose)
        val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=opt.valid_batch_size, shuffle=False, num_workers=4, drop_last=False, collate_fn=val_dataset.collate_fn)
        return val_loader

    def test_dataloader(self):
        # Test shares the validation pipeline on the karpathy test split.
        return self.val_dataloader('karpathy_test')

    def training_step(self, data, batch_idx):
        """One step: CLIP contrastive loss (+ grammar loss unless joint_out)."""
        batch = data
        self.model.train()
        model_out = self.model.train_step(img_feat=batch['img_feats'], text=batch['text'], neg_text=batch['neg_text'])
        clip_loss = model_out['clip_loss']
        if self.opt.joint_out:
            loss = clip_loss
        else:
            grammar_loss = model_out['grammar_loss']
            loss = (clip_loss + grammar_loss)
        # Data-loading time of the last batch, read from the Lightning profiler.
        data_time = self.trainer.profiler.recorded_durations['get_train_batch'][(- 1)]
        data_time = torch.tensor(data_time)
        logger_logs = {}
        logger_logs['loss'] = loss.detach()
        logger_logs['clip_loss'] = clip_loss.detach()
        if (not self.opt.joint_out):
            logger_logs['grammar_loss'] = grammar_loss.detach()
        logger_logs['data_time'] = data_time.detach()
        for (k, v) in logger_logs.items():
            # Selected metrics also go to the progress bar.
            if (k in ['data_time', 'clip_loss', 'grammar_loss']):
                self.log(('train/' + k), v, prog_bar=True)
            else:
                self.log(('train/' + k), v)
        return loss

    def validation_step(self, data, batch_idx):
        """Compute losses and collect features/predictions for epoch-end metrics."""
        batch = data
        self.model.eval()
        with torch.no_grad():
            model_out = self.model.train_step(img_feat=batch['img_feats'], text=batch['text'], neg_text=batch['neg_text'])
        if self.opt.joint_out:
            clip_loss = model_out['clip_loss']
            loss = clip_loss
            output = {'loss': loss.detach(), 'clip_loss': clip_loss.detach(), 'img_feat': model_out['img_feat'].detach(), 'text_feat': model_out['text_feat'].detach()}
        else:
            clip_loss = model_out['clip_loss']
            grammar_loss = model_out['grammar_loss']
            loss = (clip_loss + grammar_loss)
            output = {'loss': loss.detach(), 'clip_loss': clip_loss.detach(), 'grammar_loss': grammar_loss.detach(), 'img_feat': model_out['img_feat'].detach(), 'text_feat': model_out['text_feat'].detach(), 'grammar_pos_pred': model_out['grammar_pos_pred'].detach(), 'grammar_neg_pred': model_out['grammar_neg_pred'].detach()}
        return output

    def test_step(self, *args, **kwargs):
        return self.validation_step(*args, **kwargs)

    def validation_epoch_end(self, outputs, split='val'):
        """Aggregate batch outputs, compute retrieval recall@k (and grammar
        metrics) on the main process, then broadcast and log them."""
        outputs = d2comm.gather(outputs)
        if d2comm.is_main_process():
            assert ((self.trainer.node_rank == 0) and (self.trainer.local_rank == 0))
            # Flatten the list-of-lists gathered from all workers.
            outputs = sum(outputs, [])
            out = {}
            val_loss_mean = (sum([_['loss'].cpu() for _ in outputs]) / len(outputs))
            val_clip_loss_mean = (sum([_['clip_loss'].cpu() for _ in outputs]) / len(outputs))
            if (not self.opt.joint_out):
                val_grammar_loss_mean = (sum([_['grammar_loss'].cpu() for _ in outputs]) / len(outputs))
            print('loss', val_loss_mean.item())
            print('clip_loss', val_clip_loss_mean.item())
            if (not self.opt.joint_out):
                print('grammar_loss', val_grammar_loss_mean.item())
            logit_scale = self.model.clip_model.logit_scale.exp().cpu()
            text_feats = torch.cat([_['text_feat'].cpu() for _ in outputs], dim=0)
            img_feats = torch.cat([_['img_feat'].cpu() for _ in outputs], dim=0)
            # NOTE(review): hard-coded 5000x512 assumes the karpathy val/test
            # split size and CLIP's feature dim — confirm for other datasets.
            assert (text_feats.size() == (5000, 512)), text_feats.size()
            assert (img_feats.size() == (5000, 512)), img_feats.size()
            logits_per_text = (torch.matmul(text_feats, img_feats.t()) * logit_scale)
            logits_per_image = logits_per_text.T
            print('Text-to-Image retrieval')
            for k in [1, 5, 10]:
                text_to_image_topk = logits_per_text.topk(k, dim=1).indices
                n_text = len(text_to_image_topk)
                # Ground truth: text i matches image i (diagonal pairing).
                labels = torch.arange(0, n_text).view((- 1), 1)
                n_retrieved = ((text_to_image_topk == labels).sum(dim=1) > 0).sum()
                recall_k = ((n_retrieved / n_text) * 100)
                out[f'text_to_image_recall_{k}'] = recall_k.item()
                print(f'{k}: {recall_k.item():.2f}%')
            print('Image-to-Text retrieval')
            for k in [1, 5, 10]:
                image_to_text_topk = logits_per_image.topk(k, dim=1).indices
                n_image = len(image_to_text_topk)
                labels = torch.arange(0, n_image).view((- 1), 1)
                n_retrieved = ((image_to_text_topk == labels).sum(dim=1) > 0).sum()
                recall_k = ((n_retrieved / n_image) * 100)
                out[f'image_to_text_recall_{k}'] = recall_k.item()
                print(f'{k}: {recall_k.item():.2f}%')
            out.update({'loss': val_loss_mean.item(), 'clip_loss': val_clip_loss_mean.item()})
            if (not self.opt.joint_out):
                grammar_pos_pred = torch.cat([_['grammar_pos_pred'].cpu() for _ in outputs], dim=0)
                grammar_neg_pred = torch.cat([_['grammar_neg_pred'].cpu() for _ in outputs], dim=0)
                # Positive sentences predicted grammatical count as TP, etc.
                TP = (grammar_pos_pred == 1).sum().item()
                FP = (grammar_pos_pred == 0).sum().item()
                FN = (grammar_neg_pred == 1).sum().item()
                TN = (grammar_neg_pred == 0).sum().item()
                print('Grammar check')
                print(f'TP: {TP} FP: {FP} FN: {FN} TN: {TN}')
                precision = ((TP / (TP + FP)) * 100)
                recall = ((TP / (TP + FN)) * 100)
                accuracy = (((TP + TN) / (((TP + FP) + FN) + TN)) * 100)
                f1 = (((2 * precision) * recall) / (precision + recall))
                print(f'Precision: {precision:.2f}%')
                print(f'Recall: {recall:.2f}%')
                print(f'Accuracy: {accuracy:.2f}%')
                print(f'F1: {f1:.2f}%')
                print('Total: {}'.format(len(grammar_pos_pred)))
                out.update({'grammar_loss': val_grammar_loss_mean, 'grammar_precision': precision, 'grammar_recall': recall, 'grammar_accuracy': accuracy, 'grammar_f1': f1})
        else:
            out = {}
        # Broadcast the main process's metrics to every worker, then log.
        out = d2comm.all_gather(out)[0]
        assert (len(out) > 0)
        out = {k: (torch.tensor(v) if (not torch.is_tensor(v)) else v) for (k, v) in out.items()}
        for (k, v) in out.items():
            self.log(f'{split}/{k}', v)

    def test_epoch_end(self, outputs):
        self.validation_epoch_end(outputs, 'test')

    def configure_optimizers(self):
        if (self.args.optim == 'adamw'):
            # No weight decay for biases and LayerNorm weights.
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.args.weight_decay}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
            # Optimize only parameters that were not frozen in __init__.
            for group in optimizer_grouped_parameters:
                group['params'] = [p for p in group['params'] if p.requires_grad]
            from transformers.optimization import AdamW
            optim = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_eps)
        # NOTE(review): `optim` is only bound on the adamw path; any other
        # value of self.args.optim raises NameError here — confirm intended.
        optimizers = [optim]
        lr_schedulers = []
        return (optimizers, lr_schedulers)

    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, *args, **kwargs):
        # Defer entirely to Lightning's default stepping behaviour.
        super().optimizer_step(epoch, batch_idx, optimizer, optimizer_idx, *args, **kwargs)

    def state_dict(self):
        # Checkpoint only the wrapped model's weights.
        state_dict = self.model.state_dict()
        # NOTE(review): `device` is computed but never used — confirm removable.
        device = next(iter(state_dict.values())).device
        assert (('_vocab' not in state_dict) and ('_opt' not in state_dict)), 'Just in case'
        return state_dict

    def load_state_dict(self, state_dict=None, strict=True):
        self.model.load_state_dict(state_dict, strict)
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ShmDataset(IterableDataset):
    """Iterable dataset that streams items from a shared-memory context.

    Each worker pulls items keyed by its worker id until the context
    returns None, which signals exhaustion.
    """

    def __init__(self, shm_context):
        self.shm_context = shm_context

    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        # Main-process loading (no worker info) uses worker id 0.
        worker_id = info.id if info is not None else 0
        while True:
            item = self.shm_context.get_data(worker_id)
            if item is None:
                return
            yield item
class NearRewardConfig(RewardConfig):
    """Reward-shaping config that produces a NearRewardShaper with a fixed coefficient."""

    def __init__(self, coeff):
        # coeff: scalar weight applied by the shaper (semantics defined by
        # NearRewardShaper — not visible from this file).
        self.coeff = coeff

    def create_reward_shaper(self):
        """Instantiate the shaper configured with this coefficient."""
        return NearRewardShaper(self.coeff)
def test_faso_error_checks():
    """Every invalid FASO construction must raise ValueError."""
    invalid_constructions = [
        lambda: FASO(FASO(RMSProp(0.01))),  # nested FASO is rejected
        lambda: FASO(RMSProp(0.01), mcse_threshold=0),
        lambda: FASO(RMSProp(0.01), W_min=0),
        lambda: FASO(RMSProp(0.01), k_check=0),
        lambda: FASO(RMSProp(0.01), ESS_min=0),
    ]
    for build in invalid_constructions:
        with pytest.raises(ValueError):
            build()
def test_locallygrouped_self_attention_module():
    """LocallyGroupedSelfAttention must preserve the (B, N, C) input shape."""
    attention = LocallyGroupedSelfAttention(embed_dims=32, window_size=3)
    hw_shape = (56, 56)  # 56 * 56 == 3136 tokens
    output = attention(torch.randn(1, 3136, 32), hw_shape)
    assert output.shape == torch.Size([1, 3136, 32])
def predictor_exptrsonpath_set(exptrs):
    """Set PHCpack continuation parameter 17 to exptrs."""
    # Aliased to set_param to avoid shadowing the builtin `set`.
    from phcpy.phcpy2c3 import py2c_set_value_of_continuation_parameter as set_param
    return set_param(17, exptrs)
class TestInferenceDropout(unittest.TestCase):
    """Tests for retain-dropout-at-inference behaviour on TransformerModel."""

    def setUp(self):
        # Minimal 2-encoder-layer / 1-decoder-layer transformer config.
        (self.task, self.parser) = get_dummy_task_and_parser()
        TransformerModel.add_args(self.parser)
        self.args = self.parser.parse_args([])
        self.args.encoder_layers = 2
        self.args.decoder_layers = 1

    def test_sets_inference_dropout_to_true(self):
        # retain_dropout=True must enable dropout at inference everywhere.
        self.args.retain_dropout = True
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.apply_during_inference

    def test_inference_dropout_false_by_default(self):
        # Without retain_dropout, inference dropout stays off in all modules.
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert (not self.transformer_model.encoder.dropout_module.apply_during_inference)
        assert (not self.transformer_model.decoder.dropout_module.apply_during_inference)
        for layer in self.transformer_model.encoder.layers:
            assert (not layer.dropout_module.apply_during_inference)
        for layer in self.transformer_model.decoder.layers:
            assert (not layer.dropout_module.apply_during_inference)

    def test_applies_training_mode(self):
        # .eval() must flip the training flag on every dropout module.
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        assert self.transformer_model.encoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.training
        self.transformer_model.eval()
        assert (not self.transformer_model.decoder.dropout_module.training)
        for layer in self.transformer_model.encoder.layers:
            assert (not layer.dropout_module.training)

    def test_retain_modules(self):
        # Retaining only encoder module classes: decoder modules stay disabled.
        self.args.retain_dropout = True
        self.args.retain_dropout_modules = ['TransformerEncoder', 'TransformerEncoderLayer']
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert (not self.transformer_model.decoder.dropout_module.apply_during_inference)
        for layer in self.transformer_model.decoder.layers:
            assert (not layer.dropout_module.apply_during_inference)
def test_abs(args, device_id, pt, step):
    """Run abstractive-summarization inference from a saved checkpoint.

    Args:
        args: namespace with paths, batch size, GPU visibility, etc.
        device_id: unused here; the device is derived from args.visible_gpus.
        pt: checkpoint path; falls back to args.test_from when empty.
        step: training step number, forwarded to the predictor.
    """
    device = ('cpu' if (args.visible_gpus == '-1') else 'cuda')
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info(('Loading checkpoint from %s' % test_from))
    # Load to CPU regardless of where the checkpoint was saved.
    checkpoint = torch.load(test_from, map_location=(lambda storage, loc: storage))
    opt = vars(checkpoint['opt'])
    # Restore model-defining flags from the checkpoint so the architecture matches.
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    model = AbsSummarizer(args, device, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.test_batch_size, device, shuffle=False, is_test=True)
    # BioBERT vocabulary; the [unused*] tokens serve as BOS/EOS/EOQ markers.
    tokenizer = BertTokenizer.from_pretrained('dmis-lab/biobert-base-cased-v1.1', do_lower_case=False, cache_dir=args.temp_dir)
    symbols = {'BOS': tokenizer.vocab['[unused99]'], 'EOS': tokenizer.vocab['[unused1]'], 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    predictor = build_predictor(args, tokenizer, symbols, model, logger)
    predictor.translate(test_iter, step)
def GetCifar10():
    """Download (if needed) and unpack the CIFAR-10 archive into ./data.

    Side effects only: creates data/, fetches cifar10.zip and unzips it.
    """
    # Replaces the shell `mkdir data/` call; idempotent and shell-free.
    os.makedirs('data', exist_ok=True)
    if not os.path.exists('data/cifar10.zip'):
        # NOTE(review): the original command has no URL, so wget downloads
        # nothing — supply the real archive URL before relying on this.
        os.system('wget -P data/')
    os.chdir('./data')
    os.system('unzip -u cifar10.zip')
    os.chdir('..')
def _test():
    """Smoke-test the resattnet model family: build, count weights, backprop.

    NOTE(review): the expected per-model parameter counts were missing from
    the original asserts (they were syntactically incomplete). Restore the
    known counts in `expected_weight_counts` to re-enable the checks.
    """
    import torch
    pretrained = False
    models = [resattnet56, resattnet92, resattnet128, resattnet164,
              resattnet200, resattnet236, resattnet452]
    # model name -> expected trainable-parameter count (fill in when known).
    expected_weight_counts = {}
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        expected = expected_weight_counts.get(model.__name__)
        if expected is not None:
            assert weight_count == expected
        # Forward/backward sanity check with an ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
class MAMuJoCo():
def __init__(self, scenario):
env_config = get_env_config(scenario)
self._environment = gymnasium_robotics.mamujoco_v0.parallel_env(**env_config)
self.info_spec = {'state': self._environment.state()}
def reset(self):
(observations, info) = self._environment.reset()
info['state'] = self._environment.state().astype('float32')
return (observations, info)
def step(self, actions):
(observations, rewards, terminals, trunctations, info) = self._environment.step(actions)
info['state'] = self._environment.state().astype('float32')
return (observations, rewards, terminals, trunctations, info)
def __getattr__(self, name: str):
if hasattr(self.__class__, name):
return self.__getattribute__(name)
else:
return getattr(self._environment, name) |
class ValueFunction(nn.Module):
    """1-D temporal U-Net-style encoder mapping a trajectory to a scalar value.

    The temporal length is halved by each Downsample1d, so `horizon` is
    tracked alongside the channel widths to size the final linear head.
    """

    def __init__(self, horizon, transition_dim, cond_dim, dim=32, dim_mults=(1, 2, 4, 8), out_dim=1):
        super().__init__()
        # Channel widths per resolution, e.g. [transition_dim, 32, 64, 128, 256].
        dims = [transition_dim, *map((lambda m: (dim * m)), dim_mults)]
        in_out = list(zip(dims[:(- 1)], dims[1:]))
        time_dim = dim
        # Sinusoidal timestep embedding followed by a small MLP.
        self.time_mlp = nn.Sequential(SinusoidalPosEmb(dim), nn.Linear(dim, (dim * 4)), nn.Mish(), nn.Linear((dim * 4), dim))
        self.blocks = nn.ModuleList([])
        num_resolutions = len(in_out)
        print(in_out)
        for (ind, (dim_in, dim_out)) in enumerate(in_out):
            is_last = (ind >= (num_resolutions - 1))
            # Two residual temporal blocks + a downsample at each resolution.
            self.blocks.append(nn.ModuleList([ResidualTemporalBlock(dim_in, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), ResidualTemporalBlock(dim_out, dim_out, kernel_size=5, embed_dim=time_dim, horizon=horizon), Downsample1d(dim_out)]))
            if (not is_last):
                # NOTE(review): horizon is halved only for non-last levels even
                # though forward() applies Downsample1d at every level — confirm.
                horizon = (horizon // 2)
        # Two further residual+downsample stages shrinking channels by 2x then 4x.
        mid_dim = dims[(- 1)]
        mid_dim_2 = (mid_dim // 2)
        mid_dim_3 = (mid_dim // 4)
        self.mid_block1 = ResidualTemporalBlock(mid_dim, mid_dim_2, kernel_size=5, embed_dim=time_dim, horizon=horizon)
        self.mid_down1 = Downsample1d(mid_dim_2)
        horizon = (horizon // 2)
        self.mid_block2 = ResidualTemporalBlock(mid_dim_2, mid_dim_3, kernel_size=5, embed_dim=time_dim, horizon=horizon)
        self.mid_down2 = Downsample1d(mid_dim_3)
        horizon = (horizon // 2)
        # Flattened feature size feeding the final head (guarded to >= 1 step).
        fc_dim = (mid_dim_3 * max(horizon, 1))
        self.final_block = nn.Sequential(nn.Linear((fc_dim + time_dim), (fc_dim // 2)), nn.Mish(), nn.Linear((fc_dim // 2), out_dim))

    def forward(self, x, cond, time, *args):
        # x: (batch, horizon, transition) -> (batch, transition, horizon) for Conv1d.
        x = einops.rearrange(x, 'b h t -> b t h')
        t = self.time_mlp(time)
        for (resnet, resnet2, downsample) in self.blocks:
            x = resnet(x, t)
            x = resnet2(x, t)
            x = downsample(x)
        x = self.mid_block1(x, t)
        x = self.mid_down1(x)
        x = self.mid_block2(x, t)
        x = self.mid_down2(x)
        # Flatten and concatenate with the time embedding for the value head.
        x = x.view(len(x), (- 1))
        out = self.final_block(torch.cat([x, t], dim=(- 1)))
        return out
class FakeTokyo(FakeBackend):
    """A fake backend modeled on the 20-qubit IBM Q Tokyo coupling map."""

    def __init__(self):
        # Undirected coupling map over qubits 0..19 (both directions listed).
        cmap = [[0, 1], [0, 5], [1, 0], [1, 2], [1, 6], [2, 1], [2, 3], [2, 6], [3, 2], [3, 8], [3, 9], [4, 8], [4, 9], [5, 0], [5, 6], [5, 10], [5, 11], [6, 1], [6, 2], [6, 5], [6, 7], [6, 10], [6, 11], [7, 1], [7, 6], [7, 8], [7, 12], [7, 13], [8, 3], [8, 4], [8, 7], [8, 9], [8, 12], [8, 13], [9, 3], [9, 4], [9, 8], [10, 5], [10, 6], [10, 11], [10, 15], [11, 5], [11, 6], [11, 10], [11, 12], [11, 16], [11, 17], [12, 7], [12, 8], [12, 11], [12, 13], [12, 16], [13, 7], [13, 8], [13, 12], [13, 14], [13, 18], [13, 19], [14, 13], [14, 18], [14, 19], [15, 10], [15, 16], [16, 11], [16, 12], [16, 15], [16, 17], [17, 11], [17, 16], [18, 13], [18, 14], [19, 13], [19, 14]]
        configuration = QasmBackendConfiguration(
            backend_name='fake_tokyo',
            backend_version='0.0.0',
            # BUG FIX: the coupling map above addresses qubits up to index 19,
            # so the device has 20 qubits, not 16.
            n_qubits=20,
            basis_gates=['u1', 'u2', 'u3', 'cx', 'id'],
            simulator=False,
            local=True,
            conditional=False,
            open_pulse=False,
            memory=False,
            max_shots=65536,
            gates=[GateConfig(name='TODO', parameters=[], qasm_def='TODO')],
            coupling_map=cmap)
        super().__init__(configuration)
class CombineDBs(data.Dataset):
    """Dataset that concatenates several datasets, de-duplicating by image id.

    Args:
        dataloaders: datasets to combine; each must expose an `im_ids` list
            and `__getitem__`.
        excluded: optional datasets whose image ids are removed from the result.
    """

    NUM_CLASSES = 21

    def __init__(self, dataloaders, excluded=None):
        self.dataloaders = dataloaders
        self.excluded = excluded
        self.im_ids = []
        # Union of image ids across all dataloaders (insertion order, unique).
        for dl in dataloaders:
            for elem in dl.im_ids:
                if elem not in self.im_ids:
                    self.im_ids.append(elem)
        # Drop any id that appears in an excluded dataset.
        if excluded:
            for dl in excluded:
                for elem in dl.im_ids:
                    if elem in self.im_ids:
                        self.im_ids.remove(elem)
        # Map each surviving image id to its (dataset index, item index) pair;
        # the first dataset containing a given id wins.
        self.cat_list = []
        self.im_list = []
        new_im_ids = []
        num_images = 0
        for ii, dl in enumerate(dataloaders):
            for jj, curr_im_id in enumerate(dl.im_ids):
                if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
                    num_images += 1
                    new_im_ids.append(curr_im_id)
                    self.cat_list.append({'db_ii': ii, 'cat_ii': jj})
        self.im_ids = new_im_ids
        print('Combined number of images: {:d}'.format(num_images))

    def __getitem__(self, index):
        _db_ii = self.cat_list[index]['db_ii']
        _cat_ii = self.cat_list[index]['cat_ii']
        sample = self.dataloaders[_db_ii].__getitem__(_cat_ii)
        # Tag the sample with its source dataset when metadata is present.
        if 'meta' in sample.keys():
            sample['meta']['db'] = str(self.dataloaders[_db_ii])
        return sample

    def __len__(self):
        return len(self.cat_list)

    def __str__(self):
        include_db = [str(db) for db in self.dataloaders]
        # BUG FIX: self.excluded may be None; iterating it crashed __str__.
        exclude_db = [str(db) for db in (self.excluded or [])]
        return 'Included datasets:' + str(include_db) + '\n' + 'Excluded datasets:' + str(exclude_db)
class QuestionAnsweringTrainer(Trainer):
    """HF Trainer specialized for extractive QA: post-processes raw span
    predictions into answer texts before computing metrics."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) examples paired with the eval_dataset features.
        self.eval_examples = eval_examples
        # Callable(examples, features, predictions[, stage]) -> EvalPrediction.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str='eval'):
        eval_dataset = (self.eval_dataset if (eval_dataset is None) else eval_dataset)
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = (self.eval_examples if (eval_examples is None) else eval_examples)
        # Temporarily disable compute_metrics so the inner loop does not try
        # to score raw logits; metrics are computed after post-processing.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = (self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop)
        try:
            output = eval_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            # Always restore the metric function, even if the loop raised.
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is not None) and (self.compute_metrics is not None)):
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Namespace all metric keys with the prefix (e.g. 'eval_').
            for key in list(metrics.keys()):
                if (not key.startswith(f'{metric_key_prefix}_')):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}
        if (self.args.tpu_metrics_debug or self.args.debug):
            # Dump XLA metrics when debugging on TPU.
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test'):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Same compute_metrics suppression as in evaluate().
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = (self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop)
        try:
            output = eval_loop(predict_dataloader, description='Prediction', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is None) or (self.compute_metrics is None)):
            # Nothing to score without post-processing; return raw loop output.
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        for key in list(metrics.keys()):
            if (not key.startswith(f'{metric_key_prefix}_')):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def generate_timestep_weights(args, num_timesteps):
    """Build a sampling-weight vector over diffusion timesteps.

    Biases 'later', 'earlier' or a ['range'] of timesteps by
    args.timestep_bias_multiplier and normalizes to sum to 1. Any other
    strategy returns uniform (unnormalized) ones.

    Raises:
        ValueError: on invalid range bounds or a non-positive multiplier.
    """
    weights = torch.ones(num_timesteps)
    num_to_bias = int(args.timestep_bias_portion * num_timesteps)
    if args.timestep_bias_strategy == 'later':
        bias_indices = slice(-num_to_bias, None)
    elif args.timestep_bias_strategy == 'earlier':
        bias_indices = slice(0, num_to_bias)
    elif args.timestep_bias_strategy == 'range':
        range_begin = args.timestep_bias_begin
        range_end = args.timestep_bias_end
        if range_begin < 0:
            raise ValueError('When using the range strategy for timestep bias, you must provide a beginning timestep greater or equal to zero.')
        if range_end > num_timesteps:
            raise ValueError('When using the range strategy for timestep bias, you must provide an ending timestep smaller than the number of timesteps.')
        bias_indices = slice(range_begin, range_end)
    else:
        # No biasing requested: uniform weights.
        return weights
    if args.timestep_bias_multiplier <= 0:
        # BUG FIX: this previously *returned* the ValueError instead of raising it.
        raise ValueError('The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific timesteps. If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead. A timestep bias multiplier less than or equal to 0 is not allowed.')
    weights[bias_indices] *= args.timestep_bias_multiplier
    weights /= weights.sum()
    return weights
_module()
class VarifocalLoss(nn.Module):
    """Module wrapper around varifocal_loss (sigmoid formulation only)."""

    def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0):
        super(VarifocalLoss, self).__init__()
        assert (use_sigmoid is True), 'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A per-call override takes precedence over the configured reduction.
        reduction = reduction_override if reduction_override else self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        return self.loss_weight * varifocal_loss(
            pred, target, weight,
            alpha=self.alpha,
            gamma=self.gamma,
            iou_weighted=self.iou_weighted,
            reduction=reduction,
            avg_factor=avg_factor)
class Exrop():
    """Runs the external 'exrop' ROP-chain compiler in a subprocess.

    NOTE(review): relies on Popen/STDOUT/PIPE/TimeoutExpired being imported
    from `subprocess` at module level -- confirm in the file header.
    """
    def __init__(self, binary, input, job, ropchain, bad_chars):
        # Target binary, input artifact, owning job (supplies the logger),
        # ropchain spec path, and an optional bad-characters string.
        self.binary = binary
        self.input = input
        self.job = job
        self.logger = job.logger
        self.ropchain = ropchain
        self.bad_chars = bad_chars
    def run(self, timeout):
        """Invoke exrop_runner.py on the binary/ropchain.

        Exits the whole process on timeout (code 3) or on a non-zero
        compiler return code (code 1).
        """
        from os import environ, pathsep, unlink, symlink
        from os.path import abspath, dirname, join
        # Temporarily extend PYTHONPATH with '/exrop' so the child finds the
        # exrop package; the original value is restored right after Popen
        # has inherited the modified environment.
        # NOTE(review): raises KeyError if PYTHONPATH is unset -- confirm the
        # caller always sets it.
        pp = environ['PYTHONPATH']
        del environ['PYTHONPATH']
        environ['PYTHONPATH'] = '{}{}{}'.format(pp, pathsep, '/exrop')
        runner = abspath(join(dirname(__file__), 'exrop_runner.py'))
        cmd = ['/usr/bin/python3', runner, self.binary, self.ropchain]
        if self.bad_chars:
            cmd += [self.bad_chars]
        self.logger.debug('RUN exrop run {}'.format(' '.join(cmd)))
        process = Popen(cmd, env=environ, stderr=STDOUT, stdout=PIPE)
        environ['PYTHONPATH'] = pp
        try:
            stdout = process.communicate(timeout=timeout)[0]
            self.logger.debug('exrop runner output:')
            self.logger.debug(stdout.decode(errors='ignore'))
        except TimeoutExpired:
            process.kill()
            self.logger.critical('FAIL TIMEOUT')
            exit(3)
        if (process.returncode != 0):
            self.logger.error('Compilation ERROR with {} (exrop)'.format(process.returncode))
            exit(1)
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model (BasicBlock, layout [2, 2, 2, 2]).

    When ``pretrained`` is True, the published weights are downloaded and
    merged into the fresh model, keeping only keys the model actually has.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return model
    state = model.state_dict()
    downloaded = model_zoo.load_url(model_urls['resnet18'])
    # Drop any downloaded entries the current architecture does not define.
    compatible = {name: tensor for name, tensor in downloaded.items() if name in state}
    state.update(compatible)
    model.load_state_dict(state)
    return model
def ResNet50(output_stride, BatchNorm, pretrained_url=None):
    """Build a ResNet-50 backbone (Bottleneck blocks, layout [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], output_stride, BatchNorm, pretrained_url)
def get_dataset_with_opts(opts_dic, mode):
    """Instantiate the dataset(s) described by ``opts_dic['<mode>_dataset']``.

    Accepts either a single spec (dict with 'type' and 'params') or a list of
    specs; a list is wrapped in a ``ConcatDataset`` whose ``dataset_info``
    concatenates the parts' info lines.

    Args:
        opts_dic: Options dictionary holding the dataset spec(s).
        mode: Dataset mode string (e.g. 'train'/'val'); also injected into
            each spec's params as ``dataset_mode``.

    Returns:
        A dataset instance, or a ``ConcatDataset`` for list specs.

    Raises:
        NotImplementedError: If a spec's 'type' is not registered.
    """
    dataset_opts = opts_dic['{}_dataset'.format(mode)]
    if isinstance(dataset_opts, list):
        used_datasets = []
        dataset_info = []
        for dataset_part_opts in dataset_opts:
            part = _build_dataset_from_opts(dataset_part_opts, mode)
            used_datasets.append(part)
            dataset_info.extend(part.dataset_info)
        cat_dataset = ConcatDataset(used_datasets)
        cat_dataset.dataset_info = dataset_info
        return cat_dataset
    return _build_dataset_from_opts(dataset_opts, mode)

def _build_dataset_from_opts(part_opts, mode):
    """Look up ``part_opts['type']`` in the manager registry and instantiate it.

    Previously this lookup/instantiation logic was duplicated verbatim in both
    branches of ``get_dataset_with_opts``.
    """
    dataset_name = part_opts['type']
    if dataset_name not in manager.DATASETS.modules_dict:
        raise NotImplementedError('The dataset was not found {}'.format(dataset_name))
    Dataset = manager.DATASETS[dataset_name]
    _params = part_opts['params']
    # Mutates the params dict in place (same as the original behavior).
    _params['dataset_mode'] = mode
    return Dataset(**_params)
def get_arg_sets(arg_dict):
    """Expand an argument grid into the list of all argument combinations.

    Each key maps to either a single value or a list of values; list values
    are combined Cartesian-style with all previously accumulated sets.

    Args:
        arg_dict: Mapping of argument name -> value or list of values.

    Returns:
        List of dicts, one per combination; ``[{}]`` for an empty input.
        Ordering matches the original implementation (new values vary
        slowest within each expansion step).
    """
    arg_sets = [{}]
    for arg, vals in arg_dict.items():
        # Normalize the scalar case so one expansion path handles both;
        # previously the two branches duplicated the combination loop.
        if not isinstance(vals, list):
            vals = [vals]
        arg_sets = [{**prev, arg: val} for val in vals for prev in arg_sets]
    return arg_sets
class SmoothL1(Loss):
    """Smooth-L1 (Huber-style) loss wrapping ``nn.SmoothL1Loss``."""
    def __init__(self):
        # Default-configured (mean-reduced) criterion from torch.
        self.loss = nn.SmoothL1Loss()
    def __call__(self, logits, targets, **kwargs):
        """Return smooth-L1 between ``logits`` and ``targets``; extra kwargs are ignored."""
        criterion = self.loss
        return criterion(logits, targets)
def print_final_results(history):
    """Print a one-line summary of final metric values.

    Keys containing '.' (e.g. 'acc.val') are rendered as 'Val acc'; any
    metric whose rendered name contains 'acc' is shown as a percentage,
    everything else with three decimals.
    """
    pieces = ['\nFinal Results: ']
    for name, value in history.items():
        if '.' in name:
            parts = name.split('.')
            name = parts[1].title() + ' ' + parts[0]
        if 'acc' in name:
            rendered = '{:.1f}%'.format(value * 100)
        else:
            rendered = '{:.3f}'.format(value)
        pieces.append(' | {}: {}'.format(name, rendered))
    print(''.join(pieces))
def change_transform_origin(transform, center):
    """Return ``transform`` re-expressed about ``center``.

    Equivalent to translating ``center`` to the origin, applying
    ``transform``, then translating back:
    ``T(center) @ transform @ T(-center)``.
    """
    center = np.array(center)
    move_back = translation(center)
    move_to_origin = translation(-center)
    return np.linalg.multi_dot([move_back, transform, move_to_origin])
def _multi_instance_helper(model, recv_queue, send_queue, next_idx):
    """Worker loop for multi-process inference.

    Pulls work items from ``recv_queue`` forever: either a DataLoader (whose
    batches are claimed cooperatively across workers via the shared
    ``next_idx`` counter) or a single ``(idx, inputs)`` pair. Results (or the
    raised exception) are pushed to ``send_queue``.
    """
    from bigdl.nano.pytorch import InferenceOptimizer
    with InferenceOptimizer.get_context(model):
        while True:
            try:
                args = recv_queue.get()
                if isinstance(args, DataLoader):
                    dataset = args.dataset
                    batch_size = args.batch_size
                    length = len(args)
                    # Claim batch indices from the shared counter so several
                    # workers split the loader without overlap.
                    idx = get_next_idx(next_idx)
                    while (idx < length):
                        batch = [dataset[i] for i in range((idx * batch_size), ((idx + 1) * batch_size))]
                        inputs = args.collate_fn(batch)
                        # Reserve the next index before running inference on
                        # the current one.
                        new_idx = get_next_idx(next_idx)
                        inference(idx, inputs, model, send_queue)
                        idx = new_idx
                else:
                    # Direct (idx, inputs) work item.
                    (idx, inputs) = args
                    inference(idx, inputs, model, send_queue)
            except Exception as e:
                # Report the failure to the parent keyed by the failing index.
                # NOTE(review): `idx` is unbound here if recv_queue.get()
                # itself raised -- confirm that case cannot occur.
                send_queue.put((idx, e))
def fliplr(img):
    """Flip a 4-D image batch horizontally (reverse the last/width axis).

    Args:
        img: Tensor of shape (N, C, H, W).

    Returns:
        A new tensor with dimension 3 reversed (a copy, like the original
        ``index_select``-based implementation).
    """
    # Idiom: torch.flip replaces the manual reversed-index construction
    # (arange(W-1, -1, -1).long() + index_select) with one call.
    return torch.flip(img, [3])
def main():
    """Interactively play a Crafter(-style) environment with the keyboard.

    Renders via pygame, maps keys to environment actions, prints rewards and
    newly unlocked achievements, and handles episode termination according
    to --death (continue / reset / quit).
    """
    # argparse 'type' helper: maps the strings 'False'/'True' to bool.
    boolean = (lambda x: bool(['False', 'True'].index(x)))
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--area', nargs=2, type=int, default=(64, 64))
    parser.add_argument('--view', type=int, nargs=2, default=(9, 9))
    parser.add_argument('--length', type=int, default=None)
    parser.add_argument('--health', type=int, default=9)
    parser.add_argument('--window', type=int, nargs=2, default=(600, 600))
    parser.add_argument('--size', type=int, nargs=2, default=(0, 0))
    parser.add_argument('--record', type=str, default=None)
    parser.add_argument('--fps', type=int, default=5)
    parser.add_argument('--wait', type=boolean, default=False)
    parser.add_argument('--death', type=str, default='reset', choices=['continue', 'reset', 'quit'])
    parser.add_argument('--env', type=str, default='custom')
    args = parser.parse_args()
    # Keyboard-to-action mapping: movement, interaction, placing, crafting.
    keymap = {pygame.K_a: 'move_left', pygame.K_d: 'move_right', pygame.K_w: 'move_up', pygame.K_s: 'move_down', pygame.K_SPACE: 'do', pygame.K_r: 'place_stone', pygame.K_t: 'place_table', pygame.K_f: 'place_furnace', pygame.K_1: 'make_wood_pickaxe', pygame.K_2: 'make_stone_pickaxe', pygame.K_3: 'make_iron_pickaxe', pygame.K_4: 'make_wood_sword', pygame.K_5: 'make_stone_sword', pygame.K_6: 'make_iron_sword'}
    print('Actions:')
    for (key, action) in keymap.items():
        print(f'  {pygame.key.name(key)}: {action}')
    # Render size: any zero component falls back to the window size.
    size = list(args.size)
    size[0] = (size[0] or args.window[0])
    size[1] = (size[1] or args.window[1])
    # Env names containing '_' or 'alltasks' load a custom envs.env_<name>
    # module; otherwise the stock crafter environment is used.
    if (('_' in args.env) or ('alltasks' in args.env)):
        env_module = importlib.import_module(f'envs.env_{args.env}')
    else:
        env_module = importlib.import_module('crafter.env')
    env = env_module.Env(area=args.area, view=args.view, length=args.length, seed=args.seed)
    env = crafter.Recorder(env, args.record)
    env.reset()
    achievements = set()
    duration = 0
    return_ = 0
    was_done = False
    # Report resource availability of the generated world.
    print('Diamonds exist:', env._world.count('diamond'))
    print('Coal exist:', env._world.count('coal'))
    print('Wood exist:', env._world.count('tree'))
    print('Iron exist:', env._world.count('iron'))
    print('Stone exist:', env._world.count('stone'))
    pygame.init()
    screen = pygame.display.set_mode(args.window)
    clock = pygame.time.Clock()
    running = True
    while running:
        # Draw the current frame, upscaling to the window when sizes differ.
        image = env.render(size=size, add_desc=True)
        if (size != args.window):
            image = Image.fromarray(image)
            image = image.resize(args.window, resample=Image.NEAREST)
            image = np.array(image)
        # pygame surfaces are (width, height), hence the transpose.
        surface = pygame.surfarray.make_surface(image.transpose((1, 0, 2)))
        screen.blit(surface, (0, 0))
        pygame.display.flip()
        clock.tick(args.fps)
        # Translate window/keyboard events into an action for this step.
        action = None
        pygame.event.pump()
        for event in pygame.event.get():
            if (event.type == pygame.QUIT):
                running = False
            elif ((event.type == pygame.KEYDOWN) and (event.key == pygame.K_ESCAPE)):
                running = False
            elif ((event.type == pygame.KEYDOWN) and (event.key in keymap.keys())):
                action = keymap[event.key]
        if (action is None):
            # No fresh keypress: fall back to a currently-held key; if none is
            # held, either skip the step (--wait, while awake) or idle.
            pressed = pygame.key.get_pressed()
            for (key, action) in keymap.items():
                if pressed[key]:
                    break
            else:
                # for/else: runs only when no mapped key is held.
                if (args.wait and (not env._player.sleeping)):
                    continue
                else:
                    action = 'noop'
        (_, reward, done, _, _) = env.step(env.action_names.index(action))
        duration += 1
        # Announce achievements newly unlocked on this step.
        unlocked = {name for (name, count) in env._player.achievements.items() if ((count > 0) and (name not in achievements))}
        for name in unlocked:
            achievements |= unlocked
            total = len(env._player.achievements.keys())
            print(f'Achievement ({len(achievements)}/{total}): {name}')
        if ((env._step > 0) and ((env._step % 100) == 0)):
            print(f'Time step: {env._step}')
        if reward:
            print(f'Reward: {reward}')
            return_ += reward
        # Episode-end handling per --death.
        if (done and (not was_done)):
            was_done = True
            print('Episode done!')
            print('Duration:', duration)
            print('Return:', return_)
            if (args.death == 'quit'):
                running = False
            if (args.death == 'reset'):
                print('\nStarting a new episode.')
                env.reset()
                achievements = set()
                was_done = False
                duration = 0
                return_ = 0
            if (args.death == 'continue'):
                pass
    pygame.quit()
def compute_aff(x: Tensor, similarity: str='cosine') -> Tensor:
    """Return a pairwise affinity matrix for the rows of ``x``.

    'cosine' computes the Gram matrix x @ x.T (true cosine only if rows are
    pre-normalized); 'euclidean' returns negated pairwise L2 distances so
    that larger still means more similar.
    """
    if similarity == 'cosine':
        return torch.mm(x, x.t())
    if similarity == 'euclidean':
        batched = x.unsqueeze(0)
        dists = torch.cdist(batched, batched, p=2).squeeze(0)
        return -dists
    raise NotImplementedError(f'Incorrect similarity measure: {similarity}')
class Prop_Gerund_Verbs(object):
    """Proportion of gerund verbs among all verbs across a set of sentences."""
    def __init__(self, sentence_objs):
        # Sentence objects exposing `stanza_doc` and a `pos_tag_counter`.
        self.sentence_objs = sentence_objs
    def handle(self):
        """Return gerund-verb count / total verb count, or NOT_AVAILABLE when there are no verbs."""
        gerund_total = 0
        verb_total = 0
        for sentence in self.sentence_objs:
            gerund_total += num_gerund_verbs(sentence.stanza_doc)
            verb_total += sentence.pos_tag_counter.get_pos_tag_count(VERB)
        if verb_total == 0:
            return NOT_AVAILABLE
        return gerund_total / verb_total
def get_processed_dataset(key):
    """Load and preprocess the dataset registered under ``key``.

    Resolves the source file via the module-level ``paths`` mapping and runs
    the matching entry of ``preprocess_functions`` on it.
    """
    source_path = os.path.join(SOURCE_DATASET_DIR, paths[key])
    return preprocess_functions[key](source_path)
def MLP(channels):
    """Build a multilayer perceptron: one (Linear -> SiLU) stage per
    consecutive pair in ``channels``, wrapped in an outer Sequential."""
    stages = []
    for idx in range(1, len(channels)):
        stages.append(Sequential(Linear(channels[idx - 1], channels[idx]), SiLU()))
    return Sequential(*stages)
# NOTE(review): bare '_model' looks like a truncated decorator (likely
# `@register_model` from timm) -- confirm against the original source.
_model
def gluon_resnet50_v1s(pretrained=False, **kwargs):
    """Gluon ResNet-50-v1s: deep-stem variant (stem_width=64, stem_type='deep'),
    Bottleneck blocks with layout [3, 4, 6, 3]."""
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs)
    return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args)
def leaky_relu(input_, leakiness=0.2):
    """Leaky ReLU activation: elementwise max(x, leakiness * x)."""
    assert (leakiness <= 1)
    scaled = (leakiness * input_)
    return tf.maximum(input_, scaled)
def get_dummy_databunch() -> ImageDataBunch:
    """Return a minimal 1x1, batch-size-1 colorize databunch backed by './dummy/'."""
    dummy_path = Path('./dummy/')
    return get_colorize_data(sz=1, bs=1, crappy_path=dummy_path, good_path=dummy_path, keep_pct=0.001)
class ModerateCNNCeleba(nn.Module):
    """Moderate-depth CNN for CelebA binary classification.

    Three conv stages (each: two 3x3 convs + ReLU, then 2x2 max-pool) followed
    by a dropout/linear head producing 2 logits. Expects 3x32x32 inputs so the
    flattened feature size is 256 * 4 * 4 = 4096.
    """
    def __init__(self):
        super(ModerateCNNCeleba, self).__init__()
        # Channel plan per stage: (in, mid, out).
        conv_stack = []
        for c_in, c_mid, c_out in ((3, 32, 64), (64, 128, 128), (128, 256, 256)):
            conv_stack += [
                nn.Conv2d(in_channels=c_in, out_channels=c_mid, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels=c_mid, out_channels=c_out, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]
        self.conv_layer = nn.Sequential(*conv_stack)
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 2),
        )
    def forward(self, x):
        """Run the conv stages, flatten to 4096 features, and classify."""
        features = self.conv_layer(x)
        flat = features.view(-1, 4096)
        return self.fc_layer(flat)
class AlignLinear(torch.nn.Module):
    """Token-alignment scoring model.

    LSTM-encodes query/candidate token sequences and scores pairs with a
    learned (max_len_token x max_len_token) alignment weight matrix applied
    over the masked token-similarity grid.

    NOTE(review): assumes CUDA is available (`.cuda()` on every lookup
    tensor) and uses the legacy `Variable` wrapper.
    """
    def __init__(self, config, vocab, max_len_token):
        super(AlignLinear, self).__init__()
        self.config = config
        self.vocab = vocab
        self.max_len_token = max_len_token
        self.need_flatten = True
        # +1 embedding row, presumably for padding/OOV -- TODO confirm.
        self.EMB = EMB((vocab.size + 1), config.embedding_dim)
        self.LSTM = LSTM(self.EMB, config.embedding_dim, config.rnn_hidden_dim, config.bidirectional)
        # Learned pairwise alignment weights applied to the similarity grid.
        self.align_weights = nn.Parameter(torch.randn(max_len_token, max_len_token), requires_grad=True)
        # All-ones targets for the pairwise ranking BCE loss below.
        self.ones = Variable(torch.ones(config.train_batch_size, 1))
        self.loss = BCEWithLogitsLoss()
    def compute_loss(self, qry_tk, pos_tk, neg_tk):
        """Pairwise ranking loss: BCE pushing score(qry,pos) above score(qry,neg)."""
        (qry_lkup, pos_lkup, neg_lkup) = get_qry_pos_neg_tok_lookup(self.vocab, qry_tk, pos_tk, neg_tk)
        (qry_emb, qry_mask) = self.LSTM(torch.from_numpy(qry_lkup).cuda())
        (pos_emb, pos_mask) = self.LSTM(torch.from_numpy(pos_lkup).cuda())
        (neg_emb, neg_mask) = self.LSTM(torch.from_numpy(neg_lkup).cuda())
        loss = self.loss((self.score_pair_train(qry_emb, pos_emb, qry_mask, pos_mask) - self.score_pair_train(qry_emb, neg_emb, qry_mask, neg_mask)), self.ones)
        return loss
    def score_pair_train(self, qry_emb, cnd_emb, qry_mask, cnd_mask):
        """Score query/candidate batches.

        Computes the token-similarity matrix, masks padded positions, weights
        it by `align_weights`, and sums over both token axes.
        """
        qry_cnd_sim = torch.bmm(qry_emb, torch.transpose(cnd_emb, 2, 1))
        qry_mask = qry_mask.unsqueeze(dim=2)
        cnd_mask = cnd_mask.unsqueeze(dim=1)
        # Outer product of the two masks zeroes similarities at padded positions.
        qry_cnd_mask = torch.bmm(qry_mask, cnd_mask)
        qry_cnd_sim = torch.mul(qry_cnd_sim, qry_cnd_mask)
        output = torch.sum((self.align_weights.expand_as(qry_cnd_sim) * qry_cnd_sim), dim=1, keepdim=True)
        output = torch.sum(output, dim=2, keepdim=True)
        output = torch.squeeze(output, dim=2)
        return output
    def score_dev_test_batch(self, qry_tk, cnd_tk):
        """Score query/candidate pairs for dev/test (scores only, no loss)."""
        (qry_lkup, cnd_lkup) = get_qry_cnd_tok_lookup(self.vocab, qry_tk, cnd_tk)
        (qry_emb, qry_mask) = self.LSTM(torch.from_numpy(qry_lkup).cuda())
        (cnd_embed, cnd_mask) = self.LSTM(torch.from_numpy(cnd_lkup).cuda())
        scores = self.score_pair_train(qry_emb, cnd_embed, qry_mask, cnd_mask)
        return scores
    def flatten_parameters(self):
        """Delegate to the wrapped LSTM."""
        self.LSTM.flatten_parameters()
class Turtlebot(Roomba):
    """Roomba subclass speaking the Create/Turtlebot opcode set over serial."""
    def __init__(self):
        super(Turtlebot, self).__init__()
    def start(self, tty='/dev/ttyUSB0', baudrate=57600):
        """Open the serial interface and register the Create-specific opcodes."""
        super(Turtlebot, self).start(tty, baudrate)
        self.sci.add_opcodes(CREATE_OPCODES)
    def control(self):
        """Enter control mode: passive first, then safe or full per `self.safe`."""
        logging.info('sending control opcodes.')
        self.passive()
        if self.safe:
            self.sci.safe()
        else:
            self.sci.full()
        time.sleep(0.5)
    def power_low_side_drivers(self, drivers):
        """Switch the three low-side drivers from a 3-element boolean sequence."""
        assert (len(drivers) == 3), 'Expecting 3 low side driver power settings.'
        byte = 0
        # Pack the three booleans into a bitmask (driver i -> bit i).
        for (driver, power) in enumerate(drivers):
            byte += ((2 ** driver) * int(power))
        self.sci.low_side_drivers(byte)
    def set_digital_outputs(self, value):
        """Forward a raw digital-output byte to the serial interface."""
        self.sci.digital_outputs(value)
    def soft_reset(self):
        """Soft-reset the robot, wait for it to come back, then go passive."""
        logging.info('sending soft reset.')
        self.sci.soft_reset()
        time.sleep(START_DELAY)
        self.passive()
class NVDM(object):
    """Neural Variational Document Model (TF graph construction).

    Builds an MLP encoder producing a Gaussian latent topic vector, the
    analytic KL term against a unit prior, a log-softmax decoder with
    (multi-sample) reconstruction loss, and separate Adam update ops for
    encoder and decoder variables.

    NOTE(review): uses pre-TF-1.0 APIs (tf.mul, tf.split(dim, num, value)) --
    this code only runs on a very old TensorFlow release.
    """
    def __init__(self, vocab_size, n_hidden, n_topic, n_sample, learning_rate, batch_size, non_linearity):
        self.vocab_size = vocab_size
        self.n_hidden = n_hidden
        self.n_topic = n_topic
        self.n_sample = n_sample
        self.non_linearity = non_linearity
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        # Bag-of-words input and a per-document scalar mask.
        self.x = tf.placeholder(tf.float32, [None, vocab_size], name='input')
        self.mask = tf.placeholder(tf.float32, [None], name='mask')
        with tf.variable_scope('encoder'):
            self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity)
            self.mean = utils.linear(self.enc_vec, self.n_topic, scope='mean')
            self.logsigm = utils.linear(self.enc_vec, self.n_topic, bias_start_zero=True, matrix_start_zero=True, scope='logsigm')
            # Analytic KL divergence between N(mean, sigma^2) and N(0, I).
            self.kld = ((- 0.5) * tf.reduce_sum((((1 - tf.square(self.mean)) + (2 * self.logsigm)) - tf.exp((2 * self.logsigm))), 1))
            self.kld = (self.mask * self.kld)
        with tf.variable_scope('decoder'):
            if (self.n_sample == 1):
                # Single reparameterized sample: z = mu + sigma * eps.
                eps = tf.random_normal((batch_size, self.n_topic), 0, 1)
                doc_vec = (tf.mul(tf.exp(self.logsigm), eps) + self.mean)
                logits = tf.nn.log_softmax(utils.linear(doc_vec, self.vocab_size, scope='projection'))
                self.recons_loss = (- tf.reduce_sum(tf.mul(logits, self.x), 1))
            else:
                # Average the reconstruction loss over n_sample samples.
                eps = tf.random_normal(((self.n_sample * batch_size), self.n_topic), 0, 1)
                eps_list = tf.split(0, self.n_sample, eps)
                recons_loss_list = []
                for i in range(self.n_sample):
                    if (i > 0):
                        # Reuse the projection weights across samples.
                        tf.get_variable_scope().reuse_variables()
                    curr_eps = eps_list[i]
                    doc_vec = (tf.mul(tf.exp(self.logsigm), curr_eps) + self.mean)
                    logits = tf.nn.log_softmax(utils.linear(doc_vec, self.vocab_size, scope='projection'))
                    recons_loss_list.append((- tf.reduce_sum(tf.mul(logits, self.x), 1)))
                self.recons_loss = (tf.add_n(recons_loss_list) / self.n_sample)
        # Negative ELBO: reconstruction + KL.
        self.objective = (self.recons_loss + self.kld)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        fullvars = tf.trainable_variables()
        enc_vars = utils.variable_parser(fullvars, 'encoder')
        dec_vars = utils.variable_parser(fullvars, 'decoder')
        # Separate update ops so encoder and decoder can be stepped independently.
        enc_grads = tf.gradients(self.objective, enc_vars)
        dec_grads = tf.gradients(self.objective, dec_vars)
        self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))
        self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))
class DiseasesLPDataset():
    """Link-prediction dataset wrapper for the Diseases graph.

    Loads the graph and node features from the repo-relative data directory,
    strips self-loops, and exposes a single ``Data`` object as both the
    train and validation 'loader'.
    """
    def __init__(self, name, val_prop, test_prop, normalize_adj, normalize_feats):
        data_dir = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
        self.path = data_dir
        graph, feats = load_data(self.path, name, val_prop, test_prop, normalize_adj, normalize_feats)
        self.num_features = feats.shape[1]
        # Edge list -> 2 x E index tensor, then drop self-loops.
        edges = torch.tensor(list(graph.edges)).t().contiguous()
        edges, _ = remove_self_loops(edges)
        self.dataset = Data(edge_index=edges, x=feats)
        self.reconstruction_loss = None
    def create_loaders(self) -> Tuple[(DataLoader, DataLoader)]:
        """Return (train, val) single-item 'loaders' over the same Data object."""
        return ([self.dataset], [self.dataset])
class QuestionProcessor():
    """Normalize VQA-style question strings.

    Lowercases, strips a fixed punctuation set, removes trailing spaces, and
    truncates to at most ``max_words`` space-separated words.
    """
    def __init__(self, max_words=50):
        self.max_words = max_words
    def __call__(self, question):
        return self.pre_question(question)
    def pre_question(self, question):
        """Return the cleaned (and possibly truncated) question string."""
        cleaned = re.sub('([.!\\"()*#:;~])', '', question.lower())
        cleaned = cleaned.rstrip(' ')
        words = cleaned.split(' ')
        if len(words) > self.max_words:
            cleaned = ' '.join(words[:self.max_words])
        return cleaned
def parse_args():
    """Parse command-line options for MOTChallenge metric computation."""
    parser = argparse.ArgumentParser(description='\nCompute metrics for trackers using MOTChallenge ground-truth data.\nFiles\n-----\nAll file content, ground truth and test files, have to comply with the\nformat described in \nMilan, Anton, et al. \n"Mot16: A benchmark for multi-object tracking." \narXiv preprint arXiv:1603.00831 (2016).\n for ground truth data\n <GT_ROOT>/<SEQUENCE_1>/gt/gt.txt\n <GT_ROOT>/<SEQUENCE_2>/gt/gt.txt\n ...\nLayout for test data\n <TEST_ROOT>/<SEQUENCE_1>.txt\n <TEST_ROOT>/<SEQUENCE_2>.txt\n ...\nSequences of ground truth and test will be matched according to the `<SEQUENCE_X>`\nstring.', formatter_class=argparse.RawTextHelpFormatter)
    add = parser.add_argument
    add('--groundtruths', type=str, help='Directory containing ground truth files.')
    add('--tests', type=str, help='Directory containing tracker result files')
    add('--score_threshold', type=float, help='Score threshold', default=0.5)
    add('--gt_type', type=str, default='')
    add('--eval_official', action='store_true')
    add('--loglevel', type=str, help='Log level', default='info')
    add('--fmt', type=str, help='Data format', default='mot15-2D')
    add('--solver', type=str, help='LAP solver to use')
    return parser.parse_args()
def robust_loss(net, epsilon, X, y, size_average=True, device_ids=None, parallel=False, **kwargs):
    """Compute the certified-robust cross-entropy loss and robust error.

    Runs ``RobustBounds`` (optionally wrapped in ``nn.DataParallel``) to get
    worst-case logits ``f``; the robust error counts samples whose worst-case
    argmax differs from the label (averaged when ``size_average``).
    """
    bounds = RobustBounds(net, epsilon, **kwargs)
    if parallel:
        f = nn.DataParallel(bounds)(X, y)
    else:
        f = bounds(X, y)
    err = (f.max(1)[1] != y)
    if size_average:
        err = err.sum().item() / X.size(0)
    # NOTE: `reduce=` is the legacy reduction flag of CrossEntropyLoss.
    ce_loss = nn.CrossEntropyLoss(reduce=size_average)(f, y)
    return (ce_loss, err)
def get_configs_from_pipeline_file():
    """Read FLAGS.pipeline_config_path and split it into its parts.

    Returns (model_config, eval_config, input_config). When
    FLAGS.eval_training_data is set, the pipeline's train_config is returned
    in place of eval_config.
    """
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
        text_format.Merge(config_file.read(), pipeline_config)
    model_config = pipeline_config.model
    eval_config = pipeline_config.train_config if FLAGS.eval_training_data else pipeline_config.eval_config
    input_config = pipeline_config.eval_input_reader
    return (model_config, eval_config, input_config)
def pix2pix_generator(net, num_outputs, blocks=None, upsample_method='nn_upsample_conv', is_training=False):
    """Build a pix2pix (U-Net style) generator graph.

    Encoder: strided 4x4 convs with leaky-ReLU. Decoder: upsampling blocks
    with skip connections concatenated from the mirrored encoder
    activations, followed by a final conv head with tanh predictions.

    Args:
        net: 4-D input tensor (NHWC); height must equal width.
        num_outputs: Number of output channels.
        blocks: Optional list of block specs exposing `num_filters` and
            `decoder_keep_prob`; defaults to `_default_generator_blocks()`.
        upsample_method: Method name passed through to `upsample`.
        is_training: Unused here -- see the dropout note below.

    Returns:
        (logits, end_points) where end_points['predictions'] = tanh(logits).

    Raises:
        ValueError: If the input is not square.
    """
    end_points = {}
    blocks = (blocks or _default_generator_blocks())
    input_size = net.get_shape().as_list()
    (height, width) = (input_size[1], input_size[2])
    if (height != width):
        raise ValueError('The input height must match the input width.')
    # Output tensor keeps the input spatial size but with num_outputs channels.
    input_size[3] = num_outputs
    upsample_fn = functools.partial(upsample, method=upsample_method)
    encoder_activations = []
    with tf.variable_scope('encoder'):
        with tf.contrib.framework.arg_scope([layers.conv2d], kernel_size=[4, 4], stride=2, activation_fn=tf.nn.leaky_relu):
            for (block_id, block) in enumerate(blocks):
                if (block_id == 0):
                    # First block: no normalization on the raw input.
                    net = layers.conv2d(net, block.num_filters, normalizer_fn=None)
                elif (block_id < (len(blocks) - 1)):
                    net = layers.conv2d(net, block.num_filters)
                else:
                    # Bottleneck: linear conv, no normalization.
                    net = layers.conv2d(net, block.num_filters, activation_fn=None, normalizer_fn=None)
                encoder_activations.append(net)
                end_points[('encoder%d' % block_id)] = net
    reversed_blocks = list(blocks)
    reversed_blocks.reverse()
    with tf.variable_scope('decoder'):
        # NOTE(review): dropout is forced on (is_training=True) regardless of
        # the `is_training` argument -- presumably matching pix2pix's
        # test-time dropout; confirm this is intentional.
        with tf.contrib.framework.arg_scope([layers.dropout], is_training=True):
            for (block_id, block) in enumerate(reversed_blocks):
                if (block_id > 0):
                    # U-Net skip connection from the mirrored encoder block.
                    net = tf.concat([net, encoder_activations[((- block_id) - 1)]], axis=3)
                net = tf.nn.relu(net)
                net = upsample_fn(net, block.num_filters, [2, 2])
                if (block.decoder_keep_prob > 0):
                    net = layers.dropout(net, keep_prob=block.decoder_keep_prob)
                end_points[('decoder%d' % block_id)] = net
    with tf.variable_scope('output'):
        logits = layers.conv2d(net, num_outputs, [4, 4], activation_fn=None)
        logits = tf.reshape(logits, input_size)
        end_points['logits'] = logits
        end_points['predictions'] = tf.tanh(logits)
    return (logits, end_points)
class convnet32():
    """Convolutional binary classifier for 32x32 inputs (Theano).

    Three BN_Conv_layer stages followed by a tanh hidden layer and a sigmoid
    output layer.
    """
    def __init__(self, model_params, nkerns=[1, 8, 4, 2], ckern=128, filter_sizes=[5, 5, 5, 5, 4]):
        # model_params: (num_hid, num_dims, num_class, batch_size, num_channels).
        (self.num_hid, num_dims, num_class, self.batch_size, self.num_channels) = model_params
        # Spatial side length recovered from the flattened input dimension.
        self.D = int(np.sqrt((num_dims / self.num_channels)))
        numpy_rng = np.random.RandomState(1234)
        # Kernel counts scale with ckern; the first entry is forced to the
        # number of input channels.
        self.nkerns = (np.asarray(nkerns) * ckern)
        self.nkerns[0] = self.num_channels
        self.filter_sizes = filter_sizes
        # Flattened size of the last conv stage's output.
        num_convH = ((self.nkerns[(- 1)] * filter_sizes[(- 1)]) * filter_sizes[(- 1)])
        self.W = initialize_weight(num_convH, self.num_hid, 'W', numpy_rng, 'uniform')
        self.hbias = theano.shared(np.zeros((self.num_hid,), dtype=theano.config.floatX), name='hbias_enc')
        self.W_y = initialize_weight(self.num_hid, num_class, 'W_y', numpy_rng, 'uniform')
        self.L1 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[0], bnkern=self.nkerns[1], bfilter_sz=filter_sizes[0], tfilter_sz=filter_sizes[1])
        self.L2 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[1], bnkern=self.nkerns[2], bfilter_sz=filter_sizes[1], tfilter_sz=filter_sizes[2])
        self.L3 = BN_Conv_layer(self.batch_size, numpy_rng, tnkern=self.nkerns[2], bnkern=self.nkerns[3], bfilter_sz=filter_sizes[2], tfilter_sz=filter_sizes[3])
        self.num_classes = num_class
        self.params = ((([self.W_y, self.W, self.hbias] + self.L1.params) + self.L2.params) + self.L3.params)
    def propagate(self, X, num_train=None, atype='relu'):
        """Forward pass: reshape to images, three conv stages, tanh hidden, sigmoid output."""
        image_shape0 = [X.shape[0], self.num_channels, self.D, self.D]
        ConX = X.reshape(image_shape0)
        H0 = self.L1.conv(ConX, atype=atype)
        H1 = self.L2.conv(H0, atype=atype)
        H2 = self.L3.conv(H1, atype=atype)
        H2 = H2.flatten(2)
        H3 = activation_fn_th((T.dot(H2, self.W) + self.hbias), atype='tanh')
        y = T.nnet.sigmoid(T.dot(H3, self.W_y))
        return y
    def cost(self, X, y):
        """Negative mean log-likelihood of the true classes."""
        p_y_x = self.propagate(X)
        return (- T.mean(T.log(p_y_x)[(T.arange(y.shape[0]), y)]))
    def weight_decay_l2(self):
        """L2 penalty over the dense weights (conv weights excluded)."""
        return (0.5 * (T.sum((self.W ** 2)) + T.sum((self.W_y ** 2))))
    def weight_decay_l1(self):
        """L1 penalty over the hidden-layer weights only."""
        return T.sum(abs(self.W))
    def errors(self, X, y, num_train=None):
        """Binary error rate using a 0.5 threshold on the flattened output."""
        p_y_x = self.propagate(X, num_train=num_train).flatten()
        pred_y = (p_y_x > 0.5)
        return T.mean(T.neq(pred_y, y))
    def set_params(self, params):
        # NOTE(review): unpacks into self.ybias / W0 / b0 / W1 / b1, none of
        # which are created in __init__ or used elsewhere in this class --
        # this looks copied from another model; confirm before use.
        [self.W, self.hbias, self.W_y, self.ybias, self.W0, self.b0, self.W1, self.b1] = params
        self.params = params
class HubertModel(metaclass=DummyObject):
    """Import-time placeholder for HubertModel when torch is unavailable.

    Instantiation delegates to ``requires_backends``, which is expected to
    report the missing 'torch' backend to the user.
    """
    # Backends this dummy stands in for.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Code2VecModelBase(abc.ABC):
    """Abstract base class for code2vec models.

    Handles configuration verification, vocabulary management, example
    counting, model persistence, and exporting embeddings in word2vec
    format. Subclasses implement training / evaluation / prediction and the
    inner-model load/save hooks.
    """
    def __init__(self, config: Config):
        self.config = config
        self.config.verify()
        self._log_creating_model()
        self._init_num_of_examples()
        self._log_model_configuration()
        self.vocabs = Code2VecVocabs(config)
        self.vocabs.target_vocab.get_index_to_word_lookup_table()
        self._load_or_create_inner_model()
        self._initialize()
    def _log_creating_model(self):
        """Log a banner announcing model creation."""
        self.log('')
        self.log('')
        self.log('')
        self.log('')
        self.log(' Creating word2vec model ')
        self.log('')
        self.log('')
    def _log_model_configuration(self):
        """Log every hyper-parameter from the config, name-aligned."""
        self.log('')
        self.log(' Configuration - Hyper Parameters ')
        longest_param_name_len = max((len(param_name) for (param_name, _) in self.config))
        for (param_name, param_val) in self.config:
            self.log('{name: <{name_len}}{val}'.format(name=param_name, val=param_val, name_len=(longest_param_name_len + 2)))
        self.log('')
    @property
    def logger(self):
        # BUG FIX: must be a property -- `log()` below does
        # `self.logger.info(...)`, which fails with AttributeError when
        # `logger` is a plain bound method.
        return self.config.get_logger()
    def log(self, msg):
        """Log `msg` through the configured logger."""
        self.logger.info(msg)
    def _init_num_of_examples(self):
        """Populate NUM_TRAIN_EXAMPLES / NUM_TEST_EXAMPLES on the config."""
        self.log('Checking number of examples ...')
        if self.config.is_training:
            self.config.NUM_TRAIN_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.train_data_path)
            self.log(' Number of train examples: {}'.format(self.config.NUM_TRAIN_EXAMPLES))
        if self.config.is_testing:
            self.config.NUM_TEST_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.TEST_DATA_PATH)
            self.log(' Number of test examples: {}'.format(self.config.NUM_TEST_EXAMPLES))
    @staticmethod
    def _get_num_of_examples_for_dataset(dataset_path: str) -> int:
        """Return the number of examples in `dataset_path`.

        Uses (and creates) a cached `<path>.num_examples` sidecar file.

        BUG FIX: decorated with @staticmethod -- it is invoked as
        `self._get_num_of_examples_for_dataset(path)`, which previously
        bound `self` to the `dataset_path` parameter.
        """
        dataset_num_examples_file_path = (dataset_path + '.num_examples')
        if os.path.isfile(dataset_num_examples_file_path):
            with open(dataset_num_examples_file_path, 'r') as file:
                num_examples_in_dataset = int(file.readline())
        else:
            num_examples_in_dataset = common.count_lines_in_file(dataset_path)
            with open(dataset_num_examples_file_path, 'w') as file:
                file.write(str(num_examples_in_dataset))
        return num_examples_in_dataset
    def load_or_build(self):
        """(Re)create the vocabularies and load or build the inner model."""
        self.vocabs = Code2VecVocabs(self.config)
        self._load_or_create_inner_model()
    def save(self, model_save_path=None):
        """Save vocabularies and the inner model.

        Defaults to config.MODEL_SAVE_PATH; parent directories are created
        as needed.
        """
        if (model_save_path is None):
            model_save_path = self.config.MODEL_SAVE_PATH
        model_save_dir = '/'.join(model_save_path.split('/')[:(- 1)])
        if (not os.path.isdir(model_save_dir)):
            os.makedirs(model_save_dir, exist_ok=True)
        self.vocabs.save(self.config.get_vocabularies_path_from_model_path(model_save_path))
        self._save_inner_model(model_save_path)
    def _write_code_vectors(self, file, code_vectors):
        """Write one space-separated vector per line to `file`."""
        for vec in code_vectors:
            file.write((' '.join(map(str, vec)) + '\n'))
    def _get_attention_weight_per_context(self, path_source_strings: Iterable[str], path_strings: Iterable[str], path_target_strings: Iterable[str], attention_weights: Iterable[float]) -> Dict[(Tuple[(str, str, str)], float)]:
        """Map each (source, path, target) string triplet to its attention weight."""
        attention_weights = np.squeeze(attention_weights, axis=(- 1))
        attention_per_context: Dict[(Tuple[(str, str, str)], float)] = {}
        for (path_source, path, path_target, weight) in zip(path_source_strings, path_strings, path_target_strings, attention_weights):
            string_context_triplet = (common.binary_to_string(path_source), common.binary_to_string(path), common.binary_to_string(path_target))
            attention_per_context[string_context_triplet] = weight
        return attention_per_context
    def close_session(self):
        """Hook for backends that hold a session; no-op by default."""
        pass
    def train(self):
        """Train the model (implemented by subclasses)."""
        ...
    def evaluate(self) -> Optional[ModelEvaluationResults]:
        """Evaluate the model (implemented by subclasses)."""
        ...
    def predict(self, predict_data_lines: Iterable[str]) -> List[ModelPredictionResults]:
        """Predict for raw input lines (implemented by subclasses)."""
        ...
    def _save_inner_model(self, path):
        """Persist backend-specific model state (implemented by subclasses)."""
        ...
    def _load_or_create_inner_model(self):
        """Load the inner model when config.is_loading, otherwise create it."""
        if self.config.is_loading:
            self._load_inner_model()
        else:
            self._create_inner_model()
    def _load_inner_model(self):
        """Load backend-specific model state (implemented by subclasses)."""
        ...
    def _create_inner_model(self):
        """Create backend-specific model state; no-op by default."""
        pass
    def _initialize(self):
        """Final initialization hook; no-op by default."""
        pass
    def _get_vocab_embedding_as_np_array(self, vocab_type: VocabType) -> np.ndarray:
        """Return the embedding matrix for `vocab_type` (implemented by subclasses)."""
        ...
    def save_word2vec_format(self, dest_save_path: str, vocab_type: VocabType):
        """Export the embeddings of `vocab_type` in word2vec text format.

        Raises:
            ValueError: If `vocab_type` is not a member of VocabType.
        """
        if (vocab_type not in VocabType):
            raise ValueError('`vocab_type` should be `VocabType.Token`, `VocabType.Target` or `VocabType.Path`.')
        vocab_embedding_matrix = self._get_vocab_embedding_as_np_array(vocab_type)
        index_to_word = self.vocabs.get(vocab_type).index_to_word
        with open(dest_save_path, 'w') as words_file:
            common.save_word2vec_file(words_file, index_to_word, vocab_embedding_matrix)
class Keypoints():
COCO_NAMES = ['nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle']
COCO_CONNECTIVITY = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
COCO_BONE_LOOKUP_MIXAMO = {'nose': 'NoseEnd', 'left_eye': 'LeftEyeEnd', 'right_eye': 'RightEyeEnd', 'left_ear': 'LeftEarEnd', 'right_ear': 'RightEarEnd', 'left_shoulder': 'LeftArm', 'right_shoulder': 'RightArm', 'left_elbow': 'LeftForeArm', 'right_elbow': 'RightForeArm', 'left_wrist': 'LeftHand', 'right_wrist': 'RightHand', 'left_hip': 'LeftUpLeg', 'right_hip': 'RightUpLeg', 'left_knee': 'LeftLeg', 'right_knee': 'RightLeg', 'left_ankle': 'LeftFoot', 'right_ankle': 'RightFoot'}
COCO_BONE_LOOKUP_ANIMA = {'nose': 'Head', 'left_eye': 'Head', 'right_eye': 'Head', 'left_ear': 'Head', 'right_ear': 'Head', 'left_shoulder': 'LeftArm', 'right_shoulder': 'RightArm', 'left_elbow': 'LeftForeArm', 'right_elbow': 'RightForeArm', 'left_wrist': 'LeftHand', 'right_wrist': 'RightHand', 'left_hip': 'LeftUpLeg', 'right_hip': 'RightUpLeg', 'left_knee': 'LeftLeg', 'right_knee': 'RightLeg', 'left_ankle': 'LeftFoot', 'right_ankle': 'RightFoot'}
BODY25B_NAMES = ['Nose', 'Neck', 'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist', 'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle', 'REye', 'LEye', 'REar', 'LEar']
BODY25B_CONNECTIVITY = [[0, 1], [0, 2], [1, 3], [2, 4], [5, 7], [6, 8], [7, 9], [8, 10], [5, 11], [6, 12], [11, 13], [12, 14], [13, 15], [14, 16], [15, 19], [19, 20], [15, 21], [16, 22], [22, 23], [16, 24], [5, 17], [6, 17], [17, 18], [11, 12]]
BODY25B_BONE_LOOKUP_MIXAMO = {'Nose': 'NoseEnd', 'Neck': 'Head', 'RShoulder': 'RightShoulder', 'RElbow': 'RightForeArm', 'RWrist': 'RightHand', 'LShoulder': 'LeftShoulder', 'LElbow': 'LeftForeArm', 'LWrist': 'LeftHand', 'RHip': 'RightUpLeg', 'RKnee': 'RightLeg', 'RAnkle': 'RightFoot', 'LHip': 'LeftUpLeg', 'LKnee': 'LeftLeg', 'LAnkle': 'LeftFoot', 'REye': 'RightEyeEnd', 'LEye': 'LeftEyeEnd', 'REar': 'RightEarEnd', 'LEar': 'LeftEarEnd'}
BODY25B_BONE_LOOKUP_ANIMA = {'Nose': 'Head', 'Neck': 'Head', 'RShoulder': 'RightShoulder', 'RElbow': 'RightForeArm', 'RWrist': 'RightHand', 'LShoulder': 'LeftShoulder', 'LElbow': 'LeftForeArm', 'LWrist': 'LeftHand', 'RHip': 'RightUpLeg', 'RKnee': 'RightLeg', 'RAnkle': 'RightFoot', 'LHip': 'LeftUpLeg', 'LKnee': 'LeftLeg', 'LAnkle': 'LeftFoot', 'REye': 'RightEyeEnd', 'LEye': 'LeftEyeEnd', 'REar': 'RightEarEnd', 'LEar': 'LeftEarEnd'}
def __init__(self, root: bpy.types.Object, style: str='coco', armature: str='anima'):
if (style == 'coco'):
self.names = self.COCO_NAMES
self.connectivity = self.COCO_CONNECTIVITY
if (armature == 'mixamo'):
self.bone_lookup = self.COCO_BONE_LOOKUP_MIXAMO
elif (armature == 'anima'):
self.bone_lookup = self.COCO_BONE_LOOKUP_ANIMA
else:
raise ValueError(f'Unknown keypoint armature: {armature}')
elif (style == 'body25b'):
self.names = self.BODY25B_NAMES
self.connectivity = self.BODY25B_CONNECTIVITY
if (armature == 'mixamo'):
self.bone_lookup = self.BODY25B_BONE_LOOKUP_MIXAMO
elif (armature == 'anima'):
self.bone_lookup = self.BODY25B_BONE_LOOKUP_ANIMA
else:
raise ValueError(f'Unknown keypoint armature: {armature}')
else:
raise ValueError(f'Unknown keypoint style: {style}')
self.root = root
self.style = style
self.armature = armature
self.bones = {bone.name: bone for bone in self.root.pose.bones}
self.num_keypoints = None
self.keypoints_xyv = None
self.keypoints_xyz = None
def update(self, world_transform=None) -> None:
    """Recompute per-keypoint pixel (x, y, visibility) and world positions.

    Args:
        world_transform: optional extra 4x4 matrix applied on top of the
            root object's world matrix before projecting each bone head.
    """
    self.num_keypoints = 0
    self.keypoints_xyv = []
    self.keypoints_xyz = []
    for (name, bone_name) in self.bone_lookup.items():
        bone = self.bones.get(bone_name, None)
        if (bone is None):
            log.warning(f'Could not find keypoint bone {name} using {bone_name}')
            # BUGFIX: skip missing bones instead of dereferencing None below.
            continue
        # BUGFIX: restored the matrix-vector products ('@') that were lost.
        if (world_transform is None):
            pos = self.root.matrix_world @ bone.head
        else:
            pos = (world_transform @ self.root.matrix_world) @ bone.head
        (x, y, v) = zpy.camera.camera_xyv(pos, obj=self.root)
        self.keypoints_xyv += [x, y, v]
        self.keypoints_xyz += tuple(pos)
        self.num_keypoints += 1
def forward_gen():
    """Build a zero-argument closure that feeds a fixed random input to ``net``.

    The 3-element input is sampled once here, so every call of the returned
    closure reuses the same tensor; ``net`` is resolved at call time from the
    enclosing module.
    """
    sample = torch.FloatTensor(3).normal_()

    def forward():
        return net(sample)

    return forward
class MegaConfig(PretrainedConfig):
    """Configuration for a MEGA model.

    Every constructor argument is mirrored onto an attribute of the same
    name; the special token ids are forwarded to ``PretrainedConfig``.
    """
    model_type = 'mega'

    def __init__(self, vocab_size=30522, hidden_size=128, num_hidden_layers=4, intermediate_size=256, ema_projection_size=16, bidirectional=True, shared_representation_size=64, use_chunking=False, chunk_size=(- 1), truncation=None, normalize_before_mega=True, normalization_type='scalenorm', norm_affine=True, activation='silu', attention_activation='softmax', dropout_prob=0.1, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, use_feature_dropout=False, use_normalized_ffn=True, nffn_hidden_size=256, normalize_before_ffn=True, nffn_activation_dropout_prob=0.1, max_positions=2048, add_token_type_embeddings=False, type_vocab_size=2, initializer_range=0.02, ema_delta_alpha_range=0.2, ema_beta_range=0.02, ema_gamma_omega_range=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, relative_positional_bias='rotary', classifier_dropout=None, use_cache=True, add_lm_hidden_dense_layer=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Vocabulary / embeddings.
        self.vocab_size = vocab_size
        self.max_positions = max_positions
        self.add_token_type_embeddings = add_token_type_embeddings
        self.type_vocab_size = type_vocab_size
        # Core MEGA dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.shared_representation_size = shared_representation_size
        self.ema_projection_size = ema_projection_size
        self.bidirectional = bidirectional
        # Chunked attention.
        self.use_chunking = use_chunking
        self.chunk_size = chunk_size
        self.truncation = truncation
        # Activations / normalization.
        self.activation = activation
        self.attention_activation = attention_activation
        self.normalize_before_mega = normalize_before_mega
        self.normalization_type = normalization_type
        self.norm_affine = norm_affine
        # Dropout.
        self.dropout_prob = dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.use_feature_dropout = use_feature_dropout
        # Normalized feed-forward network.
        self.use_normalized_ffn = use_normalized_ffn
        self.nffn_hidden_size = nffn_hidden_size
        self.normalize_before_ffn = normalize_before_ffn
        self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
        # Parameter-initialization ranges.
        self.initializer_range = initializer_range
        self.ema_delta_alpha_range = ema_delta_alpha_range
        self.ema_beta_range = ema_beta_range
        self.ema_gamma_omega_range = ema_gamma_omega_range
        # Misc.
        self.relative_positional_bias = relative_positional_bias
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
        # Hard-coded: this implementation always reports a single attention head.
        self.num_attention_heads = 1
class RandomTransform():
    """Random rotation / projective-warp augmentations driven by a private RNG.

    ``p`` is the per-call probability of applying a transform and
    ``intensity`` (0..1) scales its strength.
    """

    def __init__(self, seed=None, p=1.0, intensity=0.5):
        self.p = p
        self.intensity = intensity
        self.random = random.Random()
        if seed is not None:
            self.seed = seed
            self.random.seed(seed)
        self.last_tran = None

    def get_last_transform(self):
        """Return the most recently estimated projective transform (or None)."""
        return self.last_tran

    def transform(self, image, order=1):
        """Apply the random projective warp to ``image``."""
        return self.apply_projection_transform(image, order)

    def rotate(self, image):
        """With probability p, rotate by a uniform angle within ±30°·intensity."""
        if self.random.random() >= self.p:
            return image
        max_angle = 30.0 * self.intensity
        angle = self.random.uniform(-max_angle, max_angle)
        return rotate(image, angle, mode='edge')

    def apply_projection_transform(self, image, order=1):
        """With probability p, warp by a projective transform whose corners are
        jittered by up to 30%·intensity of the image side length."""
        side = image.shape[0]
        jitter = side * 0.3 * self.intensity
        if self.random.random() >= self.p:
            return image
        # NOTE: draw order matches the original (tl, bl, tr, br) so seeded
        # RNG sequences stay reproducible.
        tl_top, tl_left = self.random.uniform(-jitter, jitter), self.random.uniform(-jitter, jitter)
        bl_bottom, bl_left = self.random.uniform(-jitter, jitter), self.random.uniform(-jitter, jitter)
        tr_top, tr_right = self.random.uniform(-jitter, jitter), self.random.uniform(-jitter, jitter)
        br_bottom, br_right = self.random.uniform(-jitter, jitter), self.random.uniform(-jitter, jitter)
        proj = ProjectiveTransform()
        jittered_corners = np.array((
            (tl_left, tl_top),
            (bl_left, side - bl_bottom),
            (side - br_right, side - br_bottom),
            (side - tr_right, tr_top),
        ))
        square_corners = np.array(((0, 0), (0, side), (side, side), (side, 0)))
        proj.estimate(jittered_corners, square_corners)
        self.last_tran = proj
        return warp(image, proj, output_shape=(side, side), order=order, mode='edge')

    def apply_transform(self, image, transform, order=1):
        """Re-apply a previously estimated transform to another image."""
        side = image.shape[0]
        return warp(image, transform, output_shape=(side, side), order=order, mode='edge')
class ModelsClassTest(unittest.TestCase):
    """Shape and trainability checks for the RRDN/RDN generators, the
    discriminator and the VGG feature extractor."""

    # BUGFIX: unittest requires setUpClass/tearDownClass to be classmethods;
    # without the decorator the framework's ``cls.setUpClass()`` call fails.
    @classmethod
    def setUpClass(cls):
        # BUGFIX: close the config file and avoid Loader-less yaml.load,
        # which is unsafe and removed in PyYAML >= 6.
        with open(os.path.join('tests', 'data', 'config.yml'), 'r') as config_file:
            cls.setup = yaml.safe_load(config_file)
        cls.weights_path = {'generator': os.path.join(cls.setup['weights_dir'], 'test_gen_weights.hdf5'), 'discriminator': os.path.join(cls.setup['weights_dir'], 'test_dis_weights.hdf5')}
        cls.hr_shape = ((((cls.setup['patch_size'] * 2),) * 2) + (3,))
        cls.RRDN = RRDN(arch_params=cls.setup['rrdn'], patch_size=cls.setup['patch_size'])
        cls.RRDN.model.compile(optimizer=Adam(), loss=['mse'])
        cls.RDN = RDN(arch_params=cls.setup['rdn'], patch_size=cls.setup['patch_size'])
        cls.RDN.model.compile(optimizer=Adam(), loss=['mse'])
        cls.f_ext = Cut_VGG19(patch_size=(cls.setup['patch_size'] * 2), layers_to_extract=[1, 2])
        cls.f_ext.model.compile(optimizer=Adam(), loss=['mse', 'mse'])
        cls.discr = Discriminator(patch_size=(cls.setup['patch_size'] * 2))
        cls.discr.model.compile(optimizer=Adam(), loss=['mse'])

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_SR_output_shapes(self):
        """Both generators output 2x patch_size with 3 channels."""
        self.assertTrue((self.RRDN.model.output_shape[1:4] == self.hr_shape))
        self.assertTrue((self.RDN.model.output_shape[1:4] == self.hr_shape))

    def _assert_train_step_updates_weights(self, model, x, y):
        """One train_on_batch must change the first weight array of every
        layer that has trainable weights."""
        before_step = []
        for layer in model.layers:
            if (len(layer.trainable_weights) > 0):
                before_step.append(layer.get_weights()[0])
        model.train_on_batch(x, y)
        i = 0
        for layer in model.layers:
            if (len(layer.trainable_weights) > 0):
                self.assertFalse(np.all((before_step[i] == layer.get_weights()[0])))
                i += 1

    def test_that_the_trainable_layers_change(self):
        x = np.random.random((1, self.setup['patch_size'], self.setup['patch_size'], 3))
        y = np.random.random((1, (self.setup['patch_size'] * 2), (self.setup['patch_size'] * 2), 3))
        self._assert_train_step_updates_weights(self.RRDN.model, x, y)
        self._assert_train_step_updates_weights(self.RDN.model, x, y)
        discr_out_shape = list(self.discr.model.outputs[0].shape)[1:4]
        valid = np.ones(([1] + discr_out_shape))
        self._assert_train_step_updates_weights(self.discr.model, y, valid)

    def test_that_feature_extractor_is_not_trainable(self):
        y = np.random.random((1, (self.setup['patch_size'] * 2), (self.setup['patch_size'] * 2), 3))
        f_ext_out_shape = list(self.f_ext.model.outputs[0].shape[1:4])
        f_ext_out_shape1 = list(self.f_ext.model.outputs[1].shape[1:4])
        feats = [np.random.random(([1] + f_ext_out_shape)), np.random.random(([1] + f_ext_out_shape1))]
        w_before = []
        for layer in self.f_ext.model.layers:
            if layer.trainable:
                w_before.append(layer.get_weights()[0])
        self.f_ext.model.train_on_batch(y, [*feats])
        # BUGFIX: w_before only holds weights of trainable layers, so it must
        # be indexed by its own counter (the old enumerate index drifted past
        # non-trainable layers).  Also compare arrays with np.all — calling
        # assertFalse on a multi-element boolean array raises ValueError —
        # and assert that the frozen extractor's weights are UNCHANGED, which
        # is what this test's name states.
        i = 0
        for layer in self.f_ext.model.layers:
            if layer.trainable:
                self.assertTrue(np.all((w_before[i] == layer.get_weights()[0])))
                i += 1
def load_target_item_embedding(item_ebd_path):
    """Load a pre-computed item-embedding array from a ``.npy`` file."""
    return np.load(item_ebd_path)
class LPoly():
def __init__(self, coefs, dmin=0):
self.coefs = numpy.array(coefs)
if (len(self.coefs) == 0):
self.dmin = dmin
self.iszero = True
self.coefs = [0]
else:
assert (len(self.coefs.shape) == 1), self.coefs
self.dmin = dmin
self.iszero = False
def dmax(self):
return (((2 * len(self.coefs)) + self.dmin) - 2)
def degree(self):
return max((- self.dmin), self.dmax)
def norm(self):
return numpy.linalg.norm(self.coefs)
def inf_norm(self):
(i, x) = self.curve
return numpy.amax(numpy.absolute((i + (1j * x))))
def parity(self):
return (self.dmin % 2)
def curve(self):
values = numpy.exp(numpy.outer(numpy.linspace((((- self.parity) * 1j) * numpy.pi), (1j * numpy.pi), PRES), range(self.dmin, (self.dmax + 1), 2))).dot(self.coefs)
return (numpy.real(values), numpy.imag(values))
def __getitem__(self, key):
if ((key - self.dmin) % 2):
return 0
pos = ((key - self.dmin) // 2)
if ((pos < len(self.coefs)) and (pos >= 0)):
return self.coefs[pos]
else:
return 0
def __mul__(self, other):
if isinstance(other, LAlg):
return LAlg((self * other.IPoly), (self * other.XPoly))
if (not isinstance(other, LPoly)):
return LPoly((other * self.coefs), self.dmin)
if (self.iszero or other.iszero):
return LPoly([])
dmin = (self.dmin + other.dmin)
coefs = numpy.convolve(self.coefs, other.coefs)
return LPoly(coefs, dmin)
def __rmul__(self, other):
if isinstance(other, LAlg):
return LAlg((self * other.IPoly), ((~ self) * other.XPoly))
elif (not isinstance(other, LPoly)):
return LPoly((other * self.coefs), self.dmin)
def __add__(self, other):
if self.iszero:
return LPoly(other.coefs, other.dmin)
if other.iszero:
return LPoly(self.coefs, self.dmin)
assert (self.parity == other.parity), 'not of the same parity'
dmin = min(self.dmin, other.dmin)
dmax = max(self.dmax, other.dmax)
coefs = (self.aligned(dmin, dmax) + other.aligned(dmin, dmax))
return LPoly(coefs, dmin)
def __neg__(self):
return LPoly(((- 1) * self.coefs), self.dmin)
def __invert__(self):
dmin = (- self.dmax)
coefs = self.coefs[::(- 1)]
return LPoly(coefs, dmin)
def __sub__(self, other):
return (self + (- other))
def __str__(self):
return ' + '.join(['{} * w ^ ({})'.format(self.coefs[i], (self.dmin + (2 * i))) for i in range(len(self.coefs))])
def aligned(self, dmin, dmax):
if self.iszero:
return numpy.zeros((((dmax - dmin) // 2) + 1))
else:
assert ((dmin <= self.dmin) and (dmax >= self.dmax)), 'interval not valid'
return numpy.hstack((numpy.zeros(((self.dmin - dmin) // 2)), numpy.array(self.coefs), numpy.zeros(((dmax - self.dmax) // 2))))
def eval(self, angles):
if self.iszero:
return 1
res = self.coefs.dot(numpy.exp((1j * numpy.outer(numpy.arange(self.dmin, (self.dmax + 1), 2), angles))))
return res
def truncate(cls, p, dmin, dmax):
lb = min(dmin, p.dmin)
ub = max(dmax, p.dmax)
return LPoly(p.aligned(lb, (ub + 2))[((dmin - lb) // 2):(((dmax - ub) // 2) - 1)], dmin)
def isconsistent(cls, a, b):
if a.iszero:
return True
if b.iszero:
return True
return (a.parity == b.parity)
def __eq__(self, other):
iseq = (abs((self.coefs - other.coefs)).sum() < 0.001)
iseq = (iseq and (self.dmin == other.dmin))
return iseq
def round_zeros(self, thresh=1e-05):
self.coefs[(self.coefs < thresh)] = 0
def pos_half(self):
new_coefs = numpy.copy(self.coefs)
nhalf = int(numpy.ceil((len(new_coefs) / 2)))
new_coefs[:nhalf] = 0
return LPoly(new_coefs, self.dmin)
def neg_half(self):
new_coefs = numpy.copy(self.coefs)
nhalf = int(numpy.ceil((len(new_coefs) / 2)))
new_coefs[nhalf:] = 0
return LPoly(new_coefs, self.dmin) |
class conv2DBatchNorm(nn.Module):
    """Conv2d optionally followed by BatchNorm2d, applied as a single unit."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, with_bn=True):
        super(conv2DBatchNorm, self).__init__()
        # Dilation values <= 1 are normalized to a plain (dilation=1) conv.
        effective_dilation = dilation if dilation > 1 else 1
        conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=effective_dilation)
        stack = [conv_mod]
        if with_bn:
            stack.append(nn.BatchNorm2d(int(n_filters)))
        self.cb_unit = nn.Sequential(*stack)

    def forward(self, inputs):
        """Run the conv(+bn) stack on ``inputs``."""
        return self.cb_unit(inputs)
def featureL2Norm(feature):
epsilon = 1e-06
norm = torch.pow((torch.sum(torch.pow(feature, 2), 1) + epsilon), 0.5).unsqueeze(1).expand_as(feature)
return torch.div(feature, norm) |
class RepVGGBlock(nn.Module):
    """RepVGG-style block: parallel 3x3 conv, 1x1 conv and (optional)
    identity-BatchNorm branches whose outputs are summed, then passed through
    ``activation``.  ``fuse_repvgg_block`` folds the 1x1 branch into the 3x3
    branch for inference.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode='zeros', avg_pool=False, se_block=False, activation=nn.ReLU()):
        super().__init__()
        self.groups = groups
        self.stride = stride
        self.kernel_size = kernel_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.padding = padding
        self.padding_mode = padding_mode
        self.se_block = se_block
        # The fusion math below assumes 'same'-style padding of 1 for the 3x3 branch.
        assert (padding == 1)
        # 1x1 branch needs no spatial padding: 1 - (3 // 2) == 0.
        padding_11 = (padding - (3 // 2))
        self.fused = False
        self.dense_groups = groups
        self.nonlinearity = activation
        # Identity (BN-only) branch exists only when the block preserves shape.
        self.rbr_identity = (nn.BatchNorm2d(num_features=in_channels) if ((out_channels == in_channels) and (stride == 1)) else None)
        # 3x3 conv+bn branch; skipped entirely when kernel_size == 1.
        self.rbr_dense = (conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=padding, groups=self.dense_groups) if (kernel_size != 1) else None)
        self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)
        if ((stride == 2) and avg_pool):
            # Downsample with avg-pool + unit-stride 1x1 conv instead of a strided 1x1 conv.
            self.rbr_1x1 = nn.Sequential(nn.AvgPool2d(2, 2), conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, groups=groups))
        self.channel_shuffle = (groups > 1)
        if self.se_block:
            # SE1 is project-local squeeze-excite; 'ver' presumably selects the
            # shape-changing variant — confirm against SE1's definition.
            self.se = SE1(in_channels, out_channels, g=groups, ver=(2 if ((out_channels != in_channels) or (stride != 1)) else 1))
    def _forward(self, inputs):
        # Returns (1x1-branch output or None once fused, 3x3-branch output or 0).
        if (not self.fused):
            rbr_1x1_output = self.rbr_1x1(inputs)
        else:
            rbr_1x1_output = None
        if (self.rbr_dense is None):
            dense_output = 0
        else:
            dense_output = self.rbr_dense(inputs)
        return (rbr_1x1_output, dense_output)
    def forward(self, inputs):
        """Sum branch outputs (3x3 + 1x1 + identity), apply SE scaling when
        enabled, then the nonlinearity and optional channel shuffle."""
        if (self.rbr_identity is None):
            id_out = 0
        else:
            id_out = self.rbr_identity(inputs)
        (rbr_1x1_output, drop_path_output) = self._forward(inputs)
        if self.se_block:
            # With an identity branch, SE gates only that branch's output.
            if (self.rbr_identity is not None):
                id_out = (id_out * self.se(id_out))
        if (not self.fused):
            out = ((drop_path_output + rbr_1x1_output) + id_out)
        else:
            # After fuse_repvgg_block() the 1x1 branch lives inside rbr_dense.
            out = (drop_path_output + id_out)
        if (self.se_block and (self.rbr_identity is None)):
            # Without an identity branch, SE gates the whole sum using the raw input.
            out = (out * self.se(inputs))
        out = self.nonlinearity(out)
        if self.channel_shuffle:
            out = channel_shuffle(out, self.groups)
        return out
    def fuse_conv_bn(self, conv, bn):
        """Fold a BatchNorm into the preceding conv and return a new Conv2d
        with bias reproducing bn(conv(x)) in eval mode.

        NOTE(review): ignores any existing conv.bias — assumes conv_bn builds
        bias-free convs; confirm.
        """
        std = (bn.running_var + bn.eps).sqrt()
        bias = (bn.bias - ((bn.running_mean * bn.weight) / std))
        t = (bn.weight / std).reshape((- 1), 1, 1, 1)
        weights = (conv.weight * t)
        bn = nn.Identity()
        conv = nn.Conv2d(in_channels=conv.in_channels, out_channels=conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, dilation=conv.dilation, groups=conv.groups, bias=True, padding_mode=conv.padding_mode)
        conv.weight = torch.nn.Parameter(weights)
        conv.bias = torch.nn.Parameter(bias)
        return conv
    def fuse_repvgg_block(self):
        """Fuse the 1x1 branch into the 3x3 branch for inference.

        NOTE(review): assumes rbr_dense exists (kernel_size != 1); the
        identity branch, if any, is left unfused — confirm the deployment
        path matches.
        """
        self.rbr_dense = self.fuse_conv_bn(self.rbr_dense.conv, self.rbr_dense.bn)
        if (isinstance(self.rbr_1x1, nn.Sequential) and isinstance(self.rbr_1x1[0], nn.AvgPool2d)):
            self.rbr_1x1[1] = self.fuse_conv_bn(self.rbr_1x1[1].conv, self.rbr_1x1[1].bn)
            rbr_1x1_bias = self.rbr_1x1[1].bias
            # Absorb the 2x2 avg-pool: upsample the 1x1 kernel 2x, divide by 4,
            # then pad to align with the 3x3 kernel.
            weight_1x1_expanded = torch.nn.functional.interpolate(self.rbr_1x1[1].weight, scale_factor=2.0, mode='nearest')
            weight_1x1_expanded = (weight_1x1_expanded / 4)
            weight_1x1_expanded = torch.nn.functional.pad(weight_1x1_expanded, [1, 0, 1, 0])
        else:
            self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1.conv, self.rbr_1x1.bn)
            rbr_1x1_bias = self.rbr_1x1.bias
            # Center the 1x1 kernel inside a 3x3 kernel.
            weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1])
        self.rbr_dense.weight = torch.nn.Parameter((self.rbr_dense.weight + weight_1x1_expanded))
        self.rbr_dense.bias = torch.nn.Parameter((self.rbr_dense.bias + rbr_1x1_bias))
        self.rbr_1x1 = nn.Identity()
        self.fused = True
def onclick(event, df):
    """Matplotlib pick handler: label the clicked plot points with subject ids.

    Args:
        event: matplotlib PickEvent; ``event.ind`` holds the picked point
            indices, ``event.mouseevent`` the click coordinates in data space.
        df: DataFrame with at least 'EvaluationModel' and 'subject' columns.
    """
    clicked_index = event.ind
    # NOTE(review): plt.gca() returns an Axes, not a Figure, despite the name.
    fig = plt.gca()
    if (None not in clicked_index):
        output_folders = df['EvaluationModel'].unique()
        nfolders = len(output_folders)
        # Remove previously added annotations, keeping the first
        # nfolders + C(nfolders, 2) text artists — presumably permanent
        # labels created by the plotting code; confirm against the caller.
        while (len(fig.texts) > (nfolders + (np.math.factorial(nfolders) / (np.math.factorial(2) * np.math.factorial((nfolders - 2)))))):
            fig.texts.pop()
        # Map the click's x position onto evenly spaced model bins in [-1, 1].
        bins = np.linspace((- 1), 1, (len(output_folders) + 1))
        i_output_folder = np.where((bins < event.mouseevent.xdata))
        # NOTE(review): np.where returns a tuple; [-1] is the index array and
        # [0] takes the FIRST bin edge below the click (typically index 0).
        # If the intent is the bin containing the click, the LAST such index
        # would be needed — verify against the plotting layout.
        i_output_folder = i_output_folder[(- 1)][0]
        selected_output_folder = df[(df['EvaluationModel'] == output_folders[i_output_folder])]
        for iSubject in range(0, len(clicked_index.tolist())):
            # Stack one subject label per picked point just below the click.
            frame = plt.text(event.mouseevent.xdata, (((- 0.08) - (0.08 * iSubject)) + event.mouseevent.ydata), selected_output_folder['subject'][clicked_index[iSubject]], size=10, ha='center', va='center', bbox=dict(facecolor='red', alpha=0.5))
            matplotlib.artist.Artist.set_visible(frame, True)
        plt.show()
class TrainLoop(object):
    """Minimal train/eval driver: moves batches to ``device``, steps
    ``optimizer``, and checkpoints into ``save_dir``."""
    def __init__(self, cfg, model, data_iter, optimizer, save_dir, device):
        # cfg is read for: n_epochs, save_steps, total_steps (train) and
        # data_parallel (eval).
        self.cfg = cfg
        self.model = model
        self.data_iter = data_iter
        self.optimizer = optimizer
        self.save_dir = save_dir
        self.device = device
    def train(self, get_loss, model_file=None, pretrain_file=None, data_parallel=True):
        """Run the training loop.

        Args:
            get_loss: callable (model, batch, global_step) -> loss tensor.
            model_file: checkpoint to resume from (takes precedence over
                pretrain_file — see load()).
            pretrain_file: pretrained weights to initialize from.
            data_parallel: wrap the model in nn.DataParallel.
                NOTE(review): eval() reads self.cfg.data_parallel instead of
                taking a parameter — inconsistent; confirm intended.
        """
        self.model.train()
        self.load(model_file, pretrain_file)
        model = self.model.to(self.device)
        if data_parallel:
            model = nn.DataParallel(model)
        global_step = 0
        for e in range(self.cfg.n_epochs):
            loss_sum = 0.0
            iter_bar = tqdm(self.data_iter, desc='Iter (loss=X.XXX)')
            for (i, batch) in enumerate(iter_bar):
                batch = [t.to(self.device) for t in batch]
                self.optimizer.zero_grad()
                # .mean() collapses per-replica losses under DataParallel.
                loss = get_loss(model, batch, global_step).mean()
                loss.backward()
                self.optimizer.step()
                global_step += 1
                loss_sum += loss.item()
                iter_bar.set_description(('Iter (loss=%5.3f)' % loss.item()))
                # Periodic checkpoint every cfg.save_steps optimizer steps.
                if ((global_step % self.cfg.save_steps) == 0):
                    self.save(global_step)
                # Early stop once cfg.total_steps is exceeded (0/None disables).
                if (self.cfg.total_steps and (self.cfg.total_steps < global_step)):
                    print(f'Epoch {(e + 1)}/{self.cfg.n_epochs} : Average Loss {(loss_sum / (i + 1))}')
                    print('The Total Steps have been reached.')
                    self.save(final=True)
                    return
            print(f'Epoch {(e + 1)}/{self.cfg.n_epochs} : Average Loss {(loss_sum / (i + 1))}')
        self.save(final=True)
    def eval(self, evaluate, model_file):
        """Run evaluation; returns the list of per-batch results.

        Args:
            evaluate: callable (model, batch) -> (accuracy, result).
            model_file: checkpoint to load before evaluating.
        """
        self.model.eval()
        self.load(model_file, None)
        model = self.model.to(self.device)
        if self.cfg.data_parallel:
            model = nn.DataParallel(model)
        results = []
        iter_bar = tqdm(self.data_iter, desc='Iter (loss=X.XXX)')
        for batch in iter_bar:
            batch = [t.to(self.device) for t in batch]
            with torch.no_grad():
                (accuracy, result) = evaluate(model, batch)
            results.append(result)
            iter_bar.set_description(('Iter(acc=%5.3f)' % accuracy))
        return results
    def load(self, model_file, pretrain_file):
        """Load weights: a full checkpoint (model_file) wins over pretrained
        transformer weights (pretrain_file, .ckpt or .pt)."""
        if model_file:
            print('Loading the model from', model_file)
            self.model.load_state_dict(torch.load(model_file))
        elif pretrain_file:
            print('Loading the pretrained model from', pretrain_file)
            if pretrain_file.endswith('.ckpt'):
                checkpoint.load_model(self.model.transformer, pretrain_file)
            elif pretrain_file.endswith('.pt'):
                # key[12:] strips the 'transformer.' prefix from saved keys.
                self.model.transformer.load_state_dict({key[12:]: value for (key, value) in torch.load(pretrain_file).items() if key.startswith('transformer')})
        return self.model
    def save(self, i=0, final=False):
        """Write the current state dict into save_dir, named by step or 'final'."""
        file = ('model_final.pt' if final else f'model_steps_{i}.pt')
        torch.save(self.model.state_dict(), os.path.join(self.save_dir, file))
def test_Beyond1Std(white_noise):
    """Beyond1Std on white noise should land in the [0.3, 0.4] band
    (~31.7% of Gaussian samples lie beyond one standard deviation)."""
    space = FeatureSpace(featureList=['Beyond1Std'])
    space = space.calculateFeature(white_noise)
    above_lower = space.result(method='array') >= 0.3
    below_upper = space.result(method='array') <= 0.4
    assert (above_lower and below_upper)
# BUGFIX: line mangled to the bare name "_on_pypy" (a NameError at import) —
# it was almost certainly a PyPy skip/xfail marker, e.g.
# @pytest.mark.skipif("env.PYPY"); restore the exact marker from upstream
# before relying on this test under PyPy.
@pytest.mark.parametrize('cls_name', ['PickleableWithDict', 'PickleableWithDictNew'])
def test_roundtrip_with_dict(cls_name):
    """Pickle round-trip must preserve both the bound object's state and
    dynamically added __dict__ attributes."""
    cls = getattr(m, cls_name)
    p = cls('test_value')
    p.extra = 15
    p.dynamic = 'Attribute'
    data = pickle.dumps(p, pickle.HIGHEST_PROTOCOL)
    p2 = pickle.loads(data)
    assert (p2.value == p.value)
    assert (p2.extra == p.extra)
    assert (p2.dynamic == p.dynamic)
def weight_norm(module, weights=None, dim=0):
    """Apply weight normalization to ``module`` in place and return it.

    Delegates to ``WeightNorm.apply`` (defined elsewhere in this project).
    ``weights`` presumably names the parameter(s) to reparameterize and
    ``dim`` the dimension the norm is computed over — confirm against
    WeightNorm.apply's signature.
    """
    WeightNorm.apply(module, weights, dim)
    return module
def get_frame_count(filepath):
    """Return a Gradio update whose maximum is the video's frame count,
    capped at 100; with no file, reset to value=1, maximum=100."""
    if filepath is None:
        return gr.update(value=1, maximum=100)
    video = cv2.VideoCapture(filepath)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video.release()
    return gr.update(maximum=min(total_frames, 100))
_incremental_state
class WaitSegMultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8):
super().__init__()
self.embed_dim = embed_dim
self.kdim = (kdim if (kdim is not None) else embed_dim)
self.vdim = (vdim if (vdim is not None) else embed_dim)
self.qkv_same_dim = ((self.kdim == embed_dim) and (self.vdim == embed_dim))
self.num_heads = num_heads
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.head_dim = (embed_dim // num_heads)
assert ((self.head_dim * num_heads) == self.embed_dim), 'embed_dim must be divisible by num_heads'
self.scaling = (self.head_dim ** (- 0.5))
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert ((not self.self_attention) or self.qkv_same_dim), 'Self-attention requires query, key and value to be of the same size'
self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.v_proj = quant_noise(nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.q_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
self.out_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.v_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.q_proj.weight, gain=(1 / math.sqrt(2)))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if (self.out_proj.bias is not None):
nn.init.constant_(self.out_proj.bias, 0.0)
if (self.bias_k is not None):
nn.init.xavier_normal_(self.bias_k)
if (self.bias_v is not None):
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, need_weights: bool=True, static_kv: bool=False, attn_mask: Optional[Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False, seg_prob=None, training_lagging_seg=None) -> Tuple[(Tensor, Optional[Tensor])]:
if need_head_weights:
need_weights = True
is_tpu = (query.device.type == 'xla')
(tgt_len, bsz, embed_dim) = query.size()
src_len = tgt_len
assert (embed_dim == self.embed_dim)
assert (list(query.size()) == [tgt_len, bsz, embed_dim])
if (key is not None):
(src_len, key_bsz, key_embed_dim) = key.size()
if (not torch.jit.is_scripting()):
assert ((key_bsz, key_embed_dim) == (bsz, embed_dim))
assert (value is not None)
assert ((src_len, bsz, embed_dim) == value.shape)
if (incremental_state is not None):
saved_state = self._get_input_buffer(incremental_state)
if ((saved_state is not None) and ('prev_key' in saved_state)):
if static_kv:
assert (self.encoder_decoder_attention and (not self.self_attention))
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if (key is None):
assert (value is None)
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert ((key is not None) and (value is not None))
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if (self.bias_k is not None):
assert (self.bias_v is not None)
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (k is not None):
k = k.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (v is not None):
v = v.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (saved_state is not None):
if ('prev_key' in saved_state):
_prev_key = saved_state['prev_key']
assert (_prev_key is not None)
prev_key = _prev_key.view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
k = prev_key
else:
assert (k is not None)
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if ('prev_value' in saved_state):
_prev_value = saved_state['prev_value']
assert (_prev_value is not None)
prev_value = _prev_value.view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
v = prev_value
else:
assert (v is not None)
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if ('prev_key_padding_mask' in saved_state):
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert ((k is not None) and (v is not None))
key_padding_mask = WaitSegMultiheadAttention._append_prev_key_padding_mask(key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv)
saved_state['prev_key'] = k.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert (incremental_state is not None)
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert (k is not None)
assert (k.size(1) == src_len)
if ((key_padding_mask is not None) and (key_padding_mask.dim() == 0)):
key_padding_mask = None
if (key_padding_mask is not None):
assert (key_padding_mask.size(0) == bsz)
assert (key_padding_mask.size(1) == src_len)
if self.add_zero_attn:
assert (v is not None)
src_len += 1
k = torch.cat([k, k.new_zeros(((k.size(0), 1) + k.size()[2:]))], dim=1)
v = torch.cat([v, v.new_zeros(((v.size(0), 1) + v.size()[2:]))], dim=1)
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert (list(attn_weights.size()) == [(bsz * self.num_heads), tgt_len, src_len])
if (attn_mask is not None):
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if (key_padding_mask is not None):
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if (not is_tpu):
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float('-inf'))
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view((bsz * self.num_heads), tgt_len, src_len)
if before_softmax:
return (attn_weights, v)
if (training_lagging_seg is not None):
if (seg_prob is not None):
waitseg_mask = self.build_waitseg_mask(attn_weights, training_lagging_seg, seg_prob)
attn_weights = attn_weights.masked_fill(waitseg_mask, float('-inf'))
else:
waitk_mask = self.build_waitk_mask(attn_weights, training_lagging_seg)
attn_weights = attn_weights.masked_fill(waitk_mask, float('-inf'))
attn_weights_float = utils.softmax(attn_weights, dim=(- 1), onnx_trace=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert (v is not None)
attn = torch.bmm(attn_probs, v)
assert (list(attn.size()) == [(bsz * self.num_heads), tgt_len, self.head_dim])
if (self.onnx_trace and (attn.size(1) == 1)):
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if (not need_head_weights):
attn_weights = attn_weights.mean(dim=0)
return (attn, attn_weights)
def build_waitseg_mask(self, attn_weights, training_lagging_seg, seg_prob):
(bsz, tgt_len, src_len) = attn_weights.size()
idx = torch.arange((training_lagging_seg - 1), ((training_lagging_seg - 1) + tgt_len), device=attn_weights.device).clamp(1, src_len).unsqueeze(0).unsqueeze(2).repeat(bsz, 1, 1)
seg_prob = seg_prob.unsqueeze(1).repeat(1, self.num_heads, 1).contiguous().view(bsz, src_len).unsqueeze(1)
cur_seg_num = torch.cumsum(seg_prob.round(), dim=(- 1))
return (cur_seg_num > idx)
def build_waitk_mask(self, attn_weights, training_lagging_seg):
(bsz, tgt_len, src_len) = attn_weights.size()
idx = torch.arange((training_lagging_seg - 1), ((training_lagging_seg - 1) + tgt_len), device=attn_weights.device).clamp(1, src_len).unsqueeze(0).unsqueeze(2).repeat(bsz, 1, 1)
tmp = torch.arange(0, src_len, device=attn_weights.device).unsqueeze(0).unsqueeze(1).repeat(bsz, 1, 1)
return (tmp > idx)
def _append_prev_key_padding_mask(key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool) -> Optional[Tensor]:
if ((prev_key_padding_mask is not None) and static_kv):
new_key_padding_mask = prev_key_padding_mask
elif ((prev_key_padding_mask is not None) and (key_padding_mask is not None)):
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), key_padding_mask.float()], dim=1)
elif (prev_key_padding_mask is not None):
filler = torch.zeros((batch_size, (src_len - prev_key_padding_mask.size(1))), device=prev_key_padding_mask.device)
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), filler.float()], dim=1)
elif (key_padding_mask is not None):
filler = torch.zeros((batch_size, (src_len - key_padding_mask.size(1))), device=key_padding_mask.device)
new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
.export
def reorder_incremental_state(self, incremental_state: Dict[(str, Dict[(str, Optional[Tensor])])], new_order: Tensor):
input_buffer = self._get_input_buffer(incremental_state)
if (input_buffer is not None):
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if (input_buffer_k is not None):
if (self.encoder_decoder_attention and (input_buffer_k.size(0) == new_order.size(0))):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]) -> Dict[str, Optional[Tensor]]:
    """Fetch the cached 'attn_state' buffer, or an empty dict when absent."""
    buffer = self.get_incremental_state(incremental_state, 'attn_state')
    if buffer is None:
        empty_result: Dict[str, Optional[Tensor]] = {}
        return empty_result
    return buffer
def _set_input_buffer(self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]]):
    """Store ``buffer`` under 'attn_state' and return the updated incremental state."""
    return self.set_incremental_state(incremental_state, 'attn_state', buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
    """No-op hook: subclasses may override to sparsify attention weights.

    Returns ``attn_weights`` unchanged.
    """
    return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
    """Split legacy fused ``in_proj_weight``/``in_proj_bias`` entries into
    separate q/k/v projection parameters, modifying ``state_dict`` in place.

    ``name`` is the module's prefix inside the state dict ('' for top level).
    """
    prefix = name + '.' if name != '' else ''
    to_add = {}
    to_delete = []
    for key in state_dict.keys():
        if not key.endswith(prefix + 'in_proj_weight'):
            continue
        # The fused projection stacks q, k, v along dim 0.
        dim = int(state_dict[key].shape[0] / 3)
        to_add[prefix + 'q_proj.weight'] = state_dict[key][:dim]
        to_add[prefix + 'k_proj.weight'] = state_dict[key][dim:2 * dim]
        to_add[prefix + 'v_proj.weight'] = state_dict[key][2 * dim:]
        to_delete.append(key)
        bias_key = prefix + 'in_proj_bias'
        if bias_key in state_dict.keys():
            to_add[prefix + 'q_proj.bias'] = state_dict[bias_key][:dim]
            to_add[prefix + 'k_proj.bias'] = state_dict[bias_key][dim:2 * dim]
            to_add[prefix + 'v_proj.bias'] = state_dict[bias_key][2 * dim:]
            to_delete.append(bias_key)
    for key in to_delete:
        del state_dict[key]
    state_dict.update(to_add)
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch over ``train_queue``.

    Relies on the module-level ``args`` namespace for auxiliary-loss,
    gradient-clipping and reporting settings.

    Returns:
        (top1.avg, objs.avg): running average top-1 accuracy and loss.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    batch_time = utils.AvgrageMeter()
    model.train()
    for step, (images, labels) in enumerate(train_queue):
        labels = labels.cuda(non_blocking=True)
        images = images.cuda(non_blocking=True)
        batch_start = time.time()
        optimizer.zero_grad()
        logits, logits_aux = model(images)
        loss = criterion(logits, labels)
        if args.auxiliary:
            # Auxiliary head adds a weighted extra classification loss.
            loss = loss + args.auxiliary_weight * criterion(logits_aux, labels)
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        batch_time.update(time.time() - batch_start)
        prec1, prec5 = utils.accuracy(logits, labels, topk=(1, 5))
        n = images.size(0)
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            end_time = time.time()
            # First report has no previous timestamp to diff against.
            duration = 0 if step == 0 else end_time - start_time
            start_time = time.time()
            logging.info('TRAIN Step: %03d Objs: %e R1: %f R5: %f Duration: %ds BTime: %.3fs', step, objs.avg, top1.avg, top5.avg, duration, batch_time.avg)
    return (top1.avg, objs.avg)
def tokenizer(sentence: str) -> List[str]:
    """Tokenize ``sentence``, drop stopwords, and stem every remaining token."""
    tokens = simple_preprocess(sentence)
    return [stem(tok) for tok in tokens if tok not in STOPWORDS]
_registry(pattern_type='InsertBF16Node')
class InsertBF16Node(Pattern):
    """Graph pattern that switches fp16 InnerProduct nodes to bf16 execution.

    For each InnerProduct whose weight tensor is fp16 it rewrites the weight
    (and any fp32 bias) dtype to bf16, inserts a Quantize node casting the
    activation input to bf16, and marks 'output_dtype' = bf16 where the
    consumer chain allows. Finally it removes nodes that duplicate an earlier
    node's output tensor name.
    """

    def __call__(self, model):

        def fp32_to_bf16(fp32_np):
            # Truncate fp32 to bf16 by keeping the upper 16 bits of each value.
            assert (fp32_np.dtype == np.float32)
            int32_np = fp32_np.view(dtype=np.int32)
            int32_np = (int32_np >> 16)
            bf16_np = int32_np.astype(np.uint16)
            return bf16_np

        for node in model.nodes:
            if ((node.op_type in EXECUTOR_TYPE) and (EXECUTOR_TYPE[node.op_type] == 'InnerProduct') and (node.input_tensors[1].dtype == 'fp16')):
                util.set_autocast('cast_type', 'bf16')
                node.input_tensors[1].dtype = 'bf16'
                # Bias (third input), if present and stored as fp32, is
                # truncated to bf16 as well.
                if (len(node.input_tensors) > 2):
                    bias_fp32 = node.input_tensors[2].data
                    if (node.input_tensors[2].dtype == 'fp32'):
                        node.input_tensors[2].data = fp32_to_bf16(bias_fp32)
                        node.input_tensors[2].dtype = 'bf16'
                # Insert a Quantize node that casts the activation input to
                # bf16 and rewire this node to consume its output.
                input_tensor = node.input_tensors[0]
                input_name = input_tensor.name
                quant_output = Tensor(name=(input_name + '_quant'), source_op=[(node.name + '_quant')], dest_op=[node.name], dtype='bf16')
                quantize_op = util.construct_node(node_name=(node.name + '_quant'), op_type='Quantize', input_tensors=[input_tensor], output_tensors=[quant_output], attr=OrderedDict({'output_dtype': 'bf16'}))
                insert_idx = model.get_node_id(node.name)
                model.insert_nodes(insert_idx, [quantize_op])
                node.input_tensors[0] = quant_output
                # Keep the output in bf16 only when the consumer is not a
                # LayerNorm and the consumer is not the graph's final node.
                dest_op = node.output_tensors[0].dest_op
                if (dest_op and (model.get_node_by_name(dest_op[0]).op_type != 'LayerNorm')):
                    next_node = model.get_node_by_name(dest_op[0])
                    next_dest_op = next_node.output_tensors[0].dest_op
                    if (not next_dest_op):
                        continue
                    node.attr['output_dtype'] = 'bf16'
        # Remove nodes whose output tensor name duplicates an earlier node's
        # (presumably the inserted Quantize nodes can collide when one input
        # feeds several InnerProducts -- TODO confirm).
        remove_duplicate_set = set()
        duplicate_list = []
        for node in model.nodes:
            sz = len(remove_duplicate_set)
            remove_duplicate_set.add(node.output_tensors[0].name)
            new_sz = len(remove_duplicate_set)
            if (new_sz == sz):
                duplicate_list.append(node.name)
        model.remove_nodes(duplicate_list)
        if duplicate_list:
            # Also run Matmul -> Softmax pairs in bf16 after de-duplication.
            for node in model.nodes:
                next_op = node.output_tensors[0].dest_op
                if ((node.op_type in EXECUTOR_TYPE) and (EXECUTOR_TYPE[node.op_type] == 'Matmul') and (model.get_node_by_name(next_op[0]).op_type == 'Softmax')):
                    node.attr['output_dtype'] = 'bf16'
                    model.get_node_by_name(next_op[0]).attr['output_dtype'] = 'bf16'
        return model
def popluate_word_id_from_token(token, word_to_id):
    """Map the first whitespace-separated word of ``token`` to its vocabulary id.

    Out-of-vocabulary words fall back to the '**UNK**' entry, which must be
    present in ``word_to_id``.

    Returns:
        A 1-element 1-D numpy int array containing the id.

    Note: the original version also built a one-hot vector that was never
    used or returned; that dead code has been removed.
    """
    word = token.split()[0].strip()
    if word not in word_to_id:
        word = '**UNK**'
    return np.array([word_to_id[word]])
def attack(tensor, net, eps=0.001, n_iter=50):
    """Iterative FGSM-style adversarial attack.

    Repeatedly nudges a copy of ``tensor`` along the sign of the gradient
    (clamped to [-2, 2]) until ``net``'s argmax prediction flips or
    ``n_iter`` steps are exhausted.

    Returns:
        (adversarial_tensor, original_label, final_label).
    """
    adversarial = tensor.detach().clone()
    orig_prediction = net(tensor).argmax()
    print(f'Original prediction: {orig_prediction.item()}')
    for i in range(n_iter):
        net.zero_grad()
        grad = compute_gradient(func, adversarial, net=net, target=orig_prediction.item())
        # Step in the gradient-sign direction, keeping values in [-2, 2].
        adversarial = torch.clamp(adversarial + eps * grad.sign(), -2, 2)
        new_prediction = net(adversarial).argmax()
        if orig_prediction != new_prediction:
            print(f'We fooled the network after {i} iterations!')
            print(f'New prediction: {new_prediction.item()}')
            break
    return (adversarial, orig_prediction.item(), new_prediction.item())
def parse_args():
    """Parse command-line options for building a Kaggle CSV submission."""
    parser = argparse.ArgumentParser(description='Make csv submission file for Kaggle')
    for option in ('--testpkl-path', '--dcalphas-path', '--psis-path', '--phis-path', '--output-dir'):
        parser.add_argument(option, type=str)
    return parser.parse_args()
class LeNet(nn.Module):
    """LeNet-5-style CNN: two conv/pool stages followed by three FC layers.

    Expects 1-channel 28x28 inputs and outputs 10 class logits.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(256, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten to (batch, 16*4*4) = (batch, 256) before the classifier.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
def get_identity_preconditioner():
    """Return a Preconditioner whose every operator acts as the identity."""

    def _init(_):
        return IdentityPreconditionerState()

    def _update(*args, **kwargs):
        return IdentityPreconditionerState()

    def _identity(vec, _):
        # M, M^{1/2} and M^{-1/2} are all the identity map.
        return vec

    return Preconditioner(
        init=_init,
        update_preconditioner=_update,
        multiply_by_m_inv=_identity,
        multiply_by_m_sqrt=_identity,
        multiply_by_m_sqrt_inv=_identity,
    )
class LeNet(nn.Module):
    """LeNet-style CNN with one classifier head per dataset (multi-task).

    Keyword args:
        num_of_datasets: number of output heads (default 1).
        num_of_classes: classes per head (default 10).
    The active head is chosen with :meth:`set_dataset`.
    """

    def __init__(self, **kwargs):
        super(LeNet, self).__init__()
        self.num_of_datasets = kwargs.get('num_of_datasets', 1)
        self.num_of_classes = kwargs.get('num_of_classes', 10)
        self.ds_idx = 0
        self.l1 = nn.Conv2d(3, 20, kernel_size=5, padding=1)
        self.l1_relu = nn.ReLU(inplace=True)
        self.l1_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l2 = nn.Conv2d(20, 50, kernel_size=5, padding=1)
        self.l2_relu = nn.ReLU(inplace=True)
        self.l2_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.linear = nn.Sequential(nn.Linear(1800, 500))
        self.last = nn.ModuleList([nn.Linear(500, self.num_of_classes) for _ in range(self.num_of_datasets)])

    def set_dataset(self, ds_idx):
        """Select which output head subsequent forward passes use."""
        self.ds_idx = ds_idx % self.num_of_datasets

    def forward(self, x):
        for layer in (self.l1, self.l1_relu, self.l1_maxpool, self.l2, self.l2_relu, self.l2_maxpool):
            x = layer(x)
        # Flatten (batch, 50, 6, 6) -> (batch, 1800) for 32x32 inputs.
        x = self.linear(x.view(x.size(0), -1))
        return self.last[self.ds_idx](x)
def main(argv=None):
    """Train a solution with CMA-ES and log progress to TensorBoard.

    Candidate parameter vectors are evaluated in a multiprocessing pool,
    negated mean rewards are fed back to the solver (CMA-ES minimizes), and
    solver+solution are checkpointed periodically and on Ctrl-C.
    """
    parser = argparse.ArgumentParser('Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('solution', type=str, choices=('linear', 'MLP', 'invariant'))
    parser.add_argument('log_dir', type=str, help='Logging folder')
    parser.add_argument('--checkpoint', type=str, help='Pickled solver and solution')
    parser.add_argument('--env-seed', type=int)
    parser.add_argument('--eval-frequency', type=int, default=25)
    parser.add_argument('--feature-seed', type=int)
    parser.add_argument('-m', '--max-iter', type=int, default=10000, help='Maximum number of iterations')
    parser.add_argument('-e', '--n-episodes', type=int, default=16, help='Number of rollouts for fitness evaluation')
    parser.add_argument('-j', '--n-jobs', type=int, default=(- 1), help='Number of processes')
    parser.add_argument('-n', '--n-noise-features', type=int, default=0, help='Number of noise features')
    parser.add_argument('-p', '--population-size', type=int, default=256, help='Number of solutions per generation')
    parser.add_argument('-s', '--shuffle-on-reset', action='store_true', help='Shuffle features before each rollout')
    args = parser.parse_args(argv)
    writer = SummaryWriter(args.log_dir)
    writer.add_text('parameters', json.dumps(vars(args)))
    # 'linear' is an MLP with no hidden layers.
    if (args.solution == 'linear'):
        solution_inst = MLPSolution(n_features=(N_ORIGINAL_FEATURES + args.n_noise_features), hidden_layer_sizes=tuple())
    elif (args.solution == 'MLP'):
        solution_inst = MLPSolution(n_features=(N_ORIGINAL_FEATURES + args.n_noise_features), hidden_layer_sizes=(16,))
    elif (args.solution == 'invariant'):
        solution_inst = PermutationInvariantSolution(n_embeddings=16, proj_dim=32, hidden_size=8)
    else:
        raise ValueError
    if (args.checkpoint is None):
        x0 = np.zeros(solution_inst.get_n_params())
        solver = cma.CMAEvolutionStrategy(x0=x0, sigma0=0.1, inopts={'popsize': args.population_size, 'seed': 42, 'randn': np.random.randn})
    else:
        # Resume: the checkpointed solution must match the requested type.
        with open(args.checkpoint, 'rb') as f:
            (solver, solution_inst_) = pickle.load(f)
        assert isinstance(solution_inst, solution_inst_.__class__)
        solution_inst = solution_inst_
    get_fitness_partial = partial(get_fitness, n_episodes=args.n_episodes, shuffle_on_reset=args.shuffle_on_reset, n_noise_features=args.n_noise_features, env_seed=args.env_seed, feature_seed=args.feature_seed)
    if (args.n_jobs == (- 1)):
        n_jobs = mp.cpu_count()
    else:
        n_jobs = args.n_jobs
    with mp.Pool(processes=n_jobs) as pool:
        for n_iter in tqdm.tqdm(range(args.max_iter)):
            try:
                params_set = solver.ask()
                iterable = [solution_inst.clone().set_params(p) for p in params_set]
                rewards = pool.map(get_fitness_partial, iterable)
                pos_fitnesses = [np.mean(r) for r in rewards]
                # CMA-ES minimizes, so the fitnesses are negated.
                neg_fitnesses = [(- x) for x in pos_fitnesses]
                all_parameters = np.concatenate(params_set)
                metrics = {'parameter_mean': all_parameters.mean(), 'parameter_std': all_parameters.std(), 'mean': np.mean(pos_fitnesses), 'max (generation)': np.max(pos_fitnesses), 'max (overall)': (- solver.result.fbest)}
                for (metric_name, metric) in metrics.items():
                    writer.add_scalar(metric_name, metric, global_step=n_iter)
                if (((n_iter % args.eval_frequency) == 0) or (n_iter == (args.max_iter - 1))):
                    save(args.log_dir, n_iter, solver, solution_inst)
                solver.tell(params_set, neg_fitnesses)
            except KeyboardInterrupt:
                # Checkpoint progress before bailing out of training.
                save(args.log_dir, n_iter, solver, solution_inst)
                break
class Encoder(nn.Module):
    """Convolutional image encoder producing a 256-d feature vector.

    ``forward`` returns (fea, out): the globally pooled CNN feature and its
    MLP projection (both of shape (batch, 256)).
    """

    def __init__(self):
        super(Encoder, self).__init__()
        layers = []
        # (in_channels, out_channels, stride) for each conv/BN/LeakyReLU stage.
        plan = [(3, 64, 1), (64, 64, 1), (64, 128, 2), (128, 128, 1), (128, 256, 2), (256, 256, 1)]
        for in_ch, out_ch, stride in plan:
            layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.LeakyReLU(0.1, True))
        layers.append(nn.AdaptiveAvgPool2d(1))
        self.E = nn.Sequential(*layers)
        self.mlp = nn.Sequential(nn.Linear(256, 256), nn.LeakyReLU(0.1, True), nn.Linear(256, 256))

    def forward(self, x):
        fea = self.E(x).squeeze(-1).squeeze(-1)
        out = self.mlp(fea)
        return (fea, out)
def test_digits_cosine_greedi_nn():
    """A facility-location + graph-cut mixture under the GreeDi optimizer
    must reproduce the stored reference ranking and gains on the digits set."""
    submodels = [FacilityLocationSelection(100), GraphCutSelection(100)]
    model = MixtureSelection(100, submodels, [1.0, 0.3], metric='cosine', optimizer='greedi', optimizer_kwds={'optimizer1': 'naive', 'optimizer2': 'naive'}, random_state=0)
    model.fit(X_digits)
    assert_array_equal(model.ranking[:50], digits_cosine_greedi_ranking[:50])
    assert_array_almost_equal(model.gains[:50], digits_cosine_greedi_gains[:50], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class Schaffer(FloatProblem):
    """Schaffer's single-variable bi-objective problem.

    Minimizes f1(x) = x^2 and f2(x) = (x - 2)^2 over x in [-1000, 1000].
    """

    def __init__(self):
        super(Schaffer, self).__init__()
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)']
        self.lower_bound = [-1000]
        self.upper_bound = [1000]

    def number_of_objectives(self) -> int:
        return len(self.obj_directions)

    def number_of_constraints(self) -> int:
        # Unconstrained problem.
        return 0

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        x = solution.variables[0]
        solution.objectives[0] = x ** 2
        solution.objectives[1] = (x - 2) ** 2
        return solution

    def name(self):
        return 'Schaffer'
def triplet_semihard_loss(labels, embeddings, margin=1.0):
    """Triplet loss with semi-hard negative mining (TensorFlow ops).

    For every anchor-positive pair, the mined negative is the closest
    negative that is still farther than the positive ("semi-hard"); if no
    such negative exists, the farthest negative overall is used instead.

    Args:
        labels: 1-D tensor of class ids, shape [batch_size].
        embeddings: 2-D tensor [batch_size, dim]; presumably l2-normalized
            by the caller -- TODO confirm.
        margin: float margin added to the positive-minus-negative distance.

    Returns:
        Scalar tensor: the mean hinge loss over all positive pairs.
    """
    lshape = array_ops.shape(labels)
    assert (lshape.shape == 1)
    labels = array_ops.reshape(labels, [lshape[0], 1])
    # Pairwise squared distances between embeddings: [batch, batch].
    pdist_matrix = pairwise_distance(embeddings, squared=True)
    # adjacency[i, j] is True when samples i and j share a label.
    adjacency = math_ops.equal(labels, array_ops.transpose(labels))
    adjacency_not = math_ops.logical_not(adjacency)
    batch_size = array_ops.size(labels)
    # Tile to [batch*batch, batch]: row (i*batch + j) reasons about negatives
    # for the anchor-positive pair (i, j).
    pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
    # mask: negatives whose distance exceeds the anchor-positive distance.
    mask = math_ops.logical_and(array_ops.tile(adjacency_not, [batch_size, 1]), math_ops.greater(pdist_matrix_tile, array_ops.reshape(array_ops.transpose(pdist_matrix), [(- 1), 1])))
    # mask_final[i, j]: does pair (i, j) have at least one semi-hard negative?
    mask_final = array_ops.reshape(math_ops.greater(math_ops.reduce_sum(math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True), 0.0), [batch_size, batch_size])
    mask_final = array_ops.transpose(mask_final)
    adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
    mask = math_ops.cast(mask, dtype=dtypes.float32)
    # Semi-hard case: the closest negative farther than the positive.
    negatives_outside = array_ops.reshape(masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
    negatives_outside = array_ops.transpose(negatives_outside)
    # Fallback: the farthest negative (all negatives are inside the margin).
    negatives_inside = array_ops.tile(masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
    semi_hard_negatives = array_ops.where(mask_final, negatives_outside, negatives_inside)
    loss_mat = math_ops.add(margin, (pdist_matrix - semi_hard_negatives))
    # Positive-pair mask, excluding the diagonal (anchor paired with itself).
    mask_positives = (math_ops.cast(adjacency, dtype=dtypes.float32) - array_ops.diag(array_ops.ones([batch_size])))
    num_positives = math_ops.reduce_sum(mask_positives)
    triplet_loss = math_ops.truediv(math_ops.reduce_sum(math_ops.maximum(math_ops.multiply(loss_mat, mask_positives), 0.0)), num_positives, name='triplet_semihard_loss')
    return triplet_loss
def circuit_drawer(circuit, scale=0.7, filename=None, style=None, output=None, interactive=False, line_length=None, plot_barriers=True, reverse_bits=False, justify=None):
    """Draw a quantum circuit with the requested backend.

    When ``output`` is None, the backend comes from user config (an 'auto'
    setting picks 'mpl' if matplotlib is available, else 'text').
    'text' and 'latex_source' return their result directly; 'latex' and
    'mpl' return an image object, shown interactively when requested.

    Raises:
        exceptions.VisualizationError: for an unknown ``output`` value.
    """
    config = user_config.get_config()
    default_output = 'text'
    if config:
        default_output = config.get('circuit_drawer', 'text')
        if default_output == 'auto':
            default_output = 'mpl' if _matplotlib.HAS_MATPLOTLIB else 'text'
    if output is None:
        output = default_output
    image = None
    if output == 'text':
        return _text_circuit_drawer(circuit, filename=filename, line_length=line_length, reverse_bits=reverse_bits, plotbarriers=plot_barriers, justify=justify)
    elif output == 'latex':
        image = _latex_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
    elif output == 'latex_source':
        return _generate_latex_source(circuit, filename=filename, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
    elif output == 'mpl':
        image = _matplotlib_circuit_drawer(circuit, scale=scale, filename=filename, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
    else:
        raise exceptions.VisualizationError('Invalid output type %s selected. The only valid choices are latex, latex_source, text, and mpl' % output)
    if image and interactive:
        image.show()
    return image
def get_command(scaffolding, command_path):
    """Look up a command by its dotted path in ``scaffolding``.

    ``command_path`` is ``"<ingredient.path>.<command_name>"``: everything
    before the last dot selects the ingredient, the remainder the command.

    Raises:
        KeyError: when the ingredient or the command does not exist.
    """
    path, _, command_name = command_path.rpartition('.')
    if path not in scaffolding:
        raise KeyError('Ingredient for command "%s" not found.' % command_path)
    commands = scaffolding[path].commands
    if command_name in commands:
        return commands[command_name]
    if path:
        raise KeyError('Command "%s" not found in ingredient "%s"' % (command_name, path))
    raise KeyError('Command "%s" not found' % command_name)
def get_prediction_challenge_split(split: str, dataroot: str='/data/sets/nuscenes') -> List[str]:
    """Return the prediction-challenge tokens for a nuScenes split.

    'train_val' is the head portion of the official 'train' scenes held out
    for validation; 'train' excludes those scenes.

    Args:
        split: one of mini_train, mini_val, train, train_val, val.
        dataroot: nuScenes dataset root containing maps/prediction/.

    Returns:
        Flat list of tokens for every scene in the split.

    Raises:
        ValueError: for an unknown split name.
    """
    if split not in {'mini_train', 'mini_val', 'train', 'train_val', 'val'}:
        raise ValueError('split must be one of (mini_train, mini_val, train, train_val, val)')
    split_name = 'train' if split == 'train_val' else split
    path_to_file = os.path.join(dataroot, 'maps', 'prediction', 'prediction_scenes.json')
    # Fix: the original used json.load(open(...)) and leaked the file handle.
    with open(path_to_file, 'r') as f:
        prediction_scenes = json.load(f)
    scenes_for_split = create_splits_scenes()[split_name]
    if split == 'train':
        scenes_for_split = scenes_for_split[NUM_IN_TRAIN_VAL:]
    elif split == 'train_val':
        scenes_for_split = scenes_for_split[:NUM_IN_TRAIN_VAL]
    token_lists = (prediction_scenes.get(scene, []) for scene in scenes_for_split)
    return list(chain.from_iterable(token_lists))
def plot_uncertainty(df, kind, threshold=None, title=None):
    """Plot three panels relating prediction uncertainty to correctness.

    Panel 1: KDE of uncertainty split by correct/incorrect.
    Panel 2: scatter of uncertainty vs. predicted value, with the rejection
        threshold (if given) drawn and above-threshold points de-emphasized.
    Panel 3: loess fit of accuracy (correct as 0/1) against uncertainty,
        with its confidence band.

    Args:
        df: DataFrame with 'uncertainty', boolean 'correct' and 'y_pred'
            columns.
        kind: granularity label used in the title; 'tile' also downsamples
            the frame to 1000 rows.
        threshold: optional uncertainty cutoff to highlight.
        title: optional title for the scatter panel.

    Raises:
        ImportError: when scikit-misc (loess) is not installed.
    """
    try:
        from skmisc.loess import loess
    except ImportError:
        raise ImportError('Uncertainty plots with loess estimation require scikit-misc, which is not installed.')
    if (kind == 'tile'):
        df = df.sample(n=1000)
    (f, axes) = plt.subplots(1, 3)
    f.set_size_inches(15, 5)
    palette = sns.color_palette('Set2')
    tf_pal = {True: palette[0], False: palette[1]}
    kde = sns.kdeplot(x='uncertainty', hue='correct', data=df, fill=True, palette=tf_pal, ax=axes[0])
    kde.set(xlabel='Uncertainty')
    axes[0].title.set_text(f'Uncertainty density ({kind}-level)')
    # Points at/above the threshold are drawn muted (gray / pale red).
    if (threshold is not None):
        axes[1].axhline(y=threshold, color='r', linestyle='--')
        at_df = df.loc[(df['uncertainty'] >= threshold)]
        c_a_df = at_df.loc[at_df['correct']]
        ic_a_df = at_df.loc[(~ at_df['correct'])]
        axes[1].scatter(x=c_a_df['y_pred'], y=c_a_df['uncertainty'], marker='o', s=10, color='gray')
        axes[1].scatter(x=ic_a_df['y_pred'], y=ic_a_df['uncertainty'], marker='x', color='#FC6D77')
    # Points below the threshold (or all points) use the default palette.
    if (threshold is not None):
        bt_df = df.loc[(df['uncertainty'] < threshold)]
    else:
        bt_df = df
    c_df = bt_df.loc[bt_df['correct']]
    ic_df = bt_df.loc[(~ bt_df['correct'])]
    axes[1].scatter(x=c_df['y_pred'], y=c_df['uncertainty'], marker='o', s=10)
    axes[1].scatter(x=ic_df['y_pred'], y=ic_df['uncertainty'], marker='x', color='red')
    if (title is not None):
        axes[1].title.set_text(title)
    # Loess regression of correctness on uncertainty (sorted for plotting).
    l_df = df[['uncertainty', 'correct']].sort_values(by=['uncertainty'])
    x = l_df['uncertainty'].to_numpy()
    y = l_df['correct'].astype(float).to_numpy()
    ol = loess(x, y)
    ol.fit()
    pred = ol.predict(x, stderror=True)
    conf = pred.confidence()
    z = pred.values
    ll = conf.lower
    ul = conf.upper
    axes[2].plot(x, y, '+', ms=6)
    axes[2].plot(x, z)
    axes[2].fill_between(x, ll, ul, alpha=0.2)
    axes[2].tick_params(labelrotation=90)
    axes[2].set_ylim((- 0.1), 1.1)
    if (threshold is not None):
        axes[2].axvline(x=threshold, color='r', linestyle='--')
    # Shared cosmetics for the scatter and loess panels.
    for ax in (axes[1], axes[2]):
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('black')
        ax.tick_params(axis='x', colors='black')
        ax.grid(visible=True, which='both', axis='both', color='white')
        ax.set_facecolor('#EAEAF2')
def BFS(block_map, current_member: List[int]):
    """Recursively enumerate every extension of ``current_member`` into larger
    mutually-compatible index groups.

    ``block_map`` acts as a compatibility matrix: index ``i`` may join the
    group only when ``block_map[i][m]`` is truthy for every current member
    ``m``. Only indices greater than ``max(current_member)`` are tried, so
    each combination is produced exactly once. Despite the name this is a
    depth-first enumeration, kept as-is for API compatibility.

    Returns:
        List of combinations, starting with ``current_member`` itself.
    """
    combinations = [current_member]
    # max(..., default=-1) replaces the original empty-list special case.
    cur_max = max(current_member, default=-1)
    member_set = set(current_member)
    for i in range(cur_max + 1, len(block_map)):
        if i in member_set:
            continue
        row = block_map[i]
        # i can join only if it is compatible with every current member.
        if all(row[m] for m in current_member):
            combinations += BFS(block_map, current_member + [i])
    return combinations
class Reinforcement(object):
    """REINFORCE-style fine-tuning of a generative sequence model.

    Samples trajectories (bracketed SMILES strings) from ``generator``,
    scores them with ``get_reward(smiles, predictor, **kwargs)``, and applies
    a policy-gradient update through the generator's optimizer.
    """

    def __init__(self, generator, predictor, get_reward):
        super(Reinforcement, self).__init__()
        self.generator = generator    # sequence model exposing .optimizer, .evaluate, init_* hooks
        self.predictor = predictor    # property predictor handed to get_reward
        self.get_reward = get_reward  # callable(smiles, predictor, **kwargs) -> float

    def policy_gradient(self, data, n_batch=10, gamma=0.97, std_smiles=False, grad_clipping=None, **kwargs):
        """Run one policy-gradient step over ``n_batch`` sampled trajectories.

        Args:
            data: provides char_tensor() and is passed to generator.evaluate().
            n_batch: number of trajectories averaged per update.
            gamma: per-step discount applied backwards along the sequence.
            std_smiles: if True, canonicalize sampled SMILES via RDKit before
                scoring.
            grad_clipping: optional max gradient norm.

        Returns:
            (mean_reward, loss_value) for the batch.
        """
        rl_loss = 0
        self.generator.optimizer.zero_grad()
        total_reward = 0
        for _ in range(n_batch):
            reward = 0
            trajectory = '<>'
            # Resample until a trajectory earns a non-zero reward
            # (invalid molecules score 0).
            while (reward == 0):
                trajectory = self.generator.evaluate(data)
                if std_smiles:
                    try:
                        mol = Chem.MolFromSmiles(trajectory[1:(- 1)])
                        trajectory = (('<' + Chem.MolToSmiles(mol)) + '>')
                        reward = self.get_reward(trajectory[1:(- 1)], self.predictor, **kwargs)
                    # Fix: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit during resampling.
                    except Exception:
                        reward = 0
                else:
                    reward = self.get_reward(trajectory[1:(- 1)], self.predictor, **kwargs)
            trajectory_input = data.char_tensor(trajectory)
            discounted_reward = reward
            total_reward += reward
            hidden = self.generator.init_hidden()
            if self.generator.has_cell:
                cell = self.generator.init_cell()
                hidden = (hidden, cell)
            if self.generator.has_stack:
                stack = self.generator.init_stack()
            else:
                stack = None
            # Accumulate -log pi(a_t | s_t) * discounted reward over the
            # sequence (teacher forcing on the sampled trajectory).
            for p in range((len(trajectory) - 1)):
                (output, hidden, stack) = self.generator(trajectory_input[p], hidden, stack)
                log_probs = F.log_softmax(output, dim=1)
                top_i = trajectory_input[(p + 1)]
                rl_loss -= (log_probs[(0, top_i)] * discounted_reward)
                discounted_reward = (discounted_reward * gamma)
        rl_loss = (rl_loss / n_batch)
        total_reward = (total_reward / n_batch)
        rl_loss.backward()
        if (grad_clipping is not None):
            torch.nn.utils.clip_grad_norm_(self.generator.parameters(), grad_clipping)
        self.generator.optimizer.step()
        return (total_reward, rl_loss.item())
def set_quad_double_target_system(pols, vrblvl=0):
    """Define the target system for a quad-double precision homotopy.

    Stores ``pols`` as the quad double system, then invokes the PHCpack C
    interface (job 262) to copy it into the target-system container.
    ``vrblvl`` > 0 enables diagnostic printing.

    Returns:
        The return value of the C interface call.
    """
    if vrblvl > 0:
        print('in set_quad_double_target_system, with pols :')
        for pol in pols:
            print(pol)
    nvr = number_of_symbols(pols, vrblvl)
    set_quad_double_system(nvr, pols, vrblvl)
    phc = get_phcfun()
    apar = pointer(c_int32(0))
    bpar = pointer(c_int32(0))
    cpar = pointer(c_double(0.0))
    vlvl = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> set_quad_double_target_system calls phc', end='')
    retval = phc(262, apar, bpar, cpar, vlvl)
    if vrblvl > 0:
        print(', return value :', retval)
    return retval
class TFMPNetModel():
    """Placeholder for the TensorFlow MPNet model.

    Used when TensorFlow is unavailable: every entry point calls
    ``requires_tf``, which is expected to raise an informative error.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.