code stringlengths 101 5.91M |
|---|
def get_default_args():
    """Extend the shared model/dataset parser with the default training flags.

    Returns the argparse parser so callers can add further arguments.
    """
    parser = get_model_dataset_args()
    add = parser.add_argument

    # Bookkeeping / reproducibility.
    add('--output_dir', type=str, help='Output directory')
    add('--eval_freq', type=int, default=50)
    add('--save_freq', type=int, default=50)
    add('--seed', type=int, default=1)
    add('--resume', type=str, default=None)

    # Sample reweighting strategies.
    add('--reweight_groups', action='store_true', help='Reweight groups')
    add('--reweight_classes', action='store_true', help='Reweight classes')
    add('--reweight_spurious', action='store_true', help='Reweight based on spurious attribute')

    # Optimization hyper-parameters.
    add('--batch_size', type=int, default=128)
    add('--num_epochs', type=int, default=300)
    add('--optimizer', type=str, required=False, default='sgd_optimizer',
        choices=['sgd_optimizer', 'adamw_optimizer', 'bert_adamw_optimizer'],
        help='Optimizer name')
    add('--scheduler', type=str, required=False, default='constant_lr_scheduler',
        choices=['cosine_lr_scheduler', 'constant_lr_scheduler', 'bert_lr_scheduler'],
        help='Scheduler name')
    add('--weight_decay', type=float, default=1e-06)
    add('--momentum_decay', type=float, default=0.9)
    add('--init_lr', type=float, default=0.06)

    # Data handling.
    add('--no_shuffle_train', action='store_true')
    add('--mixup', action='store_true')
    add('--num_minority_groups_remove', type=int, required=False, default=0)
    return parser
def train_model(args, model, train, dev, src=None, trg=None, trg_len_dic=None, teacher_model=None, save_path=None, maxsteps=None):
    """Main training loop for (Fast)Transformer models with iterative refinement.

    Iterates over `train` batches, periodically evaluates on `dev`, checkpoints
    the best model, and optionally logs to TensorBoard. Supports denoising
    inputs between decoder iterations, self-distillation across decoder
    iterations, and a target-length-prediction fine-tuning mode
    (``args.finetune_trg_len``).

    NOTE(review): indentation below is reconstructed from a whitespace-stripped
    dump — nesting of a few trailing statements (the metric resets and the
    stray triple-quoted string) is a best-effort reading; confirm against the
    original repository.
    """
    # Optional TensorBoard writer (skipped entirely in debug runs).
    if (args.tensorboard and (not args.debug)):
        from tensorboardX import SummaryWriter
        writer = SummaryWriter(str((args.event_path / args.id_str)))
    # Per-decoder-iteration denoising weights for FastTransformer.
    if ((type(model) is FastTransformer) and (args.denoising_prob > 0.0)):
        denoising_weights = [args.denoising_weight for idx in range(args.train_repeat_dec)]
        denoising_out_weights = [args.denoising_out_weight for idx in range(args.train_repeat_dec)]
    # Alternative: linearly decaying denoising weight per decoder iteration.
    if ((type(model) is FastTransformer) and args.layerwise_denoising_weight):
        (start, end) = (0.9, 0.1)
        diff = ((start - end) / (args.train_repeat_dec - 1))
        denoising_weights = (np.arange(start=end, stop=start, step=diff).tolist()[::(- 1)] + [0.1])
    # Freeze parameters depending on whether we fine-tune only the length
    # predictor ('pred_len') or everything except it.
    for (k, p) in zip(model.state_dict().keys(), model.parameters()):
        if args.finetune_trg_len:
            if ('pred_len' not in k):
                p.requires_grad = False
            else:
                print(k)
        elif ('pred_len' in k):
            p.requires_grad = False
    params = [p for p in model.parameters() if p.requires_grad]
    if (args.optimizer == 'Adam'):
        opt = torch.optim.Adam(params, betas=(0.9, 0.98), eps=1e-09)
    else:
        raise NotImplementedError
    # Optionally resume optimizer state and iteration offset from checkpoint.
    if ((args.load_from is not None) and args.resume and (not args.finetune_trg_len)):
        with torch.cuda.device(args.gpu):
            (offset, opt_states) = torch.load((str((args.model_path / args.load_from)) + '.pt.states'), map_location=(lambda storage, loc: storage.cuda()))
            opt.load_state_dict(opt_states)
    else:
        offset = 0
    # Best-checkpoint tracker: per-iteration BLEU normally, length accuracy
    # when fine-tuning the length predictor.
    if (not args.finetune_trg_len):
        best = Best(max, *['BLEU_dec{}'.format((ii + 1)) for ii in range(args.valid_repeat_dec)], 'i', model=model, opt=opt, path=str((args.model_path / args.id_str)), gpu=args.gpu, which=range(args.valid_repeat_dec))
    else:
        best = Best(max, *['pred_target_len_correct'], 'i', model=model, opt=opt, path=str((args.model_path / args.id_str)), gpu=args.gpu, which=[0])
    # Running averages of per-decoder-iteration losses.
    train_metrics = Metrics('train loss', *['loss_{}'.format((idx + 1)) for idx in range(args.train_repeat_dec)], data_type='avg')
    dev_metrics = Metrics('dev loss', *['loss_{}'.format((idx + 1)) for idx in range(args.valid_repeat_dec)], data_type='avg')
    # Extra metrics only exist when target length is predicted.
    if ('predict' in args.trg_len_option):
        train_metrics_trg = Metrics('train loss target', *['pred_target_len_loss', 'pred_target_len_correct', 'pred_target_len_approx'], data_type='avg')
        train_metrics_average = Metrics('train loss average', *['average_target_len_correct', 'average_target_len_approx'], data_type='avg')
        dev_metrics_trg = Metrics('dev loss target', *['pred_target_len_loss', 'pred_target_len_correct', 'pred_target_len_approx'], data_type='avg')
        dev_metrics_average = Metrics('dev loss average', *['average_target_len_correct', 'average_target_len_approx'], data_type='avg')
    else:
        train_metrics_trg = None
        train_metrics_average = None
        dev_metrics_trg = None
        dev_metrics_average = None
    if (not args.no_tqdm):
        progressbar = tqdm(total=args.eval_every, desc='start training.')
    if (maxsteps is None):
        maxsteps = args.maximum_steps
    for (iters, train_batch) in enumerate(train):
        iters += offset
        # Periodic hard checkpoint (independent of "best" tracking).
        if ((args.save_every > 0) and ((iters % args.save_every) == 0)):
            args.logger.info('save (back-up) checkpoints at iter={}'.format(iters))
            with torch.cuda.device(args.gpu):
                torch.save(best.model.state_dict(), '{}_iter={}.pt'.format(str((args.model_path / args.id_str)), iters))
                torch.save([iters, best.opt.state_dict()], '{}_iter={}.pt.states'.format(str((args.model_path / args.id_str)), iters))
        # Periodic validation.
        if (((iters + 1) % args.eval_every) == 0):
            torch.cuda.empty_cache()
            gc.collect()
            dev_metrics.reset()
            if (dev_metrics_trg is not None):
                dev_metrics_trg.reset()
            if (dev_metrics_average is not None):
                dev_metrics_average.reset()
            outputs_data = valid_model(args, model, dev, dev_metrics, dev_metrics_trg=dev_metrics_trg, dev_metrics_average=dev_metrics_average, teacher_model=None, print_out=False, trg_len_dic=trg_len_dic)
            if (args.tensorboard and (not args.debug)):
                for ii in range(args.valid_repeat_dec):
                    writer.add_scalar('dev/single/Loss_{}'.format((ii + 1)), getattr(dev_metrics, 'loss_{}'.format((ii + 1))), iters)
                    writer.add_scalar('dev/single/BLEU_{}'.format((ii + 1)), outputs_data['real'][ii][0], iters)
                if ('predict' in args.trg_len_option):
                    writer.add_scalar('dev/single/pred_target_len_loss', outputs_data['pred_target_len_loss'], iters)
                    writer.add_scalar('dev/single/pred_target_len_correct', outputs_data['pred_target_len_correct'], iters)
                    writer.add_scalar('dev/single/pred_target_len_approx', outputs_data['pred_target_len_approx'], iters)
                    writer.add_scalar('dev/single/average_target_len_correct', outputs_data['average_target_len_correct'], iters)
                    writer.add_scalar('dev/single/average_target_len_approx', outputs_data['average_target_len_approx'], iters)
                # Dead code kept as a string literal by the original author.
                '\n writer.add_scalars(\'dev/total/BLEUs\', {"iter_{}".format(idx+1):bleu for idx, bleu in enumerate(outputs_data[\'bleu\']) }, iters)\n writer.add_scalars(\'dev/total/Losses\',\n { "iter_{}".format(idx+1):getattr(dev_metrics, "loss_{}".format(idx+1))\n for idx in range(args.valid_repeat_dec) },\n iters )\n '
            if (not args.debug):
                # Track the best checkpoint by BLEU (or length accuracy).
                if (not args.finetune_trg_len):
                    best.accumulate(*[xx[0] for xx in outputs_data['real']], iters)
                    values = list(best.metrics.values())
                    args.logger.info('best model : {}, {}'.format('BLEU=[{}]'.format(', '.join([str(x) for x in values[:args.valid_repeat_dec]])), 'i={}'.format(values[args.valid_repeat_dec])))
                else:
                    best.accumulate(*[outputs_data['pred_target_len_correct']], iters)
                    values = list(best.metrics.values())
                    args.logger.info('best model : {}'.format('pred_target_len_correct = {}'.format(values[0])))
                args.logger.info((('model:' + args.prefix) + args.hp_str))
            if (not args.no_tqdm):
                progressbar.close()
                progressbar = tqdm(total=args.eval_every, desc='start training.')
            # Anneal denoising weights based on the dev BLEU of each iteration.
            if ((type(model) is FastTransformer) and args.anneal_denoising_weight):
                for (ii, bb) in enumerate([xx[0] for xx in outputs_data['real']][:(- 1)]):
                    denoising_weights[ii] = (0.9 - (0.1 * int(math.floor((bb / 3.0)))))
        if (iters > maxsteps):
            args.logger.info('reached the maximum updating steps.')
            break
        model.train()
        # Learning-rate schedules (closures over args; called with step+1).
        def get_lr_transformer(i, lr0=0.1):
            return (((lr0 * 10) / math.sqrt(args.d_model)) * min((1 / math.sqrt(i)), (i / (args.warmup * math.sqrt(args.warmup)))))
        def get_lr_anneal(iters, lr0=0.1):
            lr_end = 1e-05
            return (max(0, (((args.lr - lr_end) * (args.anneal_steps - iters)) / args.anneal_steps)) + lr_end)
        if (args.lr_schedule == 'fixed'):
            opt.param_groups[0]['lr'] = args.lr
        elif (args.lr_schedule == 'anneal'):
            opt.param_groups[0]['lr'] = get_lr_anneal((iters + 1))
        elif (args.lr_schedule == 'transformer'):
            opt.param_groups[0]['lr'] = get_lr_transformer((iters + 1))
        opt.zero_grad()
        # Batch preparation differs for mscoco (captions) vs. translation data.
        if (args.dataset == 'mscoco'):
            (decoder_inputs, decoder_masks, targets, target_masks, _, source_masks, encoding, batch_size, rest) = model.quick_prepare_mscoco(train_batch, all_captions=train_batch[1], fast=(type(model) is FastTransformer), inputs_dec=args.inputs_dec, trg_len_option=args.trg_len_option, max_len=args.max_offset, trg_len_dic=trg_len_dic, bp=args.bp)
        else:
            (decoder_inputs, decoder_masks, targets, target_masks, sources, source_masks, encoding, batch_size, rest) = model.quick_prepare(train_batch, fast=(type(model) is FastTransformer), trg_len_option=args.trg_len_option, trg_len_ratio=args.trg_len_ratio, trg_len_dic=trg_len_dic, bp=args.bp)
        losses = []
        if (type(model) is Transformer):
            loss = model.cost(targets, target_masks, out=model(encoding, source_masks, decoder_inputs, decoder_masks))
            losses.append(loss)
        elif (type(model) is FastTransformer):
            all_logits = []
            all_denoising_masks = []
            # One pass per refinement iteration of the decoder.
            for iter_ in range(args.train_repeat_dec):
                torch.cuda.empty_cache()
                curr_iter = min(iter_, (args.num_decs - 1))
                next_iter = min((curr_iter + 1), (args.num_decs - 1))
                out = model(encoding, source_masks, decoder_inputs, decoder_masks, iter_=curr_iter, return_probs=False)
                # Loss variant depends on the fine-tuning mode.
                if (args.rf_finetune is True):
                    loss = model.rf_cost(args, targets, target_masks, out=out, iter_=curr_iter)
                elif (args.nat_finetune is True):
                    loss = model.nat_cost(args, targets, target_masks, out=out, iter_=curr_iter)
                elif (args.ng_finetune or (args.joint is True)):
                    loss = model.ngram_cost(args, iters, targets, target_masks, out=out, iter_=curr_iter)
                else:
                    loss = model.cost(targets, target_masks, out=out, iter_=curr_iter)
                logits = model.decoder[curr_iter].out(out)
                # Next-iteration input tokens: greedy argmax or sampled.
                if args.use_argmax:
                    (_, argmax) = torch.max(logits, dim=(- 1))
                else:
                    probs = softmax(logits)
                    probs_sz = probs.size()
                    logits_ = Variable(probs.data, requires_grad=False)
                    argmax = torch.multinomial(logits_.contiguous().view((- 1), probs_sz[(- 1)]), 1).view(*probs_sz[:(- 1)])
                if (args.self_distil > 0.0):
                    # NOTE(review): `logits_masked` is not defined in this chunk —
                    # presumably computed elsewhere; confirm in the full file.
                    all_logits.append(logits_masked)
                del logits
                losses.append(loss)
                decoder_inputs_ = 0
                denoising_mask = 1
                # Feed the next decoder iteration with embeddings of the
                # (possibly corrupted) predicted tokens.
                if (args.next_dec_input in ['both', 'emb']):
                    if ((args.denoising_prob > 0.0) and (np.random.rand() < args.denoising_prob)):
                        cor = corrupt_target(targets, decoder_masks, len(trg.vocab), denoising_weights[iter_], args.corruption_probs)
                        emb = F.embedding(cor, (model.decoder[next_iter].out.weight * math.sqrt(args.d_model)))
                        denoising_mask = 0
                    else:
                        emb = F.embedding(argmax, (model.decoder[next_iter].out.weight * math.sqrt(args.d_model)))
                    if (args.denoising_out_weight > 0):
                        if (denoising_out_weights[iter_] > 0.0):
                            corrupted_argmax = corrupt_target(argmax, decoder_masks, denoising_out_weights[iter_])
                        else:
                            corrupted_argmax = argmax
                        emb = F.embedding(corrupted_argmax, (model.decoder[next_iter].out.weight * math.sqrt(args.d_model)))
                    decoder_inputs_ += emb
                all_denoising_masks.append(denoising_mask)
                if (args.next_dec_input in ['both', 'out']):
                    decoder_inputs_ += out
                decoder_inputs = decoder_inputs_
            # Self-distillation: later iterations teach earlier ones (MSE on
            # logits, weighted by distance and denoising masks).
            if (args.self_distil > 0.0):
                self_distil_losses = []
                for logits_i in range(1, (len(all_logits) - 1)):
                    self_distill_loss_i = 0.0
                    for logits_j in range((logits_i + 1), len(all_logits)):
                        self_distill_loss_i += ((((all_denoising_masks[logits_j] * all_denoising_masks[logits_i]) * (1 / (logits_j - logits_i))) * args.self_distil) * F.mse_loss(all_logits[logits_i], all_logits[logits_j].detach()))
                    self_distil_losses.append(self_distill_loss_i)
                self_distil_loss = sum(self_distil_losses)
        loss = sum(losses)
        train_metrics.accumulate(batch_size, *losses, print_iter=None)
        if (train_metrics_trg is not None):
            train_metrics_trg.accumulate(batch_size, *[rest[0], rest[1], rest[2]])
        if (train_metrics_average is not None):
            train_metrics_average.accumulate(batch_size, *[rest[3], rest[4]])
        # Backward pass: which loss is backpropagated depends on the mode.
        if ((type(model) is FastTransformer) and (args.self_distil > 0.0)):
            (loss + self_distil_loss).backward()
        elif ('predict' in args.trg_len_option):
            if args.finetune_trg_len:
                rest[0].backward()
            else:
                loss.backward()
        else:
            loss.backward()
        if (args.grad_clip > 0):
            total_norm = nn.utils.clip_grad_norm(params, args.grad_clip)
        opt.step()
        # Build the progress-bar status string.
        mid_str = ''
        if ((type(model) is FastTransformer) and (args.self_distil > 0.0)):
            mid_str += 'distil={:.5f}, '.format(self_distil_loss.cpu().data.numpy()[0])
        if ((type(model) is FastTransformer) and (args.denoising_prob > 0.0)):
            mid_str += ('/'.join(['{:.1f}'.format(ff) for ff in denoising_weights[:(- 1)]]) + ', ')
        info = 'update={}, loss={}, {}lr={:.1e}'.format(iters, '/'.join(['{:.3f}'.format(export(ll)) for ll in losses]), mid_str, opt.param_groups[0]['lr'])
        if args.no_tqdm:
            if ((iters % args.eval_every) == 0):
                args.logger.info('update {} : {}'.format(iters, str(train_metrics)))
        else:
            progressbar.update(1)
            progressbar.set_description(info)
        # Log accumulated train metrics and reset them for the next window.
        if ((((iters + 1) % args.eval_every) == 0) and args.tensorboard and (not args.debug)):
            for idx in range(args.train_repeat_dec):
                writer.add_scalar('train/single/Loss_{}'.format((idx + 1)), getattr(train_metrics, 'loss_{}'.format((idx + 1))), iters)
            if ('predict' in args.trg_len_option):
                writer.add_scalar('train/single/pred_target_len_loss', getattr(train_metrics_trg, 'pred_target_len_loss'), iters)
                writer.add_scalar('train/single/pred_target_len_correct', getattr(train_metrics_trg, 'pred_target_len_correct'), iters)
                writer.add_scalar('train/single/pred_target_len_approx', getattr(train_metrics_trg, 'pred_target_len_approx'), iters)
                writer.add_scalar('train/single/average_target_len_correct', getattr(train_metrics_average, 'average_target_len_correct'), iters)
                writer.add_scalar('train/single/average_target_len_approx', getattr(train_metrics_average, 'average_target_len_approx'), iters)
            # NOTE(review): resets nested under the tensorboard guard per the
            # flattened source — confirm they shouldn't run unconditionally.
            train_metrics.reset()
            if (train_metrics_trg is not None):
                train_metrics_trg.reset()
            if (train_metrics_average is not None):
                train_metrics_average.reset()
class RMSpropTF(Optimizer):
    """RMSprop variant matching TensorFlow's behaviour.

    Differences from ``torch.optim.RMSprop`` visible in the code below:
    - ``eps`` is added *inside* the square root (``sqrt(avg + eps)``),
    - ``square_avg`` is initialised to ones rather than zeros,
    - optional decoupled weight decay (applied directly to the parameter),
    - optionally the learning rate is folded into the momentum buffer
      (``lr_in_momentum``), matching TF's update ordering.

    NOTE(review): uses the old deprecated ``tensor.add_(scalar, tensor)`` /
    ``addcdiv_(scalar, a, b)`` call signatures — this code targets an old
    PyTorch version; confirm before running on modern torch.
    """

    def __init__(self, params, lr=0.01, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0.0, centered=False, decoupled_decay=False, lr_in_momentum=True):
        # Validate hyper-parameters up front so bad configs fail loudly.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= momentum)):
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        if (not (0.0 <= alpha)):
            raise ValueError('Invalid alpha value: {}'.format(alpha))
        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
        super(RMSpropTF, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backfill defaults for checkpoints saved before these keys existed.
        super(RMSpropTF, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]
                # Lazy per-parameter state init; square_avg starts at ones (TF style).
                if (len(state) == 0):
                    state['step'] = 0
                    state['square_avg'] = torch.ones_like(p.data)
                    if (group['momentum'] > 0):
                        state['momentum_buffer'] = torch.zeros_like(p.data)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p.data)
                square_avg = state['square_avg']
                one_minus_alpha = (1.0 - group['alpha'])
                state['step'] += 1
                if (group['weight_decay'] != 0):
                    if (('decoupled_decay' in group) and group['decoupled_decay']):
                        # Decoupled decay: shrink the parameter directly.
                        p.data.add_((- group['weight_decay']), p.data)
                    else:
                        # Coupled decay: fold decay into the gradient.
                        grad = grad.add(group['weight_decay'], p.data)
                # EMA of squared gradients: avg += (1-alpha) * (g^2 - avg).
                square_avg.add_(one_minus_alpha, (grad.pow(2) - square_avg))
                if group['centered']:
                    # Centered variant subtracts the squared mean gradient.
                    grad_avg = state['grad_avg']
                    grad_avg.add_(one_minus_alpha, (grad - grad_avg))
                    avg = square_avg.addcmul((- 1), grad_avg, grad_avg).add(group['eps']).sqrt_()
                else:
                    # eps inside the sqrt — the key TF difference.
                    avg = square_avg.add(group['eps']).sqrt_()
                if (group['momentum'] > 0):
                    buf = state['momentum_buffer']
                    if (('lr_in_momentum' in group) and group['lr_in_momentum']):
                        # TF ordering: lr is applied inside the momentum buffer.
                        buf.mul_(group['momentum']).addcdiv_(group['lr'], grad, avg)
                        p.data.add_((- buf))
                    else:
                        # PyTorch-style: lr applied at the parameter update.
                        buf.mul_(group['momentum']).addcdiv_(grad, avg)
                        p.data.add_((- group['lr']), buf)
                else:
                    p.data.addcdiv_((- group['lr']), grad, avg)
        return loss
def is_ray_tune_available():
    """Return True when both ray and the ray.tune submodule are importable."""
    return is_ray_available() and (importlib.util.find_spec('ray.tune') is not None)
# NOTE(review): stray top-level call — `_module` is not defined anywhere in
# this chunk; presumably dataset-dump residue. Confirm before relying on it.
_module()
class CityscapesSemiDataset(CustomDataset):
    """Cityscapes video dataset pairing a labeled clip with an unlabeled clip.

    Split files list frame names, one per line; every ``clip_length``
    consecutive lines form one clip. ``__getitem__`` draws two frames from the
    indexed labeled clip (one of them the supervised frame ``idx_sup``) plus
    two random frames from a randomly chosen unlabeled clip, and runs them
    through the pipeline.

    Improvement over the original: the two identical clip-parsing loops in
    ``load_annotations`` are factored into ``_load_clips``.
    """

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, pipeline, img_dir, ann_dir, img_suffix='_leftImg8bit.png', seg_map_suffix='_gtFine_labelTrainIds.png', split=None, split_unlabeled=None, data_root=None, ignore_index=255, reduce_zero_label=False, classes=None, palette=None, clip_length=30, idx_sup=19):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.ann_dir = ann_dir
        self.img_suffix = img_suffix
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.split_unlabeled = split_unlabeled
        self.data_root = data_root
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        self.label_map = None
        # Frames per clip; the split files are chunked in groups of this size.
        self.clip_length = clip_length
        # Index (within a labeled clip) of the frame that carries supervision.
        self.idx_sup = idx_sup
        # Resolve relative paths against data_root.
        if (self.data_root is not None):
            if (not osp.isabs(self.img_dir)):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if (not ((self.ann_dir is None) or osp.isabs(self.ann_dir))):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if (not ((self.split is None) or osp.isabs(self.split))):
                self.split = osp.join(self.data_root, self.split)
            if (not ((self.split_unlabeled is None) or osp.isabs(self.split_unlabeled))):
                self.split_unlabeled = osp.join(self.data_root, self.split_unlabeled)
        (self.video_infos_labeled, self.video_infos_unlabeled) = self.load_annotations(self.img_dir, self.img_suffix, self.ann_dir, self.seg_map_suffix, self.split, self.split_unlabeled)

    def __len__(self):
        # Length counts labeled clips only; an unlabeled clip is re-sampled
        # randomly on every __getitem__ call.
        return len(self.video_infos_labeled)

    def _load_clips(self, split_file, img_suffix, seg_map_suffix):
        """Parse one split file into clips: list of lists of img_info dicts."""
        with open(split_file) as f:
            lines = f.readlines()
        clips = []
        for i in range((len(lines) // self.clip_length)):
            frame_lines = lines[(i * self.clip_length):((i + 1) * self.clip_length)]
            clips.append([
                dict(filename=(line.strip() + img_suffix), ann=(line.strip() + seg_map_suffix))
                for line in frame_lines
            ])
        return clips

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, split, split_unlabeled):
        """Load labeled and unlabeled clip lists from their split files."""
        video_infos_labeled = self._load_clips(split, img_suffix, seg_map_suffix)
        print_log(f'Loaded {len(video_infos_labeled)} labeled clips', logger=get_root_logger())
        video_infos_unlabeled = self._load_clips(split_unlabeled, img_suffix, seg_map_suffix)
        print_log(f'Loaded {len(video_infos_unlabeled)} unlabeled clips', logger=get_root_logger())
        return (video_infos_labeled, video_infos_unlabeled)

    def pre_pipeline(self, results):
        """Seed the results dict with prefixes the pipeline expects."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir

    def __getitem__(self, video_idx_0):
        # Supervised frame plus one other random frame from the labeled clip.
        idx_v0_0 = self.idx_sup
        idx_v0_1 = random.choice([i for i in range(self.clip_length) if (i != idx_v0_0)])
        # Random unlabeled clip and two distinct random frames from it.
        video_idx_1 = random.randint(0, (len(self.video_infos_unlabeled) - 1))
        idx_list = [i for i in range(self.clip_length)]
        random.shuffle(idx_list)
        (idx_v1_0, idx_v1_1) = idx_list[:2]
        return self.prepare_train_img(video_idx_0, idx_v0_0, idx_v0_1, video_idx_1, idx_v1_0, idx_v1_1)

    def prepare_train_img(self, video_idx_0, idx_v0_0, idx_v0_1, video_idx_1, idx_v1_0, idx_v1_1):
        """Assemble the four frame infos and run them through the pipeline."""
        img_info_v0_0 = self.video_infos_labeled[video_idx_0][idx_v0_0]
        img_info_v0_1 = self.video_infos_labeled[video_idx_0][idx_v0_1]
        img_info_v1_0 = self.video_infos_unlabeled[video_idx_1][idx_v1_0]
        img_info_v1_1 = self.video_infos_unlabeled[video_idx_1][idx_v1_1]
        results = dict(img_info_v0_0=img_info_v0_0, img_info_v0_1=img_info_v0_1, img_info_v1_0=img_info_v1_0, img_info_v1_1=img_info_v1_1)
        self.pre_pipeline(results)
        return self.pipeline(results)
class Example(Frame):
    def __init__(self, parent):
        """Build the annotator frame and initialise all annotation state."""
        Frame.__init__(self, parent)
        self.OS = platform.system().lower()
        self.parent = parent
        self.fileName = ''
        self.debug = False
        self.colorAllChunk = True
        # Undo snapshots — newest last; each entry is (content, cursor_index).
        self.history = deque(maxlen=20)
        self.currentContent = deque(maxlen=1)
        # Shortcut key -> label suffix inserted by replaceString.
        self.pressCommand = {'a': u'', 'b': u'', 'c': u'', 'd': u'', 'e': u'', 'f': u'', 'g': u'', 'h': u'', 'i': u'Transaction-', 'j': u'Peron-', 'k': u'Per-', 'l': u'Per-', 'm': u'Per-', 'n': u'Quantity-', 'o': u'Q-', 'p': u'Q-', 'r': u'Q-', 's': u'Q-', 't': u'Policy-', 'u': u'Pol-', 'v': u'Project-', 'w': u'Pro-', 'x': u'Pro-', 'y': u'Pro-', 'z': u'Pro-'}
        self.allKey = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        self.numberKey = ''
        self.controlCommand = {'q': 'unTag', 'ctrl+z': 'undo'}
        self.labelEntryList = []
        self.shortcutLabelList = []
        self.file_encoding = 'utf-8'
        # Text widget height grows with the number of shortcuts (min 20 rows).
        if (len(self.pressCommand) > 20):
            self.textRow = len(self.pressCommand)
        else:
            self.textRow = 20
        self.textColumn = 5
        self.tagScheme = 'BMES'
        self.onlyNP = False
        self.seged = True
        self.configFile = 'config'
        # Regexes matching annotated spans like "[text#Label*]" (and nested).
        self.entityRe = '\\[\\*?\\#.*?\\*\\](?!\\#)'
        self.insideNestEntityRe = '\\[\\\\[\\(?!\\[\\).*?\\#.*?\\*\\]\\#'
        self.entityColor = 'SkyBlue1'
        self.insideNestEntityColor = 'light slate blue'
        self.selectColor = 'light salmon'
        # Event-id counters shown in the side panel.
        self.maxEventId = 0
        self.currentEventId = ''
        self.textFontStyle = 'Times'
        self.EventIdString = StringVar()
        self.initUI()
    def initUI(self):
        """Lay out all widgets (text area, scrollbar, buttons, labels) and bind keys."""
        self.parent.title('SUTDEventAnnotetor-V0.6')
        self.pack(fill=BOTH, expand=True)
        # Grid weights: text columns stretch more than the button columns.
        for idx in range(0, self.textColumn):
            self.columnconfigure(idx, weight=2)
        self.columnconfigure((self.textColumn + 2), weight=1)
        self.columnconfigure((self.textColumn + 4), weight=1)
        for idx in range(0, 16):
            self.rowconfigure(idx, weight=1)
        self.lbl = Label(self, text='File: no file is opened')
        self.lbl.grid(sticky=W, pady=4, padx=5)
        self.fnt = tkFont.Font(font=(self.textFontStyle, 20, 'bold'), size=self.textRow, underline=0)
        self.text = Text(self, font=self.fnt, selectbackground=self.selectColor)
        self.text.grid(row=1, column=0, columnspan=self.textColumn, rowspan=self.textRow, padx=12, pady=12, sticky=(((E + W) + S) + N))
        # Scrollbar wired both ways to the text widget.
        self.sb = Scrollbar(self)
        self.sb.grid(row=1, column=self.textColumn, rowspan=self.textRow, padx=0, sticky=(((E + W) + S) + N))
        self.text['yscrollcommand'] = self.sb.set
        self.sb['command'] = self.text.yview
        # Action buttons down the right-hand column.
        abtn = Button(self, text='Open', command=self.onOpen)
        abtn.grid(row=1, column=(self.textColumn + 1))
        ubtn = Button(self, text='Remap', command=self.renewPressCommand)
        ubtn.grid(row=2, column=(self.textColumn + 1), pady=4)
        exportbtn = Button(self, text='Export', command=self.generateSequenceFile)
        exportbtn.grid(row=3, column=(self.textColumn + 1), pady=4)
        cbtn = Button(self, text='Quit', command=self.quit)
        cbtn.grid(row=4, column=(self.textColumn + 1), pady=4)
        eventIdPlus = Button(self, text='ID+', command=self.increaseEventId)
        eventIdPlus.grid(row=5, column=(self.textColumn + 1))
        eventIdPlus = Button(self, text='ID -', command=self.decreaseEventId)
        eventIdPlus.grid(row=6, column=(self.textColumn + 1))
        # Manual event-id entry (commits on <Return>).
        self.ManualEventIdEntry = Entry(self)
        self.ManualEventIdEntry.grid(row=7, column=(self.textColumn + 1), sticky=(((E + W) + S) + N), pady=4, padx=4)
        self.ManualEventIdEntry.bind('<Return>', self.EventIdEnter)
        self.EventName = Label(self, text='Event: ', foreground='blue', font=(self.textFontStyle, 14, 'bold'))
        self.EventName.grid(row=8, column=(self.textColumn + 1), pady=4)
        self.EventId = Label(self, textvariable=self.EventIdString, foreground='red', font=(self.textFontStyle, 14, 'bold'))
        self.EventId.grid(row=9, column=(self.textColumn + 1), pady=4)
        self.EventIdString.set(('MaxId: %s\nCurId: %s' % (self.maxEventId, self.currentEventId)))
        self.cursorName = Label(self, text='Cursor: ', foreground='blue', font=(self.textFontStyle, 14, 'bold'))
        self.cursorName.grid(row=12, column=(self.textColumn + 1), pady=4)
        self.cursorIndex = Label(self, text='', foreground='red', font=(self.textFontStyle, 14, 'bold'))
        self.cursorIndex.grid(row=13, column=(self.textColumn + 1), pady=4)
        # Bind every letter key: annotate on press, strip the echo on release;
        # off Windows also swallow Control/Command chords.
        for idx in range(0, len(self.allKey)):
            press_key = self.allKey[idx]
            self.text.bind(press_key, self.textReturnEnter)
            simplePressKey = (('<KeyRelease-' + press_key) + '>')
            self.text.bind(simplePressKey, self.deleteTextInput)
            if (self.OS != 'windows'):
                controlPlusKey = (('<Control-Key-' + press_key) + '>')
                self.text.bind(controlPlusKey, self.keepCurrent)
                altPlusKey = (('<Command-Key-' + press_key) + '>')
                self.text.bind(altPlusKey, self.keepCurrent)
        for idx in range(0, len(self.numberKey)):
            press_key = self.numberKey[idx]
            self.text.bind(press_key, self.numberModel)
        # Undo and mouse bindings.
        self.text.bind('<Control-Key-z>', self.backToHistory)
        self.text.bind('<Button-2>', self.rightClick)
        self.text.bind('<Button-3>', self.rightClick)
        self.text.bind('<Double-Button-1>', self.doubleLeftClick)
        self.text.bind('<ButtonRelease-1>', self.singleLeftClick)
        self.setMapShow()
def increaseEventId(self):
if self.debug:
print('Action Track: increaseEventId')
if (self.currentEventId == ''):
self.currentEventId = '1'
else:
self.currentEventId = str((int(self.currentEventId) + 1))
if (int(self.currentEventId) > self.maxEventId):
self.maxEventId = int(self.currentEventId)
self.EventIdString.set(('MaxId: %s\nCurId: %s' % (self.maxEventId, self.currentEventId)))
def decreaseEventId(self):
if self.debug:
print('Action Track: decreaseEventId')
if (self.currentEventId == ''):
self.currentEventId = '0'
else:
self.currentEventId = str((int(self.currentEventId) - 1))
if (int(self.currentEventId) > self.maxEventId):
self.maxEventId = int(self.currentEventId)
self.EventIdString.set(('MaxId: %s\nCurId: %s' % (self.maxEventId, self.currentEventId)))
def EventIdEnter(self, event):
if self.debug:
print('Action Track: EventIdEnter')
content = self.ManualEventIdEntry.get()
self.currentEventId = content
self.EventIdString.set(('MaxId: %s\nCurId: %s' % (self.maxEventId, self.currentEventId)))
def numberModel(self, event):
if self.debug:
print('Action Track: numberModel')
print('Block text.')
if (self.currentEventId != ''):
self.currentEventId = str(((int(self.currentEventId) * 10) + int(event.char)))
else:
self.currentEventId = event.char
if (int(self.currentEventId) > self.maxEventId):
self.maxEventId = int(self.currentEventId)
print(('Current event id: %s, Max event id: %s' % (self.currentEventId, self.maxEventId)))
eventIds = ('Max_Id: %s\nCur_Id: %s' % (self.maxEventId, self.currentEventId))
self.EventId.config(text=eventIds)
self.text.config(state=DISABLED)
def singleLeftClick(self, event):
if self.debug:
print('Action Track: singleLeftClick')
cursor_index = self.text.index(INSERT)
row_column = cursor_index.split('.')
cursor_text = ('RowId: %s\nColId: %s' % (row_column[0], row_column[(- 1)]))
self.cursorIndex.configure(text=cursor_text)
def doubleLeftClick(self, event):
if self.debug:
print('Action Track: doubleLeftClick')
pass
def rightClick(self, event):
if self.debug:
print('Action Track: rightClick')
try:
firstSelection_index = self.text.index(SEL_FIRST)
cursor_index = self.text.index(SEL_LAST)
content = self.text.get('1.0', 'end-1c')
self.writeFile(self.fileName, content, cursor_index)
except TclError:
pass
def onOpen(self):
ftypes = [('all files', '.*'), ('text files', '.txt'), ('ann files', '.ann')]
dlg = tkFileDialog.Open(self, filetypes=ftypes)
fl = dlg.show()
if (fl != ''):
self.text.delete('1.0', END)
text = self.readFile(fl)
self.text.insert(END, text)
self.setNameLabel(('File: ' + fl))
self.setDisplay()
self.text.mark_set(INSERT, '1.0')
self.setCursorLabel(self.text.index(INSERT))
def readFile(self, filename):
f = open(filename)
try:
text = f.read()
self.file_encoding = f.encoding
except UnicodeDecodeError:
f = open(filename, encoding='utf-8')
text = f.read()
self.fileName = filename
return text
def setFont(self, value):
_family = self.textFontStyle
_size = value
_weight = 'bold'
_underline = 0
fnt = tkFont.Font(family=_family, size=_size, weight=_weight, underline=_underline)
Text(self, font=fnt)
    def setNameLabel(self, new_file):
        # Update the header label that shows which file is currently open.
        self.lbl.config(text=new_file)
def setCursorLabel(self, cursor_index):
if self.debug:
print('Action Track: setCursorLabel')
row_column = cursor_index.split('.')
cursor_text = ('RowId: %s\nColId: %s' % (row_column[0], row_column[(- 1)]))
self.cursorIndex.config(text=cursor_text)
def returnButton(self):
if self.debug:
print('Action Track: returnButton')
self.pushToHistory()
content = self.entry.get()
self.clearCommand()
self.executeEntryCommand(content)
return content
def returnEnter(self, event):
if self.debug:
print('Action Track: returnEnter')
self.pushToHistory()
content = self.entry.get()
self.clearCommand()
self.executeEntryCommand(content)
return content
def textReturnEnter(self, event):
press_key = event.char
if self.debug:
print('Action Track: textReturnEnter, press:', press_key)
self.pushToHistory()
print('event: ', press_key)
self.text.configure(state='normal')
self.executeCursorCommand(press_key.lower())
return press_key
    def backToHistory(self, event):
        """Undo: restore the most recent snapshot from the history deque."""
        if self.debug:
            print('Action Track: backToHistory')
        if (len(self.history) > 0):
            # Each history entry is a (content, cursor_index) pair.
            historyCondition = self.history.pop()
            historyContent = historyCondition[0]
            cursorIndex = historyCondition[1]
            self.writeFile(self.fileName, historyContent, cursorIndex)
        else:
            print('History is empty!')
        # NOTE(review): inserting a literal 'p' looks like a workaround for the
        # Ctrl-key event echo (cf. keepCurrent) — confirm before removing.
        self.text.insert(INSERT, 'p')
def keepCurrent(self, event):
if self.debug:
print('Action Track: keepCurrent')
print(('keep current, insert:%s' % INSERT))
print('before:', self.text.index(INSERT))
self.text.insert(INSERT, 'p')
print('after:', self.text.index(INSERT))
    def clearCommand(self):
        # Empty the command entry box.
        if self.debug:
            print('Action Track: clearCommand')
        self.entry.delete(0, 'end')
def getText(self):
textContent = self.text.get('1.0', 'end-1c')
textContent = textContent
return textContent
    def executeCursorCommand(self, command):
        """Apply a shortcut-key annotation command at the selection or cursor.

        With an active selection, wraps/re-wraps the selected span; with no
        selection (TclError path), finds an existing entity under the cursor
        on the current line and re-labels it.

        NOTE(review): indentation reconstructed from a whitespace-stripped
        dump — the nesting of the final write-out in the except branch is a
        best-effort reading; confirm against the original annotator source.
        """
        if self.debug:
            print('Action Track: executeCursorCommand')
        content = self.getText()
        print(('Command:' + command))
        try:
            # Selection path: index(SEL_FIRST) raises TclError when nothing
            # is selected, dropping us into the cursor-based path below.
            firstSelection_index = self.text.index(SEL_FIRST)
            cursor_index = self.text.index(SEL_LAST)
            aboveHalf_content = self.text.get('1.0', firstSelection_index)
            followHalf_content = self.text.get(firstSelection_index, 'end-1c')
            selected_string = self.text.selection_get()
            if (re.match(self.entityRe, selected_string) != None):
                # Selection is already an entity "[text#Label*]": strip the
                # old label before applying the new one.
                new_string_list = selected_string.strip('[]').rsplit('#', 1)
                new_string = new_string_list[0]
                followHalf_content = followHalf_content.replace(selected_string, new_string, 1)
                selected_string = new_string
                # +4 accounts for the removed bracket/marker characters.
                cursor_index = ('%s - %sc' % (cursor_index, str((len(new_string_list[1]) + 4))))
            if (command == 'q'):
                print('q: remove entity label')
            elif (len(selected_string) > 0):
                (followHalf_content, cursor_index) = self.replaceString(followHalf_content, selected_string, command, cursor_index)
            content = (aboveHalf_content + followHalf_content)
            content = content
            self.writeFile(self.fileName, content, cursor_index)
        except TclError:
            # No selection: operate on the entity under the insertion cursor.
            cursor_index = self.text.index(INSERT)
            [line_id, column_id] = cursor_index.split('.')
            aboveLine_content = self.text.get('1.0', (str((int(line_id) - 1)) + '.end'))
            belowLine_content = self.text.get((str((int(line_id) + 1)) + '.0'), 'end-1c')
            line = self.text.get((line_id + '.0'), (line_id + '.end'))
            matched_span = ((- 1), (- 1))
            for match in re.finditer(self.entityRe, line):
                # NOTE(review): `int(column_id) & int(column_id)` is a no-op
                # (x & x == x) — likely a typo for a plain int(column_id).
                if (match.span()[0] <= (int(column_id) & int(column_id)) <= match.span()[1]):
                    matched_span = match.span()
                    break
            if (matched_span[1] > 0):
                selected_string = line[matched_span[0]:matched_span[1]]
                new_string_list = selected_string.strip('[]').rsplit('#', 1)
                new_string = new_string_list[0]
                line_before_entity = line[:matched_span[0]]
                line_after_entity = (new_string + line[matched_span[1]:])
                selected_string = new_string
                cursor_index = ((line_id + '.') + str((int(matched_span[1]) - (len(new_string_list[1]) + 4))))
                if (command == 'q'):
                    print('q: remove entity label')
                elif (len(selected_string) > 0):
                    if (command in self.pressCommand):
                        (line_after_entity, cursor_index) = self.replaceString(line_after_entity, selected_string, command, cursor_index)
                    else:
                        return
                line = (line_before_entity + line_after_entity)
            # Re-join surrounding lines with their separating newlines.
            if (aboveLine_content != ''):
                aboveLine_content = (aboveLine_content + '\n')
            if (belowLine_content != ''):
                belowLine_content = ('\n' + belowLine_content)
            content = ((aboveLine_content + line) + belowLine_content)
            content = content
            self.writeFile(self.fileName, content, cursor_index)
def executeEntryCommand(self, command):
    """Run the annotation command typed into the entry box.

    An empty command moves the cursor to the start of the next line.
    Otherwise the command is decomposed into (count, key) pairs; each pair
    selects *count* characters at the cursor and wraps them with the label
    bound to *key* in self.pressCommand, then rewrites the file.
    """
    if self.debug:
        print('Action Track: executeEntryCommand')
    if (len(command) == 0):
        # Empty input: jump to the beginning of the next line.
        currentCursor = self.text.index(INSERT)
        newCurrentCursor = (str((int(currentCursor.split('.')[0]) + 1)) + '.0')
        self.text.mark_set(INSERT, newCurrentCursor)
        self.setCursorLabel(newCurrentCursor)
    else:
        command_list = decompositCommand(command)
        for idx in range(0, len(command_list)):
            command = command_list[idx]
            if (len(command) == 2):
                # "<digit><key>": annotate <digit> characters with <key>'s label.
                select_num = int(command[0])
                command = command[1]
                content = self.getText()
                cursor_index = self.text.index(INSERT)
                # Selection spans select_num columns to the right of the cursor.
                newcursor_index = ((cursor_index.split('.')[0] + '.') + str((int(cursor_index.split('.')[1]) + select_num)))
                selected_string = self.text.get(cursor_index, newcursor_index)
                aboveHalf_content = self.text.get('1.0', cursor_index)
                followHalf_content = self.text.get(cursor_index, 'end-1c')
                if (command in self.pressCommand):
                    if (len(selected_string) > 0):
                        # Only the trailing half is rewritten so earlier
                        # occurrences of the same substring stay untouched.
                        (followHalf_content, newcursor_index) = self.replaceString(followHalf_content, selected_string, command, newcursor_index)
                content = (aboveHalf_content + followHalf_content)
                self.writeFile(self.fileName, content, newcursor_index)
def deleteTextInput(self, event):
    """Handle a delete keystroke and persist the edited buffer to disk.

    Removes the character immediately before the insertion cursor from the
    trailing half of the buffer, then rewrites the whole annotation file.
    """
    if self.debug:
        print('Action Track: deleteTextInput')
    get_insert = self.text.index(INSERT)
    print('delete insert:', get_insert)
    insert_list = get_insert.split('.')
    # Tk index of the character just before the cursor (same line, column-1).
    last_insert = ((insert_list[0] + '.') + str((int(insert_list[1]) - 1)))
    get_input = self.text.get(last_insert, get_insert)
    aboveHalf_content = self.text.get('1.0', last_insert)
    followHalf_content = self.text.get(last_insert, 'end-1c')
    if (len(get_input) > 0):
        # Drop only the first occurrence — that is the deleted character itself.
        followHalf_content = followHalf_content.replace(get_input, '', 1)
        content = (aboveHalf_content + followHalf_content)
    # NOTE(review): if get_input is ever empty, `content` is unbound here and
    # this raises NameError — presumably unreachable for real keystrokes; confirm.
    self.writeFile(self.fileName, content, last_insert)
def replaceString(self, content, string, replaceType, cursor_index):
    """Wrap *string* in an entity annotation and update the cursor index.

    Args:
        content: text to edit (usually the buffer from the cursor onward).
        string: the exact substring to annotate.
        replaceType: shortcut key; must be a key of self.pressCommand.
        cursor_index: current Tk 'line.column' cursor index.

    Returns:
        (updated_content, updated_cursor_index). On an unknown shortcut the
        input is returned unchanged.
    """
    if replaceType not in self.pressCommand:
        # Fixed typo in the user-facing message ('Invaild' -> 'Invalid').
        print('Invalid command!')
        print('cursor index: ', self.text.index(INSERT))
        return (content, cursor_index)
    label = self.pressCommand[replaceType]
    # Annotated form: [<original>#<eventId><label>*]
    new_string = ((((('[' + string) + '#') + self.currentEventId) + label) + '*]')
    # Advance the cursor past the added wrapper characters.
    newcursor_index = ('%s + %sc' % (cursor_index, str((len(label) + 5))))
    # Only the first occurrence is replaced — callers pass the buffer half
    # that starts at the cursor so this is the selected span.
    content = content.replace(string, new_string, 1)
    eventIds = ('MaxId: %s\nCurId: %s' % (self.maxEventId, self.currentEventId))
    self.EventId.config(text=eventIds)
    return (content, newcursor_index)
def writeFile(self, fileName, content, newcursor_index):
    """Persist *content* to an .ann file and reload it into the editor.

    The target name is *fileName* itself when it already refers to an .ann
    file, otherwise '.ann' is appended. An empty name is refused.
    """
    if len(fileName) == 0:
        print("Don't write to empty file!")
        return
    # Reuse the name when it already points at an .ann file.
    new_name = fileName if ('.ann' in fileName) else (fileName + '.ann')
    with open(new_name, 'w', encoding=self.file_encoding) as ann_file:
        ann_file.write(content)
    self.autoLoadNewFile(new_name, newcursor_index)
def autoLoadNewFile(self, fileName, newcursor_index):
    """Reload *fileName* into the text widget and restore the cursor.

    Called after every write so the on-screen buffer always mirrors the
    saved file; *newcursor_index* is a Tk index (or index expression) for
    where the insertion cursor should end up.
    """
    if self.debug:
        print('Action Track: autoLoadNewFile')
    if (len(fileName) > 0):
        # Replace the whole buffer with the file contents.
        self.text.delete('1.0', END)
        text = self.readFile(fileName)
        self.text.insert('end-1c', text)
        self.setNameLabel(('File: ' + fileName))
        # Restore cursor position, scroll it into view, refresh labels/colors.
        self.text.mark_set(INSERT, newcursor_index)
        self.text.see(newcursor_index)
        self.setCursorLabel(newcursor_index)
        self.setColorDisplay()
def setColorDisplay(self):
    """Re-apply highlighting to annotated entities around the cursor.

    Two passes over self.entityRe / self.insideNestEntityRe matches: the
    first tags each entity body ('catagory') and its bracket edges ('edge');
    the second tags entities nested inside other entities. The scan covers
    the whole buffer when self.colorAllChunk is set, otherwise only the
    line under the cursor.
    """
    if self.debug:
        print('Action Track: setColorDisplay')
    self.text.config(insertbackground='red', insertwidth=4, font=self.fnt)
    countVar = StringVar()
    currentCursor = self.text.index(INSERT)
    lineStart = (currentCursor.split('.')[0] + '.0')
    lineEnd = (currentCursor.split('.')[0] + '.end')
    # Choose the scan range: full buffer vs. current line only.
    if self.colorAllChunk:
        self.text.mark_set('matchStart', '1.0')
        self.text.mark_set('matchEnd', '1.0')
        self.text.mark_set('searchLimit', 'end-1c')
    else:
        self.text.mark_set('matchStart', lineStart)
        self.text.mark_set('matchEnd', lineStart)
        self.text.mark_set('searchLimit', lineEnd)
    # Pass 1: highlight every top-level entity span.
    while True:
        self.text.tag_configure('catagory', background=self.entityColor)
        self.text.tag_configure('edge', background=self.entityColor)
        pos = self.text.search(self.entityRe, 'matchEnd', 'searchLimit', count=countVar, regexp=True)
        if (pos == ''):
            break
        self.text.mark_set('matchStart', pos)
        self.text.mark_set('matchEnd', ('%s+%sc' % (pos, countVar.get())))
        # Body gets 'catagory'; first and last characters get 'edge'.
        first_pos = pos
        second_pos = ('%s+%sc' % (pos, str(1)))
        lastsecond_pos = ('%s+%sc' % (pos, str((int(countVar.get()) - 1))))
        last_pos = ('%s + %sc' % (pos, countVar.get()))
        self.text.tag_add('catagory', second_pos, lastsecond_pos)
        self.text.tag_add('edge', first_pos, second_pos)
        self.text.tag_add('edge', lastsecond_pos, last_pos)
    # Pass 2: rewind marks and highlight nested entities.
    if self.colorAllChunk:
        self.text.mark_set('matchStart', '1.0')
        self.text.mark_set('matchEnd', '1.0')
        self.text.mark_set('searchLimit', 'end-1c')
    else:
        self.text.mark_set('matchStart', lineStart)
        self.text.mark_set('matchEnd', lineStart)
        self.text.mark_set('searchLimit', lineEnd)
    while True:
        self.text.tag_configure('insideEntityColor', background=self.insideNestEntityColor)
        pos = self.text.search(self.insideNestEntityRe, 'matchEnd', 'searchLimit', count=countVar, regexp=True)
        if (pos == ''):
            break
        self.text.mark_set('matchStart', pos)
        self.text.mark_set('matchEnd', ('%s+%sc' % (pos, countVar.get())))
        first_pos = ('%s + %sc' % (pos, 2))
        last_pos = ('%s + %sc' % (pos, str((int(countVar.get()) - 1))))
        self.text.tag_add('insideEntityColor', first_pos, last_pos)
def setDisplay(self):
    """Apply entity highlighting to the whole buffer unconditionally.

    Same two-pass tagging as setColorDisplay, but always scans from the
    start of the buffer and does not reconfigure the font.
    """
    if self.debug:
        print('Action Track: setDisplay')
    self.text.config(insertbackground='red', insertwidth=4)
    self.text.mark_set('matchStart', '1.0')
    self.text.mark_set('matchEnd', '1.0')
    self.text.mark_set('searchLimit', 'end-1c')
    countVar = StringVar()
    # Pass 1: top-level entities ('catagory' body + 'edge' brackets).
    while True:
        self.text.tag_configure('catagory', background=self.entityColor)
        self.text.tag_configure('edge', background=self.entityColor)
        pos = self.text.search(self.entityRe, 'matchEnd', 'searchLimit', count=countVar, regexp=True)
        if (pos == ''):
            break
        self.text.mark_set('matchStart', pos)
        self.text.mark_set('matchEnd', ('%s+%sc' % (pos, countVar.get())))
        first_pos = pos
        second_pos = ('%s+%sc' % (pos, str(1)))
        lastsecond_pos = ('%s+%sc' % (pos, str((int(countVar.get()) - 1))))
        last_pos = ('%s + %sc' % (pos, countVar.get()))
        self.text.tag_add('catagory', second_pos, lastsecond_pos)
        self.text.tag_add('edge', first_pos, second_pos)
        self.text.tag_add('edge', lastsecond_pos, last_pos)
    # Pass 2: nested entities. Only matchEnd/searchLimit are rewound here;
    # the search below starts from 'matchEnd', so that is sufficient.
    self.text.mark_set('matchEnd', '1.0')
    self.text.mark_set('searchLimit', 'end-1c')
    while True:
        self.text.tag_configure('insideEntityColor', background=self.insideNestEntityColor)
        pos = self.text.search(self.insideNestEntityRe, 'matchEnd', 'searchLimit', count=countVar, regexp=True)
        if (pos == ''):
            break
        self.text.mark_set('matchStart', pos)
        self.text.mark_set('matchEnd', ('%s+%sc' % (pos, countVar.get())))
        first_pos = ('%s + %sc' % (pos, 2))
        last_pos = ('%s + %sc' % (pos, str((int(countVar.get()) - 1))))
        self.text.tag_add('insideEntityColor', first_pos, last_pos)
def pushToHistory(self):
    """Snapshot the current text and cursor position onto the undo history."""
    if self.debug:
        print('Action Track: pushToHistory')
    # Each history entry is a [content, cursor_index] pair.
    snapshot = [self.getText(), self.text.index(INSERT)]
    self.history.append(snapshot)
def pushToHistoryEvent(self, event):
    """Tk event-handler variant of pushToHistory.

    *event* is required by the binding signature but unused.
    """
    if self.debug:
        print('Action Track: pushToHistoryEvent')
    snapshot = [self.getText(), self.text.index(INSERT)]
    self.history.append(snapshot)
def renewPressCommand(self):
    """Rebuild the key->label shortcut map from the on-screen entry widgets.

    Reads one entry per sorted shortcut key; an empty entry drops that key.
    Rows freed by dropped keys are blanked ('NON= '), the new map is pickled
    to self.configFile, and the shortcut panel is redrawn.
    """
    if self.debug:
        print('Action Track: renewPressCommand')
    seq = 0
    new_dict = {}
    listLength = len(self.labelEntryList)
    delete_num = 0
    for key in sorted(self.pressCommand):
        label = self.labelEntryList[seq].get()
        if (len(label) > 0):
            new_dict[key] = label
        else:
            # Empty entry: the key is removed from the new mapping.
            delete_num += 1
        seq += 1
    self.pressCommand = new_dict
    # Blank out the trailing rows freed by deleted shortcuts.
    for idx in range(1, (delete_num + 1)):
        self.labelEntryList[(listLength - idx)].delete(0, END)
        self.shortcutLabelList[(listLength - idx)].config(text='NON= ')
    with open(self.configFile, 'wb') as fp:
        pickle.dump(self.pressCommand, fp)
    self.setMapShow()
def setMapShow(self):
    """Draw the shortcut->label mapping panel beside the text area.

    Reloads the pickled map from self.configFile when present, then builds
    one static key label plus one editable label entry per shortcut key.
    """
    if os.path.isfile(self.configFile):
        with open(self.configFile, 'rb') as fp:
            self.pressCommand = pickle.load(fp)
    hight = len(self.pressCommand)  # NOTE(review): unused, as is `width`.
    width = 2
    row = 0
    mapLabel = Label(self, text='Shortcuts map Labels', foreground='blue', font=(self.textFontStyle, 14, 'bold'))
    mapLabel.grid(row=0, column=(self.textColumn + 2), columnspan=2, rowspan=1, padx=10)
    self.labelEntryList = []
    self.shortcutLabelList = []
    for key in sorted(self.pressCommand):
        row += 1
        # "<KEY>: " static label in one column...
        symbolLabel = Label(self, text=(key.upper() + ': '), foreground='blue', font=(self.textFontStyle, 14, 'bold'))
        symbolLabel.grid(row=row, column=(self.textColumn + 2), columnspan=1, rowspan=1, padx=3)
        self.shortcutLabelList.append(symbolLabel)
        # ...editable label text in the next column.
        labelEntry = Entry(self, foreground='blue', font=(self.textFontStyle, 14, 'bold'))
        labelEntry.insert(0, self.pressCommand[key])
        labelEntry.grid(row=row, column=(self.textColumn + 3), columnspan=1, rowspan=1)
        self.labelEntryList.append(labelEntry)
def getCursorIndex(self):
    """Return the insertion-cursor position as a Tk 'line.column' string."""
    cursor_position = self.text.index(INSERT)
    return cursor_position
def generateSequenceFile(self):
    """Export the current annotation file into sequence (word-tag) format.

    Writes '<fileName-without-.ann>.anns'. Lines of length <= 2 are treated
    as sentence separators and emitted as blank lines.

    Returns:
        -1 when self.fileName is not an .ann/.txt file, otherwise None.
    """
    if (('.ann' not in self.fileName) and ('.txt' not in self.fileName)):
        print('Export only works on filename ended in .ann or .txt! Please rename file.')
        return (- 1)
    # Fixed: 'rU' mode was removed in Python 3.11; universal newlines are the
    # default in text mode. Both files are now closed via context managers.
    with open(self.fileName, 'r') as in_file:
        fileLines = in_file.readlines()
    lineNum = len(fileLines)
    new_filename = (self.fileName.split('.ann')[0] + '.anns')
    with open(new_filename, 'w') as seqFile:
        for line in fileLines:
            if (len(line) <= 2):
                # Sentence boundary: keep an empty line in the output.
                seqFile.write('\n')
                continue
            wordTagPairs = getWordTagPairs(line, self.seged, self.tagScheme, self.onlyNP, self.entityRe)
            for wordTag in wordTagPairs:
                seqFile.write(wordTag)
            # Blank line terminates each exported sentence.
            seqFile.write('\n')
    print('Exported file into sequence style in file: ', new_filename)
    print('Line number:', lineNum)
class _InputInjection(nn.Module):
def __init__(self, ratio):
super(_InputInjection, self).__init__()
self.pool = nn.ModuleList()
for i in range(0, ratio):
self.pool.append(nn.AvgPool2d(3, 2, 1))
def forward(self, x):
for pool in self.pool:
x = pool(x)
return x |
class TestLoadCaffe():
    """Regression tests for loading Caffe weights into BigDL modules."""

    def test_load_caffe(self):
        """Check Model.load_caffe / load_caffe_model against fixture weights.

        Uses a tiny prototxt/caffemodel pair under ../resources. The first
        load matches every layer by name; the second uses match_all=False so
        the renamed 'conv3' layer keeps its random init; the third builds the
        whole model directly from the Caffe definition.
        """
        resource_path = os.path.join(os.path.split(__file__)[0], '../resources')
        proto_txt = os.path.join(resource_path, 'test.prototxt')
        model_path = os.path.join(resource_path, 'test.caffemodel')
        # conv -> conv2 -> ip mirrors the layer names in the prototxt.
        module = Sequential().add(SpatialConvolution(3, 4, 2, 2).set_name('conv')).add(SpatialConvolution(4, 3, 2, 2).set_name('conv2')).add(Linear(27, 2, with_bias=False).set_name('ip'))
        model = Model.load_caffe(module, proto_txt, model_path, bigdl_type='float')
        parameters = model.parameters()
        # Expected weights as stored in the fixture caffemodel.
        conv1_weight = np.array([0., 0., 0., (- 0.), 0., 0., 0., 0., 0., (- 0.), 0., 0., 0., (- 0.), 0., (- 0.), (- 0.), 0., 0., (- 0.), (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., 0., (- 0.), (- 0.), (- 0.), (- 0.), 0., 0., 0., 0., (- 0.), 0., 0., (- 0.), (- 0.), (- 0.), (- 0.)]).astype('float').reshape((1, 4, 3, 2, 2))
        conv1_bias = np.array([0., (- 0.), (- 0.), 0.]).astype('float')
        conv2_weight = np.array([0., 0., 0., (- 0.), (- 0.), 0., (- 0.), 0., 0., 0., (- 0.), (- 0.), (- 0.), (- 0.), 0., (- 0.), 0., (- 0.), 9.52507e-05, 0., 0., 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., (- 0.), 0., 0., (- 0.), (- 0.), (- 0.), 0., 0., (- 0.), 0., 0., 0., (- 0.), (- 0.), 0., (- 0.)]).astype('float').reshape((1, 3, 4, 2, 2))
        conv2_bias = np.array([0, 0, 0]).astype('float')
        linear_weight = np.array([0., 0., 0., 0., (- 0.), (- 0.), (- 0.), 0., 0., 0., (- 0.), (- 0.), 0., 0., (- 0.), (- 0.), 0., 0., (- 0.), 0., (- 0.), 0., (- 0.), (- 0.), 0., (- 0.), 0., (- 0.), 0., 0., (- 0.), 0., (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., (- 0.), (- 0.), (- 0.), 0., 0., 0., 0., 0., 0., 0., (- 0.), 0., (- 0.), (- 0.), 0., 0.]).astype('float').reshape((2, 27))
        assert_allclose(parameters['conv']['weight'], conv1_weight, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv']['bias'], conv1_bias, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv2']['weight'], conv2_weight, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv2']['bias'], conv2_bias, atol=1e-06, rtol=0)
        assert_allclose(parameters['ip']['weight'], linear_weight, atol=1e-06, rtol=0)
        # 'conv3' does not exist in the caffemodel; with match_all=False its
        # parameters must NOT be overwritten by the pretrained values.
        module = Sequential().add(SpatialConvolution(3, 4, 2, 2).set_name('conv')).add(SpatialConvolution(4, 3, 2, 2).set_name('conv3')).add(Linear(27, 2, with_bias=False).set_name('ip'))
        model = Model.load_caffe(module, proto_txt, model_path, match_all=False)
        parameters = model.parameters()
        assert_allclose(parameters['conv']['weight'], conv1_weight, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv']['bias'], conv1_bias, atol=1e-06, rtol=0)
        assert (not np.allclose(parameters['conv3']['weight'], conv2_weight, atol=1e-06, rtol=0))
        assert (not np.allclose(parameters['conv3']['bias'], conv2_bias, atol=1e-06, rtol=0))
        assert_allclose(parameters['ip']['weight'], linear_weight, atol=1e-06, rtol=0)
        # Loading the model straight from the Caffe definition must agree
        # with the name-matched load above.
        model = Model.load_caffe_model(proto_txt, model_path, bigdl_type='float')
        parameters = model.parameters()
        assert_allclose(parameters['conv']['weight'], conv1_weight, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv']['bias'], conv1_bias, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv2']['weight'], conv2_weight, atol=1e-06, rtol=0)
        assert_allclose(parameters['conv2']['bias'], conv2_bias, atol=1e-06, rtol=0)
        assert_allclose(parameters['ip']['weight'], linear_weight, atol=1e-06, rtol=0)
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (3x3 conv -> BN -> ReLU, twice)
    with an additive skip connection.

    `norm_cfg` is accepted for interface compatibility but not used here;
    normalisation is provided by the module-level `BatchNorm`.
    """

    def __init__(self, inplanes, planes, stride=1, dilation=1, norm_cfg=dict(type='BN')):
        super(BasicBlock, self).__init__()
        # First conv may downsample (stride) and/or dilate.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second conv always keeps resolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation)
        self.bn2 = BatchNorm(planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # Default skip path is the block input itself.
        skip = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += skip
        return self.relu(y)
class OzoneTrainDataset(DelimitTrainDataset):
    """Training dataset pairing limiter-processed ('Ozone') mixtures with
    loudness-normalised clean mixtures.

    __getitem__ draws either a pre-rendered fixed segment (with probability
    `use_fixed`) or a randomly rendered segment whose per-source rendering
    parameters are read from the ozone_train_random_*.csv manifests.
    """

    def __init__(self, target: str='all', root: str=None, ozone_root: str=None, use_fixed: float=0.1, seq_duration: Optional[float]=6.0, samples_per_track: int=64, source_augmentations: Optional[Callable]=(lambda audio: audio), sample_rate: int=44100, seed: int=42, limitaug_method: str='limitaug', limitaug_mode: str='normal_L', limitaug_custom_target_lufs: float=None, limitaug_custom_target_lufs_std: float=None, target_loudnorm_lufs: float=(- 14.0), target_limitaug_mode: str=None, target_limitaug_custom_target_lufs: float=None, target_limitaug_custom_target_lufs_std: float=None, custom_limiter_attack_range: list=[2.0, 2.0], custom_limiter_release_range: list=[200.0, 200.0], *args, **kwargs) -> None:
        super().__init__(target, root, seq_duration, samples_per_track, source_augmentations, sample_rate, seed, limitaug_method, limitaug_mode, limitaug_custom_target_lufs, limitaug_custom_target_lufs_std, target_loudnorm_lufs, target_limitaug_mode, target_limitaug_custom_target_lufs, target_limitaug_custom_target_lufs_std, custom_limiter_attack_range, custom_limiter_release_range, *args, **kwargs)
        # Root of the pre-rendered limiter outputs.
        self.ozone_root = ozone_root
        # Probability of sampling from the fixed (pre-rendered) pool.
        self.use_fixed = use_fixed
        self.list_train_fixed = glob.glob(f'{self.ozone_root}/ozone_train_fixed/*.wav')
        self.list_train_random = glob.glob(f'{self.ozone_root}/ozone_train_random/*.wav')
        self.dict_train_random = {}
        # Each CSV row describes how one random segment was rendered:
        # limiter settings plus (name, start_sec, gain, channelswap) per stem.
        list_csv_files = glob.glob(f'{self.ozone_root}/ozone_train_random_*.csv')
        list_csv_files.sort()
        for csv_file in list_csv_files:
            with open(csv_file, 'r') as f:
                reader = csv.reader(f)
                next(reader)  # skip the header row
                for row in reader:
                    self.dict_train_random[row[0]] = {'max_threshold': float(row[1]), 'max_character': float(row[2]), 'vocals': {'name': row[3], 'start_sec': float(row[4]), 'gain': float(row[5]), 'channelswap': bool(row[6])}, 'bass': {'name': row[7], 'start_sec': float(row[8]), 'gain': float(row[9]), 'channelswap': bool(row[10])}, 'drums': {'name': row[11], 'start_sec': float(row[12]), 'gain': float(row[13]), 'channelswap': bool(row[14])}, 'other': {'name': row[15], 'start_sec': float(row[16]), 'gain': float(row[17]), 'channelswap': bool(row[18])}}

    def __getitem__(self, idx):
        """Return (mixture_limited, mixture_loudnorm) for a random segment.

        Note: *idx* is ignored; the segment is chosen at random, which is
        intentional for this style of sampling-based training set.
        """
        use_fixed_prob = random.random()
        if (use_fixed_prob <= self.use_fixed):
            # Fixed pool: limited mixture is pre-rendered; clean stems are
            # read from the matching song at the same start position.
            audio_path = random.choice(self.list_train_fixed)
            song_name = os.path.basename(audio_path).replace('.wav', '')
            (mixture_limited, start_pos_sec) = load_wav_arbitrary_position_stereo(audio_path, self.sample_rate, self.seq_duration, return_pos=True)
            audio_sources = []
            track_path = f'{self.root}/train/{song_name}'
            for source in self.sources:
                audio_path = f'{track_path}/{source}.wav'
                audio = load_wav_specific_position_stereo(audio_path, self.sample_rate, self.seq_duration, start_position=start_pos_sec)
                audio_sources.append(audio)
        else:
            # Random pool: re-create the clean stems using the per-segment
            # parameters recorded in the CSV manifest.
            audio_path = random.choice(self.list_train_random)
            seg_name = os.path.basename(audio_path).replace('.wav', '')
            (mixture_limited, sr) = librosa.load(audio_path, sr=self.sample_rate, mono=False)
            audio_sources = []
            for source in self.sources:
                dict_seg_info = self.dict_train_random[seg_name]
                dict_seg_source_info = dict_seg_info[source]
                audio_path = f"{self.root}/train/{dict_seg_source_info['name']}/{source}.wav"
                audio = load_wav_specific_position_stereo(audio_path, self.sample_rate, self.seq_duration, start_position=dict_seg_source_info['start_sec'])
                audio = (audio * dict_seg_source_info['gain'])
                if dict_seg_source_info['channelswap']:
                    audio = np.flip(audio, axis=0)
                audio_sources.append(audio)
        stems = np.stack(audio_sources, axis=0)
        mixture = stems.sum(axis=0)
        # Loudness-normalise the clean mixture to the target LUFS; silent
        # segments (LUFS == -inf) are passed through unchanged.
        mixture_lufs = self.meter.integrated_loudness(mixture.T)
        if np.isinf(mixture_lufs):
            mixture_loudnorm = mixture
        else:
            augmented_gain = (self.target_loudnorm_lufs - mixture_lufs)
            mixture_loudnorm = (mixture * db2linear(augmented_gain, eps=0.0))
        return (mixture_limited, mixture_loudnorm)
class Generator(abc.ABC):
    """Abstract base for warehouse grid generators.

    Validates and stores the grid geometry and agent configuration.
    Concrete subclasses must provide the shelf layout (`shelf_ids`,
    `not_in_queue_size`, `highways`, `goals`) and implement `__call__`
    to produce an initial environment state from a PRNG key.

    Review fix: the last five method definitions had lost their bodies
    (and, presumably, their decorators) and were a syntax error; they are
    restored here as abstract methods.
    """

    def __init__(self, shelf_rows: int, shelf_columns: int, column_height: int, num_agents: int, sensor_range: int, request_queue_size: int) -> None:
        if ((shelf_columns % 2) != 1):
            raise ValueError('Environment argument: `shelf_columns`, must be an odd number.')
        self._shelf_rows = shelf_rows
        self._shelf_columns = shelf_columns
        self._column_height = column_height
        self._num_agents = num_agents
        self._sensor_range = sensor_range
        self._request_queue_size = request_queue_size
        # Height: one aisle row per shelf row plus top/bottom border rows.
        # Width: shelf columns of width 2 plus aisles, plus one edge column.
        self._grid_size = ((((column_height + 1) * shelf_rows) + 2), (((2 + 1) * shelf_columns) + 1))
        self._agent_ids = jnp.arange(num_agents)

    # NOTE(review): these simple accessors were likely decorated with
    # @property in the original source (decorators appear stripped during
    # extraction) — confirm against callers before changing them.
    def shelf_rows(self) -> int:
        return self._shelf_rows

    def shelf_columns(self) -> int:
        return self._shelf_columns

    def column_height(self) -> int:
        return self._column_height

    def grid_size(self) -> chex.Array:
        return self._grid_size

    def num_agents(self) -> int:
        return self._num_agents

    def sensor_range(self) -> int:
        return self._sensor_range

    def request_queue_size(self) -> int:
        return self._request_queue_size

    def agent_ids(self) -> chex.Array:
        return self._agent_ids

    @abc.abstractmethod
    def shelf_ids(self) -> chex.Array:
        """Ids of all shelves in the grid (subclass responsibility)."""

    @abc.abstractmethod
    def not_in_queue_size(self) -> chex.Array:
        """Count of shelves outside the request queue (subclass responsibility)."""

    @abc.abstractmethod
    def highways(self) -> chex.Array:
        """Mask/layout of highway (non-shelf) cells (subclass responsibility)."""

    @abc.abstractmethod
    def goals(self) -> chex.Array:
        """Goal cell coordinates (subclass responsibility)."""

    @abc.abstractmethod
    def __call__(self, key: chex.PRNGKey) -> State:
        """Generate an initial environment State from a PRNG key."""
class VUAProcessor(DataProcessor):
    """Data processor for the VUA TSV data set (train/dev/test splits)."""

    def get_train_examples(self, data_dir):
        """Build examples from train.tsv under *data_dir*."""
        rows = self._read_tsv(os.path.join(data_dir, 'train.tsv'))
        return self._create_examples(rows, 'train')

    def get_test_examples(self, data_dir):
        """Build examples from test.tsv under *data_dir*."""
        rows = self._read_tsv(os.path.join(data_dir, 'test.tsv'))
        return self._create_examples(rows, 'test')

    def get_dev_examples(self, data_dir):
        """Build examples from dev.tsv under *data_dir*."""
        rows = self._read_tsv(os.path.join(data_dir, 'dev.tsv'))
        return self._create_examples(rows, 'dev')

    def get_labels(self):
        """Return the two class labels used by this task."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows into InputExample objects.

        Row layout: id, label, sentence, POS, FGPOS, then either a single
        trailing index column or (index, second sentence, second index)
        when the row has 8 columns.
        """
        examples = []
        for row_num, line in enumerate(lines):
            if row_num == 0:
                # First row is the TSV header.
                continue
            guid = '%s-%s' % (set_type, line[0])
            label = line[1]
            text_a = line[2]
            POS = line[3]
            FGPOS = line[4]
            if len(line) == 8:
                # Paired-sentence variant with explicit indices for both.
                examples.append(InputExample(guid=guid, text_a=text_a, text_b=line[5], label=label, POS=POS, FGPOS=FGPOS, text_a_2=line[6], text_b_2=line[7]))
            else:
                # Single-sentence variant: the index is the last column.
                examples.append(InputExample(guid=guid, text_a=text_a, text_b=line[(- 1)], label=label, POS=POS, FGPOS=FGPOS))
        return examples
def test_sequential_sar_decoder():
    """Smoke-test SequentialSARDecoder in both train and test modes.

    Checks that an img_metas list whose length does not match the batch
    size is rejected, and that both forward modes yield (1, 5, 36) logits.
    """
    decoder = SequentialSARDecoder(num_classes=37, padding_idx=36, max_seq_len=5)
    decoder.init_weights()
    decoder.train()
    (feat, out_enc, tgt_dict, img_metas) = _create_dummy_input()
    # img_metas must have exactly one entry per batch element.
    for bad_metas in ([], (img_metas * 2)):
        with pytest.raises(AssertionError):
            decoder(feat, out_enc, tgt_dict, bad_metas)
    expected_shape = torch.Size([1, 5, 36])
    train_out = decoder(feat, out_enc, tgt_dict, img_metas, True)
    assert (train_out.shape == expected_shape)
    test_out = decoder(feat, out_enc, tgt_dict, img_metas, False)
    assert (test_out.shape == expected_shape)
def gptneox_sample_token_mirostat_v2(ctx: gptneox_context_p, candidates, tau: c_float, eta: c_float, mu) -> gptneox_token:
    """Sample a token with the native library's Mirostat v2 sampler.

    Thin ctypes passthrough to `_lib.gptneox_sample_token_mirostat_v2`;
    argument marshalling/validation is delegated entirely to the configured
    argtypes on `_lib`. `mu` is presumably a pointer to the running Mirostat
    state updated in place by the C side — TODO confirm against the header.
    """
    return _lib.gptneox_sample_token_mirostat_v2(ctx, candidates, tau, eta, mu)
def _padleft(width, s, has_invisible=True):
iwidth = (((width + len(s)) - len(_strip_invisible(s))) if has_invisible else width)
fmt = ('{0:>%ds}' % iwidth)
return fmt.format(s) |
class ImageDataset(Dataset):
    """Map-style dataset that reads image tensors from a list of paths.

    Each item is a (image, filename) pair, where the image is produced by
    *read_func* with *transform* applied.

    Review fix: `read_func` was accepted but silently dropped — __getitem__
    always called `read_image_tensor` directly, so custom readers were
    ignored. It is now stored and used (default behavior is unchanged).
    """

    def __init__(self, file_paths: Iterable, transform=None, read_func: Callable=read_image_tensor):
        self.file_paths = file_paths
        self.transform = transform
        self.read_func = read_func

    def __getitem__(self, idx: int) -> tuple:
        # NOTE: file is expected to expose `.name` (e.g. a pathlib.Path).
        file = self.file_paths[idx]
        img = self.read_func(file, self.transform)
        return (img, file.name)

    def __len__(self) -> int:
        return len(self.file_paths)
def class_balance(data_path: str, split_type: str):
    """Print per-task class balance for 10 predetermined CV folds.

    Relies on a module-level argparse-style `args` namespace (data_path,
    smiles_column, target_columns, ...) — presumably populated by the
    script entry point; confirm before reusing this function elsewhere.
    Prints per-fold class sizes for train/val/test and then their
    mean +/- std across the folds that were found on disk.
    """
    (args.val_fold_index, args.test_fold_index) = (1, 2)
    args.split_type = 'predetermined'
    data = get_data(path=args.data_path, smiles_column=args.smiles_column, target_columns=args.target_columns)
    args.task_names = (args.target_columns or get_task_names(path=args.data_path, smiles_column=args.smiles_column))
    all_class_sizes = {'train': [], 'val': [], 'test': []}
    for i in range(10):
        print(f'Fold {i}')
        data_name = os.path.splitext(os.path.basename(data_path))[0]
        # Hard-coded cluster path to the precomputed fold index pickles.
        args.folds_file = f'/data/rsg/chemistry/yangk/lsc_experiments_dump_splits/data/{data_name}/{split_type}/fold_{i}/0/split_indices.pckl'
        if (not os.path.exists(args.folds_file)):
            print(f'Fold indices do not exist')
            continue
        (train_data, val_data, test_data) = split_data(data=data, split_type=args.split_type, args=args)
        for (data_split, split_name) in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
            class_sizes = get_class_sizes(data_split)
            print(f'Class sizes for {split_name}')
            # NOTE(review): this inner `i` shadows the fold index above; safe
            # only because the fold index is not used later in the iteration.
            for (i, task_class_sizes) in enumerate(class_sizes):
                print(f"{args.task_names[i]} {', '.join((f'{cls}: {(size * 100):.2f}%' for (cls, size) in enumerate(task_class_sizes)))}")
            all_class_sizes[split_name].append(class_sizes)
        print()
    # Aggregate across folds: elementwise mean and std per task/class.
    for split_name in ['train', 'val', 'test']:
        print(f'Average class sizes for {split_name}')
        (mean_class_sizes, std_class_sizes) = (np.mean(all_class_sizes[split_name], axis=0), np.std(all_class_sizes[split_name], axis=0))
        for (i, (mean_task_class_sizes, std_task_class_sizes)) in enumerate(zip(mean_class_sizes, std_class_sizes)):
            print(f"{args.task_names[i]} {', '.join((f'{cls}: {(mean_size * 100):.2f}% +/- {(std_size * 100):.2f}%' for (cls, (mean_size, std_size)) in enumerate(zip(mean_task_class_sizes, std_task_class_sizes))))}")
def load_image(filename, is_srgb=True):
    """Load an image file as a float array scaled to [0, 1].

    Args:
        filename: path to the image; a falsy value raises ValueError.
        is_srgb: when True, convert the loaded sRGB values to linear RGB.

    Returns:
        float64 ndarray in [0, 1] (linear RGB when is_srgb is True).

    Raises:
        ValueError: if *filename* is empty/None.
    """
    if (not filename):
        raise ValueError('Empty filename')
    # Fixed: `np.float` was removed in NumPy 1.24; it was an alias for the
    # builtin float (float64), so np.float64 preserves the dtype exactly.
    image = (np.asarray(Image.open(filename)).astype(np.float64) / 255.0)
    if is_srgb:
        return srgb_to_rgb(image)
    else:
        return image
class SubGymMarketsDailyInvestorEnv_v0(AbidesGymMarketsEnv):
    """Gym environment for a daily-investor agent in an ABIDES market.

    At every wakeup (every `timestep_duration`) the agent observes
    [holdings, book imbalance, spread, direction feature, padded mid-price
    returns] and picks one of three actions: market-buy, hold, or
    market-sell `order_fixed_size` shares.

    Review fixes applied to this block:
      * restored the `@raw_state_to_state_pre_process` /
        `@raw_state_pre_process` decorators, which had degenerated into
        bare `_state..._pre_process` expression statements (NameError);
      * `num_ns_day` was multiplied by `.0` (zero) instead of `1e9`,
        zeroing the day length and making the per-step reward
        normalisation divide by zero;
      * `raw_state_to_info` read the bid book for both order-book sides;
        it now reads the side actually being iterated.
    """

    # Decorators that strip the simulator's internal buffers from raw state.
    raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
    raw_state_to_state_pre_process = markets_agent_utils.ignore_mkt_data_buffer_decorator

    def __init__(self, background_config: str='rmsc04', mkt_close: str='16:00:00', timestep_duration: str='60s', starting_cash: int=1000000, order_fixed_size: int=10, state_history_length: int=4, market_data_buffer_length: int=5, first_interval: str='00:05:00', reward_mode: str='dense', done_ratio: float=0.3, debug_mode: bool=False, background_config_extra_kvargs={}) -> None:
        # NOTE(review): the mutable default `background_config_extra_kvargs={}`
        # is only ever read (via .update below), so it is benign here, but a
        # None sentinel would be the conventional form.
        self.background_config: Any = importlib.import_module('abides_markets.configs.{}'.format(background_config), package=None)
        self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
        self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
        self.starting_cash: int = starting_cash
        self.order_fixed_size: int = order_fixed_size
        self.state_history_length: int = state_history_length
        self.market_data_buffer_length: int = market_data_buffer_length
        self.first_interval: NanosecondTime = str_to_ns(first_interval)
        self.reward_mode: str = reward_mode
        self.done_ratio: float = done_ratio
        self.debug_mode: bool = debug_mode
        # Episode ends once marked-to-market value falls to this level.
        self.down_done_condition: float = (self.done_ratio * starting_cash)
        assert (background_config in ['rmsc03', 'rmsc04', 'smc_01']), 'Select rmsc03, rmsc04 or smc_01 as config'
        assert ((self.first_interval <= str_to_ns('16:00:00')) & (self.first_interval >= str_to_ns('00:00:00'))), 'Select authorized FIRST_INTERVAL delay'
        assert ((self.mkt_close <= str_to_ns('16:00:00')) & (self.mkt_close >= str_to_ns('09:30:00'))), 'Select authorized market hours'
        assert (reward_mode in ['sparse', 'dense']), 'reward_mode needs to be dense or sparse'
        assert ((self.timestep_duration <= str_to_ns('06:30:00')) & (self.timestep_duration >= str_to_ns('00:00:00'))), 'Select authorized timestep_duration'
        assert ((type(self.starting_cash) == int) & (self.starting_cash >= 0)), 'Select positive integer value for starting_cash'
        assert ((type(self.order_fixed_size) == int) & (self.order_fixed_size >= 0)), 'Select positive integer value for order_fixed_size'
        assert ((type(self.state_history_length) == int) & (self.state_history_length >= 0)), 'Select positive integer value for order_fixed_size'
        assert ((type(self.market_data_buffer_length) == int) & (self.market_data_buffer_length >= 0)), 'Select positive integer value for order_fixed_size'
        assert (((type(self.done_ratio) == float) & (self.done_ratio >= 0)) & (self.done_ratio < 1)), 'Select positive float value for order_fixed_size between 0 and 1'
        assert (debug_mode in [True, False]), 'reward_mode needs to be True or False'
        background_config_args = {'end_time': self.mkt_close}
        background_config_args.update(background_config_extra_kvargs)
        super().__init__(background_config_pair=(self.background_config.build_config, background_config_args), wakeup_interval_generator=ConstantTimeGenerator(step_duration=self.timestep_duration), starting_cash=self.starting_cash, state_buffer_length=self.state_history_length, market_data_buffer_length=self.market_data_buffer_length, first_interval=self.first_interval)
        # Action space: 0 = buy, 1 = hold, 2 = sell (fixed-size market orders).
        self.num_actions: int = 3
        self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
        # State: holdings, imbalance, spread, direction + (history-1) returns.
        self.num_state_features: int = ((4 + self.state_history_length) - 1)
        self.state_highs: np.ndarray = np.array(([np.finfo(np.float32).max, 1.0, np.finfo(np.float32).max, np.finfo(np.float32).max] + ((self.state_history_length - 1) * [np.finfo(np.float32).max])), dtype=np.float32).reshape(self.num_state_features, 1)
        self.state_lows: np.ndarray = np.array(([np.finfo(np.float32).min, 0.0, np.finfo(np.float32).min, np.finfo(np.float32).min] + ((self.state_history_length - 1) * [np.finfo(np.float32).min])), dtype=np.float32).reshape(self.num_state_features, 1)
        self.observation_space: gym.Space = gym.spaces.Box(self.state_lows, self.state_highs, shape=(self.num_state_features, 1), dtype=np.float32)
        self.previous_marked_to_market = self.starting_cash

    def _map_action_space_to_ABIDES_SIMULATOR_SPACE(self, action: int) -> List[Dict[(str, Any)]]:
        """Translate a discrete gym action into ABIDES order instructions."""
        if (action == 0):
            return [{'type': 'MKT', 'direction': 'BUY', 'size': self.order_fixed_size}]
        elif (action == 1):
            return []
        elif (action == 2):
            return [{'type': 'MKT', 'direction': 'SELL', 'size': self.order_fixed_size}]
        else:
            raise ValueError(f'Action {action} is not part of the actions supported by the function.')

    @raw_state_to_state_pre_process
    def raw_state_to_state(self, raw_state: Dict[(str, Any)]) -> np.ndarray:
        """Build the (num_state_features, 1) observation from buffered raw state."""
        bids = raw_state['parsed_mkt_data']['bids']
        asks = raw_state['parsed_mkt_data']['asks']
        last_transactions = raw_state['parsed_mkt_data']['last_transaction']
        holdings = raw_state['internal_data']['holdings']
        imbalances = [markets_agent_utils.get_imbalance(b, a, depth=3) for (b, a) in zip(bids, asks)]
        mid_prices = [markets_agent_utils.get_mid_price(b, a, lt) for (b, a, lt) in zip(bids, asks, last_transactions)]
        returns = np.diff(mid_prices)
        # Left-pad the return series so the state length stays constant.
        padded_returns = np.zeros((self.state_history_length - 1))
        padded_returns[(- len(returns)):] = (returns if (len(returns) > 0) else padded_returns)
        # Fall back to the mid price when a side of the book is empty.
        best_bids = [(bids[0][0] if (len(bids) > 0) else mid) for (bids, mid) in zip(bids, mid_prices)]
        best_asks = [(asks[0][0] if (len(asks) > 0) else mid) for (asks, mid) in zip(asks, mid_prices)]
        spreads = (np.array(best_asks) - np.array(best_bids))
        direction_features = (np.array(mid_prices) - np.array(last_transactions))
        computed_state = np.array(([holdings[(- 1)], imbalances[(- 1)], spreads[(- 1)], direction_features[(- 1)]] + padded_returns.tolist()), dtype=np.float32)
        return computed_state.reshape(self.num_state_features, 1)

    @raw_state_pre_process
    def raw_state_to_reward(self, raw_state: Dict[(str, Any)]) -> float:
        """Dense mode: step change in marked-to-market value, normalised by
        order size and the number of steps in a trading day. Sparse mode: 0."""
        if (self.reward_mode == 'dense'):
            holdings = raw_state['internal_data']['holdings']
            cash = raw_state['internal_data']['cash']
            last_transaction = raw_state['parsed_mkt_data']['last_transaction']
            marked_to_market = (cash + (holdings * last_transaction))
            reward = (marked_to_market - self.previous_marked_to_market)
            reward = (reward / self.order_fixed_size)
            # 6.5 trading hours (09:30-16:00) in nanoseconds; was `* .0`,
            # which zeroed the day length and divided by zero below.
            num_ns_day = ((((16 - 9.5) * 60) * 60) * 1e9)
            step_length = self.timestep_duration
            num_steps_per_episode = (num_ns_day / step_length)
            reward = (reward / num_steps_per_episode)
            self.previous_marked_to_market = marked_to_market
            return reward
        elif (self.reward_mode == 'sparse'):
            return 0

    @raw_state_pre_process
    def raw_state_to_update_reward(self, raw_state: Dict[(str, Any)]) -> float:
        """End-of-episode reward: total episode P&L, used only in sparse mode."""
        if (self.reward_mode == 'dense'):
            return 0
        elif (self.reward_mode == 'sparse'):
            holdings = raw_state['internal_data']['holdings']
            cash = raw_state['internal_data']['cash']
            last_transaction = raw_state['parsed_mkt_data']['last_transaction']
            marked_to_market = (cash + (holdings * last_transaction))
            reward = (marked_to_market - self.starting_cash)
            reward = (reward / self.order_fixed_size)
            # Same fix as in raw_state_to_reward: day length in nanoseconds.
            num_ns_day = ((((16 - 9.5) * 60) * 60) * 1e9)
            step_length = self.timestep_duration
            num_steps_per_episode = (num_ns_day / step_length)
            reward = (reward / num_steps_per_episode)
            return reward

    @raw_state_pre_process
    def raw_state_to_done(self, raw_state: Dict[(str, Any)]) -> bool:
        """Episode ends when marked-to-market value hits the loss threshold."""
        holdings = raw_state['internal_data']['holdings']
        cash = raw_state['internal_data']['cash']
        last_transaction = raw_state['parsed_mkt_data']['last_transaction']
        marked_to_market = (cash + (holdings * last_transaction))
        done = (marked_to_market <= self.down_done_condition)
        return done

    @raw_state_pre_process
    def raw_state_to_info(self, raw_state: Dict[(str, Any)]) -> Dict[(str, Any)]:
        """Diagnostics dict (populated only when debug_mode is True)."""
        last_transaction = raw_state['parsed_mkt_data']['last_transaction']
        bids = raw_state['parsed_mkt_data']['bids']
        best_bid = (bids[0][0] if (len(bids) > 0) else last_transaction)
        asks = raw_state['parsed_mkt_data']['asks']
        best_ask = (asks[0][0] if (len(asks) > 0) else last_transaction)
        cash = raw_state['internal_data']['cash']
        current_time = raw_state['internal_data']['current_time']
        holdings = raw_state['internal_data']['holdings']
        spread = (best_ask - best_bid)
        orderbook = {'asks': {'price': {}, 'volume': {}}, 'bids': {'price': {}, 'volume': {}}}
        for (book, book_name) in [(bids, 'bids'), (asks, 'asks')]:
            for level in [0, 1, 2]:
                # Fixed: both sides previously queried `bids`.
                (price, volume) = markets_agent_utils.get_val(book, level)
                orderbook[book_name]['price'][level] = np.array([price]).reshape((- 1))
                orderbook[book_name]['volume'][level] = np.array([volume]).reshape((- 1))
        order_status = raw_state['internal_data']['order_status']
        mkt_open = raw_state['internal_data']['mkt_open']
        mkt_close = raw_state['internal_data']['mkt_close']
        last_bid = markets_agent_utils.get_last_val(bids, last_transaction)
        last_ask = markets_agent_utils.get_last_val(asks, last_transaction)
        wide_spread = (last_ask - last_bid)
        ask_spread = (last_ask - best_ask)
        bid_spread = (best_bid - last_bid)
        marked_to_market = (cash + (holdings * last_transaction))
        if (self.debug_mode == True):
            return {'last_transaction': last_transaction, 'best_bid': best_bid, 'best_ask': best_ask, 'spread': spread, 'bids': bids, 'asks': asks, 'cash': cash, 'current_time': current_time, 'holdings': holdings, 'orderbook': orderbook, 'order_status': order_status, 'mkt_open': mkt_open, 'mkt_close': mkt_close, 'last_bid': last_bid, 'last_ask': last_ask, 'wide_spread': wide_spread, 'ask_spread': ask_spread, 'bid_spread': bid_spread, 'marked_to_market': marked_to_market}
        else:
            return {}
class NMTDataSet():
    """Bilingual MT dataset wrapper.

    Resolves the standard train/dev/test file paths under ``data_path`` and
    either loads existing source/target vocabularies or builds them from the
    training data (optionally sharing one joint vocabulary for joint BPE).
    """

    def __init__(self, data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab, subword, create_vocab):
        # Standard split layout: {train,dev,test}.{lang} directly inside data_path.
        self.train_src_path = os.path.join(data_path, 'train.{}'.format(src_lang))
        self.train_tgt_path = os.path.join(data_path, 'train.{}'.format(tgt_lang))
        self.dev_src_path = os.path.join(data_path, 'dev.{}'.format(src_lang))
        self.dev_tgt_path = os.path.join(data_path, 'dev.{}'.format(tgt_lang))
        self.test_src_path = os.path.join(data_path, 'test.{}'.format(src_lang))
        self.test_tgt_path = os.path.join(data_path, 'test.{}'.format(tgt_lang))
        self.subword = subword
        if ('bpe' in subword):
            # BPE-segmented data keeps an un-segmented '.ori' copy of the
            # references for evaluation.
            self.dev_tgt_path_ori = os.path.join(data_path, 'dev.{}.ori'.format(tgt_lang))
            self.test_tgt_path_ori = os.path.join(data_path, 'test.{}.ori'.format(tgt_lang))
        else:
            self.dev_tgt_path_ori = self.dev_tgt_path
            self.test_tgt_path_ori = self.test_tgt_path
        if (not create_vocab):
            # Reuse previously saved vocab files; both must exist.
            assert ((src_vocab_path is not None) and (tgt_vocab_path is not None) and os.path.exists(src_vocab_path) and os.path.exists(tgt_vocab_path))
            (self.src_word2id, self.src_id2word) = self.load_vocab(src_vocab_path)
            (self.tgt_word2id, self.tgt_id2word) = self.load_vocab(tgt_vocab_path)
        else:
            if (subword == 'joint-bpe'):
                # Joint BPE: build one shared vocabulary over the concatenated
                # source+target training data.
                joint_path = os.path.join(data_path, 'joint.tmp')
                os.system(('cat %s %s > %s' % (self.train_src_path, self.train_tgt_path, joint_path)))
                assert (src_max_vocab == tgt_max_vocab), 'src max vocab size != tgt max vocab size'
                (word2id, id2word) = self.get_vocab(joint_path, src_max_vocab, has_pad=True)
                os.remove(joint_path)
                self.src_word2id = self.tgt_word2id = word2id
                self.src_id2word = self.tgt_id2word = id2word
            else:
                if (subword == 'sep-bpe'):
                    # Separate BPE still requires matching vocab budgets.
                    assert (src_max_vocab == tgt_max_vocab), 'src max vocab size != tgt max vocab size'
                (self.src_word2id, self.src_id2word) = self.get_vocab(self.train_src_path, src_max_vocab, has_pad=True)
                (self.tgt_word2id, self.tgt_id2word) = self.get_vocab(self.train_tgt_path, tgt_max_vocab, has_pad=True)
                if ((src_vocab_path is not None) and (tgt_vocab_path is not None)):
                    self.save_vocab(self.src_id2word, src_vocab_path)
                    self.save_vocab(self.tgt_id2word, tgt_vocab_path)
        self.src_vocab_size = len(self.src_word2id)
        self.tgt_vocab_size = len(self.tgt_word2id)
        self.src_pad_idx = self.src_word2id[SRC_PAD]
        self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
        print(f'Source vocab size={len(self.src_word2id)}, target vocab size={len(self.tgt_word2id)}')

    def load_vocab(self, path):
        """Read one token per line; line number becomes the token id."""
        word2id = {}
        i = 0
        with codecs.open(path, 'r', 'utf-8') as fin:
            for line in fin:
                word2id[line.strip()] = i
                i += 1
        id2word = {v: k for (k, v) in word2id.items()}
        return (word2id, id2word)

    def save_vocab(self, id2word, path):
        """Write tokens one per line, in id order (inverse of load_vocab)."""
        print(f'Saving vocab to {path}')
        with codecs.open(path, 'w', encoding='utf-8') as fout:
            for i in range(len(id2word)):
                fout.write((id2word[i] + '\n'))

    def get_vocab(self, path, max_vocab=(- 1), has_pad=True):
        """Build a vocabulary from a corpus file, most-frequent-first.

        Reserves slots for the special symbols (PAD when has_pad, plus UNK
        and EOS), so the effective word budget is max_vocab minus 2 or 3.
        """
        if (max_vocab > 0):
            max_vocab = ((max_vocab - 3) if has_pad else (max_vocab - 2))
        wordlist = get_sorted_wordlist(path)
        if (max_vocab > 0):
            wordlist = wordlist[:max_vocab]
        word2id = {}
        if has_pad:
            word2id[PAD] = 0
        word2id[UNK] = len(word2id)
        word2id[EOS] = len(word2id)
        for word in wordlist:
            word2id[word] = len(word2id)
        id2word = {i: word for (word, i) in word2id.items()}
        return (word2id, id2word)

    def dump_to_file(self, ms, lengths, path, post_edit=True):
        """Decode id tensors to text and write them to `path`, one sentence
        per line, stopping each sentence at EOS.

        NOTE(review): the post-edit string literals below look corrupted —
        `replace(' ', '')`, `endswith('')` (always True) and `line[-2:]`
        are almost certainly a BPE-marker cleanup (e.g. '@@ ' / '▁') whose
        non-ASCII/marker characters were stripped from this copy of the
        file. Confirm against VCS history before relying on this path.
        """
        with codecs.open(path, 'w', encoding='utf-8') as fout:
            for (m, length) in zip(ms, lengths):
                m = m.cpu().numpy()
                length = length.cpu().numpy()
                for (line, l) in zip(m, length):
                    sent = []
                    for w in line[:l]:
                        word = self.tgt_id2word[w]
                        if (word == EOS):
                            break
                        sent.append(word)
                    if (post_edit and ((self.subword == 'sep-bpe') or (self.subword == 'joint-bpe'))):
                        line = ' '.join(sent)
                        line = line.replace(' ', '').strip()
                        if line.endswith(''):
                            line = line[(- 2):]
                    elif (post_edit and (self.subword == 'joint-spm')):
                        line = ''.join(sent)
                        line = line.replace('', ' ').strip()
                    else:
                        line = ' '.join(sent)
                    fout.write((line + '\n'))
# NOTE(review): this line appears to be a class decorator that lost its
# leading '@' (likely '@<something>_registry(pattern_type=...)') — confirm
# against the original file / VCS history.
_registry(pattern_type='RemoveZeros')
class RemoveZeros(Pattern):
    """Graph pattern pass: eliminate Zeros nodes whose only effect is feeding
    an (elementwise) Add, rewiring the Add's consumers to the other operand.
    Only applies to models whose framework is 'torch'."""

    def __call__(self, model):
        if (model.framework_modeling_config['framework'] != 'torch'):
            # Pass is torch-specific; leave other frameworks untouched.
            return model
        remove_list = []
        node_idx = 0
        while (node_idx < len(model.nodes)):
            node = model.nodes[node_idx]
            if (node.op_type == 'Zeros'):
                for dest_op_name in node.output_tensors[0].dest_op:
                    dest_node = model.get_node_by_name(dest_op_name)
                    # zeros + x == x, so the Zeros node and the Add are both dead.
                    if ((dest_node.op_type == 'BinaryAdd') or (dest_node.op_type == 'Add')):
                        remove_list.append(node.name)
                        remove_list.append(dest_node.name)
                        # Find the Add's *other* input (the non-zero operand).
                        ori_tensor = None
                        for in_tensor in dest_node.input_tensors:
                            if (in_tensor.name != node.output_tensors[0].name):
                                ori_tensor = in_tensor
                        # Rewire every consumer of the Add's output to take the
                        # surviving operand directly.
                        for add_dest_op_name in dest_node.output_tensors[0].dest_op:
                            add_dest_node = model.get_node_by_name(add_dest_op_name)
                            for in_tensor_idx in range(len(add_dest_node.input_tensors)):
                                if (add_dest_node.input_tensors[in_tensor_idx].name == dest_node.output_tensors[0].name):
                                    add_dest_node.input_tensors[in_tensor_idx] = copy.deepcopy(ori_tensor)
                            # Keep the tensor's consumer list consistent with the rewiring.
                            if (dest_node.name in ori_tensor.dest_op):
                                ori_tensor.dest_op.remove(dest_node.name)
                            ori_tensor.dest_op.append(add_dest_node.name)
            node_idx += 1
        model.remove_nodes(remove_list)
        return model
class AnchorMatcherTest(tf.test.TestCase):
    """Tests for matcher.Match: index/indicator/count accessors over a match
    result vector where >=0 means matched row, -1 unmatched, -2 ignored.

    Fix: uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed from unittest in Python 3.12).
    """

    def test_get_correct_matched_columnIndices(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indices = [0, 1, 3, 5]
        matched_column_indices = match.matched_column_indices()
        self.assertEqual(matched_column_indices.dtype, tf.int32)
        with self.test_session() as sess:
            matched_column_indices = sess.run(matched_column_indices)
            self.assertAllEqual(matched_column_indices, expected_column_indices)

    def test_get_correct_counts(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        exp_num_matched_columns = 4
        exp_num_unmatched_columns = 2
        exp_num_ignored_columns = 1
        num_matched_columns = match.num_matched_columns()
        num_unmatched_columns = match.num_unmatched_columns()
        num_ignored_columns = match.num_ignored_columns()
        self.assertEqual(num_matched_columns.dtype, tf.int32)
        self.assertEqual(num_unmatched_columns.dtype, tf.int32)
        self.assertEqual(num_ignored_columns.dtype, tf.int32)
        with self.test_session() as sess:
            (num_matched_columns_out, num_unmatched_columns_out, num_ignored_columns_out) = sess.run([num_matched_columns, num_unmatched_columns, num_ignored_columns])
            self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
            self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
            self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)

    def testGetCorrectUnmatchedColumnIndices(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indices = [2, 4]
        unmatched_column_indices = match.unmatched_column_indices()
        self.assertEqual(unmatched_column_indices.dtype, tf.int32)
        with self.test_session() as sess:
            unmatched_column_indices = sess.run(unmatched_column_indices)
            self.assertAllEqual(unmatched_column_indices, expected_column_indices)

    def testGetCorrectMatchedRowIndices(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_row_indices = [3, 1, 0, 5]
        matched_row_indices = match.matched_row_indices()
        self.assertEqual(matched_row_indices.dtype, tf.int32)
        with self.test_session() as sess:
            matched_row_inds = sess.run(matched_row_indices)
            self.assertAllEqual(matched_row_inds, expected_row_indices)

    def test_get_correct_ignored_column_indices(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indices = [6]
        ignored_column_indices = match.ignored_column_indices()
        self.assertEqual(ignored_column_indices.dtype, tf.int32)
        with self.test_session() as sess:
            ignored_column_indices = sess.run(ignored_column_indices)
            self.assertAllEqual(ignored_column_indices, expected_column_indices)

    def test_get_correct_matched_column_indicator(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indicator = [True, True, False, True, False, True, False]
        matched_column_indicator = match.matched_column_indicator()
        self.assertEqual(matched_column_indicator.dtype, tf.bool)
        with self.test_session() as sess:
            matched_column_indicator = sess.run(matched_column_indicator)
            self.assertAllEqual(matched_column_indicator, expected_column_indicator)

    def test_get_correct_unmatched_column_indicator(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indicator = [False, False, True, False, True, False, False]
        unmatched_column_indicator = match.unmatched_column_indicator()
        self.assertEqual(unmatched_column_indicator.dtype, tf.bool)
        with self.test_session() as sess:
            unmatched_column_indicator = sess.run(unmatched_column_indicator)
            self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)

    def test_get_correct_ignored_column_indicator(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indicator = [False, False, False, False, False, False, True]
        ignored_column_indicator = match.ignored_column_indicator()
        self.assertEqual(ignored_column_indicator.dtype, tf.bool)
        with self.test_session() as sess:
            ignored_column_indicator = sess.run(ignored_column_indicator)
            self.assertAllEqual(ignored_column_indicator, expected_column_indicator)

    def test_get_correct_unmatched_ignored_column_indices(self):
        match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
        match = matcher.Match(match_results)
        expected_column_indices = [2, 4, 6]
        unmatched_ignored_column_indices = match.unmatched_or_ignored_column_indices()
        self.assertEqual(unmatched_ignored_column_indices.dtype, tf.int32)
        with self.test_session() as sess:
            unmatched_ignored_column_indices = sess.run(unmatched_ignored_column_indices)
            self.assertAllEqual(unmatched_ignored_column_indices, expected_column_indices)

    def test_all_columns_accounted_for(self):
        # Random match vector: matched/unmatched/ignored indices must
        # partition [0, num_matches).
        num_matches = 10
        match_results = tf.random_uniform([num_matches], minval=-2, maxval=5, dtype=tf.int32)
        match = matcher.Match(match_results)
        matched_column_indices = match.matched_column_indices()
        unmatched_column_indices = match.unmatched_column_indices()
        ignored_column_indices = match.ignored_column_indices()
        with self.test_session() as sess:
            (matched, unmatched, ignored) = sess.run([matched_column_indices, unmatched_column_indices, ignored_column_indices])
        all_indices = np.hstack((matched, unmatched, ignored))
        all_indices_sorted = np.sort(all_indices)
        self.assertAllEqual(all_indices_sorted, np.arange(num_matches, dtype=np.int32))
def smooth_clip(x, v, smoothing, max_iters=200):
    """Iteratively damp a perturbation so that x + v stays inside [0, 1].

    On each iteration, masks of over-/under-shooting pixels are smoothed by
    the caller-supplied `smoothing` callable, normalized to [0, 1], and used
    to nudge v down (where x+v > 1) and up (where x+v < 0). Stops once no
    pixel violates the bounds or after `max_iters` iterations.

    Returns the adjusted perturbation (the inputs are not modified).
    """
    base = copy.deepcopy(x)
    vec = copy.deepcopy(v)
    for _ in range(max_iters):
        candidate = base + vec
        over_mask = (candidate - 1.0) >= 0
        under_mask = (candidate - 0.0) <= 0
        # Step sizes proportional to the worst violation, floored at 0.01.
        over_step = np.maximum(((candidate - 1.0) * 0.1).max(), 0.01)
        under_step = np.maximum((((candidate - 0.0) * 0.1) * (-1.0)).max(), 0.01)
        over_mask = smoothing(over_mask)
        under_mask = smoothing(under_mask)
        # Normalize smoothed masks into [0, 1] (epsilon guards empty masks).
        over_mask = over_mask / (np.max(over_mask) + 1e-12)
        under_mask = under_mask / (np.max(under_mask) + 1e-12)
        vec = (vec - (over_mask * over_step)) + (under_mask * under_step)
        # Re-check the bounds with the updated perturbation.
        candidate = base + vec
        violations = ((candidate - 1.0) >= 0).sum() + ((candidate - 0.0) <= 0).sum()
        if not (violations > 0):
            break
    return vec
class ValorCaptionEvalDataset(BaseDataset):
    """Evaluation dataset that pairs processed video frames with the matching
    audio track for audio-visual captioning."""

    def __init__(self, vis_processor, text_processor, aud_processor, vis_root, aud_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
        self.aud_processor = aud_processor
        self.aud_root = aud_root

    def __getitem__(self, index):
        ann = self.annotation[index]
        # Resolve the video filename: explicit 'video' entry, or derive it
        # from the video id.
        if 'video' in ann.keys():
            vname = ann['video']
        else:
            vname = ann['video_id'] + '.mp4'
        video = self.vis_processor(os.path.join(self.vis_root, vname))
        # Resolve the audio filename: explicit 'audio' entry, or swap the
        # video extension for .wav.
        if 'audio' in ann.keys():
            aname = ann['audio']
        else:
            aname = vname.replace('mp4', 'wav')
        auds = self.aud_processor(os.path.join(self.aud_root, aname))
        if 'image_id' in ann.keys():
            sample_id = ann['image_id']
        else:
            sample_id = ann['video_id']
        return {'video': video, 'audio': auds, 'image_id': sample_id}
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (two-sentence entailment, TSV splits)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensorflow-datasets tensor dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence1'].numpy().decode('utf-8'),
            tensor_dict['sentence2'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        """Turn parsed TSV rows into InputExamples (test rows are unlabeled)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # First row is the TSV header.
                continue
            label = None if set_type == 'test' else line[-1]
            examples.append(InputExample(guid=f'{set_type}-{line[0]}', text_a=line[1], text_b=line[2], label=label))
        return examples
# NOTE(review): the original line was the bare tuple `('/topics', methods=['GET'])`,
# a syntax error — the decorator prefix was evidently lost. Reconstructed as a
# Flask route; confirm the application object is actually named `app`.
@app.route('/topics', methods=['GET'])
def topics():
    """Return all topics from the database as JSON."""
    topics = db.get_topics()
    return make_response(jsonify({'topics': topics}), 200)
def loadZipToMem(zip_file, csv_name):
    """Load a dataset zip archive entirely into memory.

    Args:
        zip_file: path to the zip archive.
        csv_name: archive member holding the CSV index of samples.

    Returns:
        (data, train): `data` maps every member name to its raw bytes;
        `train` is the CSV parsed into a list of comma-split rows
        (empty lines skipped).
    """
    print('Loading dataset zip file...', end='')
    from zipfile import ZipFile
    # Context manager closes the archive handle (the original leaked it).
    with ZipFile(zip_file) as input_zip:
        data = {name: input_zip.read(name) for name in input_zip.namelist()}
    # Direct list comprehension instead of list(generator).
    train = [row.split(',') for row in data[csv_name].decode('utf-8').split('\n') if len(row) > 0]
    print('Loaded ({0}) data.'.format(len(train)))
    return (data, train)
def execute_indent_transformation(list_transformed_code):
    """Apply the indentation transforms recorded in `globals` to each file's code.

    For every file in globals.list_trans_indent_modified_file, prepends
    4*level spaces to each recorded line index, then writes the re-joined
    text back into `list_transformed_code` (mutated in place and returned).
    """
    for index, file_path in enumerate(globals.list_trans_indent_modified_file):
        trans_location_idxs = globals.list_trans_indent_location_idxs[index]
        trans_indent_level = globals.list_trans_indent_level[index]
        file_path_idx = globals.list_code_path.index(file_path)
        lines_transformed = list_transformed_code[file_path_idx].split('\n')
        # Pair each target line with its indent level directly. The original
        # used trans_location_idxs.index(idx), which is O(n^2) and resolves to
        # the FIRST occurrence when a line index appears more than once.
        for idx, this_indent_level in zip(trans_location_idxs, trans_indent_level):
            lines_transformed[idx] = ((' ' * 4) * this_indent_level) + lines_transformed[idx]
        # '\n'.join is exactly the original append-'\n'-then-drop-last-char dance.
        list_transformed_code[file_path_idx] = '\n'.join(lines_transformed)
    return list_transformed_code
def get_image_metadata_from_bytesio(input, size, file_path=None):
    """Sniff an image's type and dimensions from a seekable byte stream.

    Supports GIF, PNG, JPEG, BMP, TIFF and ICO by inspecting magic bytes;
    raises UnknownImageFormat for anything else or on parse failures.

    Args:
        input: seekable binary stream positioned at the start of the image.
        size: total byte size of the stream's content.
        file_path: optional path recorded on the returned Image.

    Returns:
        Image(path, type, file_size, width, height).
    """
    height = -1
    width = -1
    data = input.read(26)
    msg = ' raised while trying to decode as JPEG.'
    if (size >= 10) and (data[:6] in (b'GIF87a', b'GIF89a')):
        # GIF: little-endian 16-bit dims immediately after the signature.
        imgtype = GIF
        (w, h) = struct.unpack('<HH', data[6:10])
        width = int(w)
        height = int(h)
    elif (size >= 24) and data.startswith(b'\x89PNG\r\n\x1a\n') and (data[12:16] == b'IHDR'):
        # Standard PNG: big-endian 32-bit dims at the start of the IHDR chunk.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[16:24])
        width = int(w)
        height = int(h)
    elif (size >= 16) and data.startswith(b'\x89PNG\r\n\x1a\n'):
        # Older/non-standard PNG layout with dims at offset 8.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[8:16])
        width = int(w)
        height = int(h)
    elif (size >= 2) and data.startswith(b'\xff\xd8'):
        imgtype = JPEG
        input.seek(0)
        input.read(2)
        b = input.read(1)
        try:
            # Walk the marker segments until SOF0..SOF3 (0xC0-0xC3), which
            # carry the frame height/width; 0xDA (start-of-scan) ends the scan.
            while b and (ord(b) != 218):
                while ord(b) != 255:
                    b = input.read(1)
                while ord(b) == 255:
                    b = input.read(1)
                if (ord(b) >= 192) and (ord(b) <= 195):
                    input.read(3)
                    (h, w) = struct.unpack('>HH', input.read(4))
                    break
                else:
                    # Skip this segment using its 2-byte length field.
                    input.read(int(struct.unpack('>H', input.read(2))[0]) - 2)
                b = input.read(1)
            width = int(w)
            height = int(h)
        except struct.error:
            raise UnknownImageFormat('StructError' + msg)
        except ValueError:
            raise UnknownImageFormat('ValueError' + msg)
        except Exception as e:
            raise UnknownImageFormat(e.__class__.__name__ + msg)
    elif (size >= 26) and data.startswith(b'BM'):
        imgtype = 'BMP'
        headersize = struct.unpack('<I', data[14:18])[0]
        if headersize == 12:
            # Old BITMAPCOREHEADER: 16-bit dims.
            (w, h) = struct.unpack('<HH', data[18:22])
            width = int(w)
            height = int(h)
        elif headersize >= 40:
            (w, h) = struct.unpack('<ii', data[18:26])
            width = int(w)
            # Height is negative for top-down bitmaps.
            height = abs(int(h))
        else:
            raise UnknownImageFormat('Unkown DIB header size:' + str(headersize))
    elif (size >= 8) and (data[:4] in (b'II*\x00', b'MM\x00*')):
        imgtype = TIFF
        byteOrder = data[:2]
        # BUG FIX: data[:2] is bytes; comparing against the str 'MM' was always
        # False in Python 3, so big-endian TIFFs were decoded little-endian.
        boChar = '>' if (byteOrder == b'MM') else '<'
        # TIFF field type -> (size in bytes, struct format).
        tiffTypes = {1: (1, (boChar + 'B')), 2: (1, (boChar + 'c')), 3: (2, (boChar + 'H')), 4: (4, (boChar + 'L')), 5: (8, (boChar + 'LL')), 6: (1, (boChar + 'b')), 7: (1, (boChar + 'c')), 8: (2, (boChar + 'h')), 9: (4, (boChar + 'l')), 10: (8, (boChar + 'll')), 11: (4, (boChar + 'f')), 12: (8, (boChar + 'd'))}
        ifdOffset = struct.unpack((boChar + 'L'), data[4:8])[0]
        try:
            countSize = 2
            input.seek(ifdOffset)
            ec = input.read(countSize)
            ifdEntryCount = struct.unpack((boChar + 'H'), ec)[0]
            ifdEntrySize = 12
            for i in range(ifdEntryCount):
                entryOffset = (ifdOffset + countSize) + (i * ifdEntrySize)
                input.seek(entryOffset)
                tag = input.read(2)
                tag = struct.unpack((boChar + 'H'), tag)[0]
                if (tag == 256) or (tag == 257):
                    # Tags 256/257 are ImageWidth/ImageLength.
                    type = input.read(2)
                    type = struct.unpack((boChar + 'H'), type)[0]
                    if type not in tiffTypes:
                        raise UnknownImageFormat('Unkown TIFF field type:' + str(type))
                    typeSize = tiffTypes[type][0]
                    typeChar = tiffTypes[type][1]
                    input.seek(entryOffset + 8)
                    value = input.read(typeSize)
                    value = int(struct.unpack(typeChar, value)[0])
                    if tag == 256:
                        width = value
                    else:
                        height = value
                    if (width > -1) and (height > -1):
                        break
        except Exception as e:
            raise UnknownImageFormat(str(e))
    elif size >= 2:
        # Last resort: try ICO (no magic bytes beyond a zero reserved field).
        imgtype = 'ICO'
        input.seek(0)
        reserved = input.read(2)
        if 0 != struct.unpack('<H', reserved)[0]:
            raise UnknownImageFormat(FILE_UNKNOWN)
        format = input.read(2)
        assert 1 == struct.unpack('<H', format)[0]
        num = input.read(2)
        num = struct.unpack('<H', num)[0]
        if num > 1:
            import warnings
            warnings.warn('ICO File contains more than one image')
        # Single-byte dims of the first directory entry.
        # NOTE(review): per the ICO format a stored 0 means 256 — this code
        # reports it as 0; confirm whether that matters for callers.
        w = input.read(1)
        h = input.read(1)
        width = ord(w)
        height = ord(h)
    else:
        raise UnknownImageFormat(FILE_UNKNOWN)
    return Image(path=file_path, type=imgtype, file_size=size, width=width, height=height)
class SplinterModelTester():
    """Helper that builds tiny Splinter configs/inputs and runs shape checks
    on behalf of a parent unittest case (results asserted via self.parent)."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # Store every knob verbatim; defaults describe a deliberately tiny model
        # so tests run fast.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a SplinterConfig plus random input tensors (mask/type ids and
        labels are None unless the corresponding use_* flag is set)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = SplinterConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the bare model with decreasing sets of optional inputs and check
        the final hidden-state shape."""
        model = SplinterModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check start/end logit shapes of the QA head."""
        model = SplinterForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
        expected by the shared model-tester mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
def convert_torgb(vars, source_name, target_name):
    """Convert a TF 'ToRGB'-style layer's variables into torch tensors.

    Reads weight/mod_weight/mod_bias/bias from `vars` under `source_name`,
    re-lays them out for the torch module, and returns a dict keyed under
    `target_name`.
    """
    def _fetch(suffix):
        # Evaluate the TF variable to a numpy array.
        return vars[source_name + '/' + suffix].value().eval()

    weight = _fetch('weight')
    mod_weight = _fetch('mod_weight')
    mod_bias = _fetch('mod_bias')
    bias = _fetch('bias')
    converted = {
        # HWIO -> (1, O, I, H, W) for the modulated conv weight.
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # Torch modulation expects the bias shifted by +1.
        'conv.modulation.bias': mod_bias + 1,
        'bias': bias.reshape((1, 3, 1, 1)),
    }
    return {target_name + '.' + key: torch.from_numpy(arr) for key, arr in converted.items()}
class MetersDict(OrderedDict):
    """An OrderedDict of named meters kept sorted by (priority, insertion order).

    Values are inserted as (priority, meter) pairs; reassignment of an
    existing key is not allowed. Keys starting with '_' are treated as
    private and excluded from get_smoothed_values().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sorted list of (priority, insertion_index, key); the insertion index
        # breaks ties so equal-priority meters keep insertion order.
        self.priorities = []

    def __setitem__(self, key, value):
        # `value` arrives as a (priority, meter) tuple.
        assert (key not in self), "MetersDict doesn't support reassignment"
        (priority, value) = value
        bisect.insort(self.priorities, (priority, len(self.priorities), key))
        super().__setitem__(key, value)
        # Re-walk the sorted priority list so the dict's iteration order
        # matches priority order.
        for (_, _, key) in self.priorities:
            self.move_to_end(key)

    def add_meter(self, key, meter, priority):
        """Register `meter` under `key` with the given priority."""
        self.__setitem__(key, (priority, meter))

    def state_dict(self):
        # Derived meters are computed on the fly, so they are not serialized.
        return [(pri, key, self[key].__class__.__name__, self[key].state_dict()) for (pri, _, key) in self.priorities if (not isinstance(self[key], MetersDict._DerivedMeter))]

    def load_state_dict(self, state_dict):
        """Rebuild the dict from state_dict(); meter classes are resolved by
        name from this module's globals()."""
        self.clear()
        self.priorities.clear()
        for (pri, key, meter_cls, meter_state) in state_dict:
            meter = globals()[meter_cls]()
            meter.load_state_dict(meter_state)
            self.add_meter(key, meter, pri)

    def get_smoothed_value(self, key: str) -> float:
        """Get a single smoothed value; derived meters are evaluated against
        the whole dict."""
        meter = self[key]
        if isinstance(meter, MetersDict._DerivedMeter):
            return meter.fn(self)
        else:
            return meter.smoothed_value

    def get_smoothed_values(self) -> Dict[(str, float)]:
        """Smoothed values for all non-private (no leading '_') meters."""
        return OrderedDict([(key, self.get_smoothed_value(key)) for key in self.keys() if (not key.startswith('_'))])

    def reset(self):
        # Derived meters hold no state of their own, so there is nothing to reset.
        for meter in self.values():
            if isinstance(meter, MetersDict._DerivedMeter):
                continue
            meter.reset()

    class _DerivedMeter(Meter):
        """A meter whose value is a function of the other meters in the dict."""

        def __init__(self, fn):
            self.fn = fn

        def reset(self):
            pass
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that keeps the top-3 checkpoints for `metric`.

    Args:
        output_dir: directory checkpoints are written to.
        metric: one of 'rouge2', 'bleu', 'em' (monitored as 'val_<metric>').

    Raises:
        NotImplementedError: for any unsupported metric name.
    """
    # Filename template per supported metric (dict dispatch instead of elif chain).
    filename_templates = {
        'rouge2': '{val_avg_rouge2:.4f}-{step_count}',
        'bleu': '{val_avg_bleu:.4f}-{step_count}',
        'em': '{val_avg_em:.4f}-{step_count}',
    }
    if metric not in filename_templates:
        # Fixed message: the original omitted 'em' even though it is supported.
        raise NotImplementedError(f'seq2seq callbacks only support rouge2, bleu and em, got {metric}, You can make your own by adding to this function.')
    exp = filename_templates[metric]
    checkpoint_callback = ModelCheckpoint(dirpath=output_dir, filename=exp, monitor=f'val_{metric}', mode='max', save_top_k=3, period=1)
    return checkpoint_callback
def download_google_drive_url(url: str, output_path: str, output_file_name: str):
    """Download a (possibly large) file from a Google Drive URL.

    Handles Drive's virus-scan interstitial: a first request collects the
    'download_warning' cookie, whose value is appended as a confirm token
    before the real download. Shows a tqdm progress bar sized from the
    Content-length header and writes to output_path/output_file_name.
    """
    import requests
    with requests.Session() as session:
        # Probe request: Drive sets a 'download_warning*' cookie for files it
        # cannot virus-scan; its value is the confirm token.
        with session.get(url, stream=True, allow_redirects=True) as response:
            for (k, v) in response.cookies.items():
                if k.startswith('download_warning'):
                    url = ((url + '&confirm=') + v)
        # Actual download with the (possibly confirmed) URL.
        with session.get(url, stream=True, verify=True) as response:
            makedir(output_path)
            path = os.path.join(output_path, output_file_name)
            # Content-length may be absent; 0 just disables the bar total.
            total_size = int(response.headers.get('Content-length', 0))
            with open(path, 'wb') as file:
                from tqdm import tqdm
                with tqdm(total=total_size) as progress_bar:
                    for block in response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE):
                        file.write(block)
                        progress_bar.update(len(block))
def get_latest_match(pattern: Union[Path, str]) -> Path:
    """Return the most recently modified path matching `pattern`.

    The pattern is expanded with a recursive glob; raises ValueError when
    nothing matches (empty `max`).
    """
    def _mtime(candidate: Path) -> float:
        return candidate.stat().st_mtime

    candidates = [Path(hit) for hit in glob.glob(str(pattern), recursive=True)]
    return max(candidates, key=_mtime)
def logger_info(logger_name, log_path='default_logger.log'):
    """Configure `logger_name` with INFO level, a file handler appending to
    `log_path`, and a stream handler — unless it already has handlers
    (directly or via propagation), in which case it is left untouched."""
    log = logging.getLogger(logger_name)
    if log.hasHandlers():
        print('LogHandlers exist!')
        return
    print('LogHandlers setup!')
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d : %(message)s', datefmt='%y-%m-%d %H:%M:%S')
    log.setLevel(logging.INFO)
    # File handler: append mode, shared timestamped format.
    file_handler = logging.FileHandler(log_path, mode='a')
    file_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    # Console handler with the same format.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    log.addHandler(stream_handler)
def unixify(paths):
    """Run dos2unix over every file containing '.py' or '.sh' in its name,
    directly inside each directory in `paths` (non-recursive).

    Output of dos2unix is discarded; failures are ignored.
    """
    for path in paths:
        for file in os.listdir(path):
            if ('.py' in file) or ('.sh' in file):
                # os.path.join fixes the original bare concatenation, which
                # produced 'dirfile.py' when `path` lacked a trailing slash.
                target = os.path.join(path, file)
                _ = os.system('bash -c "dos2unix ' + target + ' 2&> /dev/null"')
def main(args):
    """Evaluate a two-step goal-prediction model on a dataset's test split.

    Loads the dataset named by args['data_file'] (.npz or .h5f), builds the
    model from args, and for each test sample encodes the current and initial
    frames, predicts two successive goal latents, decodes them, and
    accumulates mean absolute reconstruction error against the target frames.
    Progress statistics are printed per sample.
    """
    ConfigureGPU(args)
    np.random.seed(0)
    # Split 'name.parts.ext' into root ('name.parts') and extension.
    data_file_info = args['data_file'].split('.')
    data_type = data_file_info[(- 1)]
    root = ''
    for (i, tok) in enumerate(data_file_info[:(- 1)]):
        if ((i < (len(data_file_info) - 1)) and (i > 0)):
            root += '.'
        root += tok
    if (data_type == 'npz'):
        dataset = NpzGeneratorDataset(root)
        data = dataset.load(success_only=args['success_only'])
    elif (data_type == 'h5f'):
        dataset = H5fGeneratorDataset(root)
        data = dataset.load(success_only=args['success_only'])
    else:
        raise NotImplementedError(('data type not implemented: %s' % data_type))
    if (('features' not in args) or (args['features'] is None)):
        raise RuntimeError('Must provide features specification')
    features_arg = args['features']
    if (('model' not in args) or (args['model'] is None)):
        raise RuntimeError('Must provide a model to load')
    model_arg = args['model']
    model = MakeModel(taskdef=None, **args)
    model.validate = True
    model.load(world=None, **data)
    train_generator = model.trainGenerator(dataset)
    test_generator = model.testGenerator(dataset)
    # Set True to visualize input/predicted frames per sample.
    show = False
    correct_g1 = 0
    correct_g2 = 0
    total = 0.0
    err1_sum = 0.0
    err2_sum = 0.0
    # NOTE(review): v_sum is never updated below, so the printed 'avg' is
    # always 0.0 — likely leftover from removed code.
    v_sum = 0.0
    ii = 0
    for filename in dataset.test:
        print(filename)
        data = dataset.loadFile(filename)
        (features, targets) = model._getData(**data)
        # features: initial frame, current frame, two option labels, input option.
        [I0, I, o1, o2, oin] = features
        length = I0.shape[0]
        [I_target, I_target2] = targets[:2]
        for i in range(length):
            ii += 1
            xi = np.expand_dims(I[i], axis=0)
            x0 = np.expand_dims(I0[i], axis=0)
            prev_option = np.array([oin[i]])
            # Encode current/initial frames, then chain two goal transforms.
            h = model.encode(xi)
            h0 = model.encode(x0)
            h_goal = model.transform(h0, h, np.array([o1[i]]))
            h_goal2 = model.transform(h0, h_goal, np.array([o2[i]]))
            xg = model.decode(h_goal)
            xg2 = model.decode(h_goal2)
            if show:
                plt.subplot(1, 4, 1)
                plt.imshow(x0[0])
                plt.subplot(1, 4, 2)
                plt.imshow(xi[0])
                plt.subplot(1, 4, 3)
                plt.imshow(xg[0])
                plt.subplot(1, 4, 4)
                plt.imshow(xg2[0])
                plt.show()
            # Mean absolute pixel error for each predicted goal frame.
            err1 = np.mean(np.abs((xg[0] - I_target[i])))
            err2 = np.mean(np.abs((xg2[0] - I_target2[i])))
            err1_sum += err1
            err2_sum += err2
            total += 1.0
            mean1 = (err1_sum / total)
            mean2 = (err2_sum / total)
            print(o1[i], o2[i], 'means =', mean1, mean2, 'avg =', (v_sum / total))
# NOTE(review): reconstructed decorator — the original line read '_grad()', a
# NameError; the '@torch.no' prefix was evidently lost. Validation should run
# without building autograd graphs.
@torch.no_grad()
def valid_step(model, criterion, val_loader):
    """Run one full validation pass.

    Returns:
        dict with mean 'loss' (per batch) and 'accuracy' (per sample).

    NOTE(review): reads the module-global `args` for the target device —
    confirm it is set before this is called.
    """
    model.eval()
    avg_loss, avg_acc = 0.0, 0.0
    for x_imgs, labels in val_loader:
        x_imgs, labels = x_imgs.to(args.device), labels.to(args.device)
        outputs = model(x_imgs)
        loss = criterion(outputs, labels)
        avg_loss += loss.item()
        _, preds = torch.max(outputs, 1)
        avg_acc += torch.sum(preds == labels.data).item()
    return {'loss': avg_loss / len(val_loader), 'accuracy': avg_acc / len(val_loader.dataset)}
class MLPBase(NNBase):
    """Two-hidden-layer tanh MLP actor-critic trunk (optionally preceded by
    the recurrent GRU provided by NNBase)."""

    def __init__(self, num_inputs: int, recurrent: bool=False, hidden_size: int=64) -> None:
        super().__init__(recurrent, num_inputs, hidden_size)
        if recurrent:
            # The GRU output (hidden_size) feeds the MLPs instead of raw inputs.
            num_inputs = hidden_size
        # Orthogonal weight init with gain sqrt(2), zero bias (project helper).
        init_ = (lambda m: init(m, nn.init.orthogonal_, (lambda x: nn.init.constant_(x, 0)), np.sqrt(2)))
        self.actor = nn.Sequential(init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(), init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        self.critic = nn.Sequential(init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(), init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        # Scalar state-value head on top of the critic trunk.
        self.critic_linear = init_(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        """Returns (state value, actor hidden features, updated RNN state)."""
        x = inputs
        if self.is_recurrent:
            (x, rnn_hxs) = self._forward_gru(x, rnn_hxs, masks)
        hidden_critic = self.critic(x)
        hidden_actor = self.actor(x)
        return (self.critic_linear(hidden_critic), hidden_actor, rnn_hxs)
class DatasetCache(data.Dataset):
    """Image-folder dataset with an optional in-memory cache.

    While use_cache is False, each __getitem__ loads from disk AND appends
    to the cache lists; after set_use_cache(True), items are served from
    the cache instead.

    NOTE(review): cache positions only line up with sample indices if every
    index is read exactly once, in order, before enabling the cache —
    repeated or out-of-order access desynchronizes them. Confirm the
    intended access pattern before relying on cached reads.
    """

    def __init__(self, root, load_bytes=False, transform=None, class_map='', use_cache=False):
        class_to_idx = None
        if class_map:
            class_to_idx = load_class_map(class_map, root)
        images, class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx)
        if len(images) == 0:
            raise RuntimeError('Found 0 images in subfolders of: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.samples = images
        self.imgs = self.samples  # torchvision-compatible alias
        self.class_to_idx = class_to_idx
        self.load_bytes = load_bytes
        self.transform = transform
        self.use_cache = use_cache
        self.cached_img_data = []
        self.cached_target_data = []

    def __getitem__(self, index):
        if not self.use_cache:
            path, target = self.samples[index]
            if self.load_bytes:
                # Context manager closes the handle (the original's
                # open(path, 'rb').read() leaked the file descriptor).
                with open(path, 'rb') as f:
                    img = f.read()
            else:
                img = Image.open(path).convert('RGB')
            self.cached_img_data.append(img)
            self.cached_target_data.append(target)
        else:
            img = self.cached_img_data[index]
            target = self.cached_target_data[index]
        if self.transform is not None:
            img = self.transform(img)
        if target is None:
            target = torch.zeros(1).long()
        return img, target

    def set_use_cache(self, use_cache):
        """Toggle cached reads; disabling also discards the cache."""
        if not use_cache:
            self.cached_img_data = []
            self.cached_target_data = []
        self.use_cache = use_cache

    def __len__(self):
        return len(self.imgs)

    def filenames(self, indices=None, basename=False):
        """Sample file paths, optionally restricted to `indices` and/or
        reduced to basenames.

        `indices=None` replaces the original mutable default `[]`
        (identical falsy behavior, without the shared-list pitfall).
        """
        if indices:
            if basename:
                return [os.path.basename(self.samples[i][0]) for i in indices]
            else:
                return [self.samples[i][0] for i in indices]
        elif basename:
            return [os.path.basename(x[0]) for x in self.samples]
        else:
            return [x[0] for x in self.samples]
def kaiming_uniform_(tensor, gain=1.0, mode='fan_in'):
    """Fill `tensor` in place from U(-bound, bound), with the bound chosen so
    the resulting variance is gain / fan (Kaiming-style uniform init)."""
    fan = _calculate_correct_fan(tensor, mode)
    # Uniform on [-b, b] has variance b^2/3, so b = sqrt(3 * var).
    bound = math.sqrt(3.0 * (gain / max(1.0, fan)))
    with torch.no_grad():
        return tensor.uniform_(-bound, bound)
class TestAnchorGenerator(unittest.TestCase):
    """Checks anchor generators against hand-computed boxes on a 1x2 feature map."""

    def test_default_anchor_generator(self):
        # 2 sizes x 3 aspect ratios -> 6 axis-aligned anchors per location
        cfg = get_cfg()
        cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
        cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
        anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)])
        num_images = 2
        features = {'stage3': torch.rand(num_images, 96, 1, 2)}
        anchors = anchor_generator([features['stage3']])
        # XYXY boxes: first 6 rows are anchors centered at the first cell,
        # the next 6 are the same shapes shifted by the stride (4 px) in x
        expected_anchor_tensor = torch.tensor([[(- 32.0), (- 8.0), 32.0, 8.0], [(- 16.0), (- 16.0), 16.0, 16.0], [(- 8.0), (- 32.0), 8.0, 32.0], [(- 64.0), (- 16.0), 64.0, 16.0], [(- 32.0), (- 32.0), 32.0, 32.0], [(- 16.0), (- 64.0), 16.0, 64.0], [(- 28.0), (- 8.0), 36.0, 8.0], [(- 12.0), (- 16.0), 20.0, 16.0], [(- 4.0), (- 32.0), 12.0, 32.0], [(- 60.0), (- 16.0), 68.0, 16.0], [(- 28.0), (- 32.0), 36.0, 32.0], [(- 12.0), (- 64.0), 20.0, 64.0]])
        for i in range(num_images):
            # anchors are shared across images in the batch
            assert torch.allclose(anchors[i][0].tensor, expected_anchor_tensor)

    def test_rrpn_anchor_generator(self):
        # 2 sizes x 3 aspect ratios x 2 angles -> 12 rotated anchors per location
        cfg = get_cfg()
        cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
        cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
        cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 45]]
        anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)])
        num_images = 2
        features = {'stage3': torch.rand(num_images, 96, 1, 2)}
        anchors = anchor_generator([features['stage3']])
        # (cx, cy, w, h, angle) boxes; second half is shifted by the stride in cx
        expected_anchor_tensor = torch.tensor([[0.0, 0.0, 64.0, 16.0, 0.0], [0.0, 0.0, 64.0, 16.0, 45.0], [0.0, 0.0, 32.0, 32.0, 0.0], [0.0, 0.0, 32.0, 32.0, 45.0], [0.0, 0.0, 16.0, 64.0, 0.0], [0.0, 0.0, 16.0, 64.0, 45.0], [0.0, 0.0, 128.0, 32.0, 0.0], [0.0, 0.0, 128.0, 32.0, 45.0], [0.0, 0.0, 64.0, 64.0, 0.0], [0.0, 0.0, 64.0, 64.0, 45.0], [0.0, 0.0, 32.0, 128.0, 0.0], [0.0, 0.0, 32.0, 128.0, 45.0], [4.0, 0.0, 64.0, 16.0, 0.0], [4.0, 0.0, 64.0, 16.0, 45.0], [4.0, 0.0, 32.0, 32.0, 0.0], [4.0, 0.0, 32.0, 32.0, 45.0], [4.0, 0.0, 16.0, 64.0, 0.0], [4.0, 0.0, 16.0, 64.0, 45.0], [4.0, 0.0, 128.0, 32.0, 0.0], [4.0, 0.0, 128.0, 32.0, 45.0], [4.0, 0.0, 64.0, 64.0, 0.0], [4.0, 0.0, 64.0, 64.0, 45.0], [4.0, 0.0, 32.0, 128.0, 0.0], [4.0, 0.0, 32.0, 128.0, 45.0]])
        for i in range(num_images):
            assert torch.allclose(anchors[i][0].tensor, expected_anchor_tensor)
def step_lr_scheduler(param_lr, optimizer, iter_num, gamma, stepsize, init_lr=0.001):
    """Apply a step-decay schedule: LR is multiplied by `gamma` every `stepsize` iters.

    Each param group's learning rate is the decayed base LR scaled by its
    corresponding entry in `param_lr`. Returns the (mutated) optimizer.
    """
    decayed_lr = init_lr * gamma ** (iter_num // stepsize)
    for idx, group in enumerate(optimizer.param_groups):
        group['lr'] = decayed_lr * param_lr[idx]
    return optimizer
def common_conv2d(inplanes, planes, kernel, padding, stride, norm_cfg=dict(type='BN')):
    """Build a Conv2d -> (optional norm) -> LeakyReLU(0.1) block as nn.Sequential.

    The norm layer is created via `build_norm_layer` only when `norm_cfg` is
    truthy; passing a falsy value (e.g. None) skips normalization.
    """
    layers = OrderedDict()
    layers['conv'] = nn.Conv2d(inplanes, planes, kernel_size=kernel, stride=stride, padding=padding, bias=False)
    if norm_cfg:
        norm_name, norm_layer = build_norm_layer(norm_cfg, planes)
        layers[norm_name] = norm_layer
    layers['leakyrelu'] = nn.LeakyReLU(0.1)
    return nn.Sequential(layers)
def evaluate_vit_separate(model, template, search, template_event, search_event):
    """Benchmark backbone + fusion latency: 50 warm-up iters, then 1000 timed iters.

    Prints the mean per-iteration latency in milliseconds; returns None.
    """
    warmup_iters = 50
    timed_iters = 1000
    print('testing speed ...')
    # pre-compute the four backbone embeddings once, outside the timed loop
    z = model.forward_backbone(template, image_type='template')
    x = model.forward_backbone(search, image_type='search')
    z_event = model.forward_backbone(template_event, image_type='template_event')
    x_event = model.forward_backbone(search_event, image_type='search_event')
    with torch.no_grad():
        for _ in range(warmup_iters):
            model.forward_backbone(search, image_type='search')
            model.forward_cat(z, x, z_event, x_event)
        start = time.time()
        for _ in range(timed_iters):
            model.forward_backbone(search, image_type='search')
            model.forward_cat(z, x, z_event, x_event)
        end = time.time()
    avg_lat = (end - start) / timed_iters
    print('The average overall latency is %.2f ms' % (avg_lat * 1000))
def reverse_object_order(example_records):
    """Augment each image-pair's records with subject/object-swapped copies.

    For every record a new Record is created with bbox_a/bbox_b exchanged;
    attribute labels 1 and 2 (the two directional values) are swapped while
    all other labels are copied unchanged.
    """
    augmented = collections.defaultdict(list)
    label_swap = {1: 2, 2: 1}
    for image_pair, records in example_records.items():
        augmented[image_pair].extend(records)
        for record in records:
            flipped = Record(record.bbox_b, record.bbox_a, 0, 0)
            for attr in VRDAttribute:
                label = getattr(record, attr.value)
                setattr(flipped, attr.value, label_swap.get(label, label))
            augmented[image_pair].append(flipped)
    return augmented
class Embeddings(nn.Module):
    """Token embedding lookup table, optionally scaled by sqrt(embedding_dim)."""

    def __init__(self, embedding_dim: int = 64, scale: bool = False, vocab_size: int = 0, padding_idx: int = 1, freeze: bool = False, **kwargs):
        """Create the embedding table.

        :param embedding_dim: dimensionality of each embedding vector
        :param scale: multiply outputs by sqrt(embedding_dim) (Transformer convention)
        :param vocab_size: number of rows in the table
        :param padding_idx: index whose embedding is kept at zero
        :param freeze: if True, exclude all parameters from training
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.scale = scale
        self.vocab_size = vocab_size
        self.lut = nn.Embedding(vocab_size, self.embedding_dim, padding_idx=padding_idx)
        if freeze:
            freeze_params(self)

    def forward(self, x: Tensor) -> Tensor:
        """Look up embeddings for token indices `x` (scaled if configured)."""
        if self.scale:
            return self.lut(x) * math.sqrt(self.embedding_dim)
        return self.lut(x)

    def __repr__(self):
        return '%s(embedding_dim=%d, vocab_size=%d)' % (self.__class__.__name__, self.embedding_dim, self.vocab_size)

    def load_from_file(self, embed_path: str, vocab: Vocabulary):
        """Load pre-trained word2vec-style embeddings for tokens present in `vocab`.

        The file's first line is "<vocab_size> <dim>"; each subsequent line is
        "<token> <v1> ... <vdim>". Tokens missing from the file keep their
        randomly initialized vectors.
        """
        embed_dict = {}
        with io.open(embed_path, 'r', encoding='utf-8', errors='ignore') as f_embed:
            vocab_size, d = map(int, f_embed.readline().split())
            assert self.embedding_dim == d, "Embedding dimension doesn't match."
            # iterate the file lazily instead of materializing all lines
            # with readlines(); membership test uses the dict directly
            for line in f_embed:
                tokens = line.rstrip().split(' ')
                if tokens[0] in vocab.stoi:
                    embed_dict[tokens[0]] = torch.FloatTensor([float(t) for t in tokens[1:]])
            logger.warning('Loaded {} of {} ({:%}) tokens in the pre-trained embeddings.'.format(len(embed_dict), vocab_size, (len(embed_dict) / vocab_size)))
        # copy matched vectors into the lookup table
        for idx in range(len(vocab)):
            token = vocab.itos[idx]
            if token in embed_dict:
                assert self.embedding_dim == len(embed_dict[token])
                self.lut.weight.data[idx] = embed_dict[token]
        logger.warning("Loaded {} of {} ({:%}) tokens of the JoeyNMT's vocabulary.".format(len(embed_dict), len(vocab), (len(embed_dict) / len(vocab))))
class NoOCRReaderFound(Exception):
    """Raised when no OCR backend could be loaded."""

    def __init__(self, e):
        # call super().__init__ so Exception.args is populated (the original
        # left args empty, breaking repr() and pickling round-trips)
        super().__init__(e)
        self.e = e  # underlying cause, shown in the message

    def __str__(self):
        return f'Could not load OCR Reader: {self.e}'
class Timer(object):
    """Wall-clock timer accumulating total/average duration across tic/toc calls."""

    def __init__(self):
        self.total_time = 0.0  # accumulated elapsed seconds
        self.calls = 0         # number of completed toc() calls
        self.start_time = 0.0  # timestamp of the last tic()
        self.diff = 0.0        # duration of the most recent tic..toc interval
        self.avg = 0.0         # running mean of all intervals

    def reset(self):
        """Clear all statistics (restores the state of a freshly built Timer)."""
        # use float zeros for consistency with __init__ (original mixed int/float)
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.avg = 0.0

    def tic(self):
        """Start (or restart) timing."""
        # time.time() kept (rather than perf_counter) to preserve behavior
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the running average (default) or the last interval."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.avg = self.total_time / self.calls
        return self.avg if average else self.diff
def standardized_svr(X, y, Cs=np.logspace(-7, 1, 9), n_jobs=1):
    """Grid-search a LinearSVR over C and return (coef, per-feature scale).

    The scale is the RMS magnitude of the fitted coefficient vector,
    broadcast to every feature.
    """
    n_features = X.shape[1]
    pipeline = Pipeline([('SVR', LinearSVR())])
    grid = GridSearchCV(pipeline, param_grid={'SVR__C': Cs}, n_jobs=n_jobs)
    grid.fit(X, y)
    beta_hat = grid.best_estimator_.named_steps['SVR'].coef_
    rms = norm(beta_hat) / np.sqrt(n_features)
    return beta_hat, rms * np.ones(beta_hat.size)
class MarianMTModel():
    """Placeholder used when PyTorch is not installed.

    Constructing or loading it raises via `requires_pytorch`, pointing the
    user at the missing dependency instead of failing with an ImportError.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def resnet152_mpncov_128(pretrained=False, progress=True, **kwargs):
    """ResNet-152 backbone with MPN-COV pooling and a 128-d representation.

    Delegates to `_resnet_mpncov_128` with the standard ResNet-152 Bottleneck
    layout [3, 8, 36, 3]; `pretrained`/`progress` control weight loading.
    """
    return _resnet_mpncov_128('resnet152_mpncov_128', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
class MaxPooling2DVarPropagationLayer(VarPropagationLayer):
    """Propagates diagonal variances through a MaxPooling2D layer.

    The variance entries are routed along the argmax indices of the mean
    forward pass, i.e. each output variance is the input variance of the
    element that won the max pooling.
    """
    def __init__(self, pooling_layer, use_cov=False, **kwargs):
        # argmax indices from the most recent pooling call (set in _call_diag_cov)
        self.idx = None
        # NOTE(review): `use_cov` is accepted but use_cov=False is hard-coded in
        # the super() call — only the diagonal path (_call_diag_cov) is
        # implemented here, so this may be intentional; confirm before changing.
        super(MaxPooling2DVarPropagationLayer, self).__init__(pooling_layer, use_cov=False, **kwargs)
    def _call_diag_cov(self, x):
        # pool the wrapped layer's *input* to recover the argmax routing, then
        # gather the variance tensor `x` along those indices
        (pooled, self.idx) = self._pool2d_argmax(self.layer.input, pool_size=self.layer.pool_size, strides=self.layer.strides, padding=self.layer.padding)
        out = tf_gather_batch(x, self.idx)
        return out
    def _pool2d_argmax(self, x: 'Tensor', pool_size: tuple, strides: tuple=(1, 1), padding: str='valid', data_format: str=None) -> tuple:
        """Max-pool `x` and also return the flat argmax indices.

        Handles Keras data_format conventions: converts to NHWC for
        tf.nn.max_pool_with_argmax and transposes back for channels_first.
        """
        data_format = K.common.normalize_data_format(data_format)
        (x, tf_data_format) = _preprocess_conv2d_input(x, data_format)
        padding = _preprocess_padding(padding)
        # expand pool/stride tuples to rank-4 window specs for tf.nn
        if (tf_data_format == 'NHWC'):
            strides = (((1,) + strides) + (1,))
            pool_size = (((1,) + pool_size) + (1,))
        else:
            strides = ((1, 1) + strides)
            pool_size = ((1, 1) + pool_size)
        (x, idx) = tf.nn.max_pool_with_argmax(x, pool_size, strides, padding)
        if ((data_format == 'channels_first') and (tf_data_format == 'NHWC')):
            # restore channels_first layout for both outputs
            x = tf.transpose(x, (0, 3, 1, 2))
            idx = tf.transpose(idx, (0, 3, 1, 2))
        return (x, idx)
def convolution2D(input_tensor, filters, kernel_size, strides, padding, activation, use_activation=True, use_bias=True, bn=True, if_regularization=False):
    """Conv2D -> optional BatchNorm -> optional activation ('relu' or 'mish').

    :param input_tensor: input feature map
    :param filters: number of output channels (int)
    :param kernel_size: square kernel size (int)
    :param strides: (h, w) stride tuple of length 2
    :param padding: 'same' or 'valid'
    :param activation: 'relu' or 'mish' (checked only when use_activation)
    :param if_regularization: apply L2(0.01) to kernel (and bias if use_bias)
    :raises TypeError/ValueError: on invalid arguments. Explicit raises replace
        the original asserts, which are stripped under `python -O`.
    """
    if not isinstance(kernel_size, int):
        raise TypeError('kernel_size must be an int')
    if not isinstance(filters, int):
        raise TypeError('filters must be an int')
    if not (isinstance(strides, tuple) and len(strides) == 2):
        raise TypeError('strides must be a tuple of length 2')
    if padding not in ('same', 'valid'):
        raise ValueError("padding must be 'same' or 'valid'")
    if if_regularization:
        kernel_regularizer = K.regularizers.L2(0.01)
        bias_regularizer = K.regularizers.L2(0.01) if use_bias else None
    else:
        kernel_regularizer = None
        bias_regularizer = None
    # activation is applied separately below so 'mish' can be supported
    conv_output = K.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, activation=None, use_bias=use_bias, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)(input_tensor)
    if bn:
        conv_output = BatchNormalization()(conv_output)
    if use_activation:
        if activation == 'relu':
            conv_output = tf.nn.relu(conv_output)
        elif activation == 'mish':
            conv_output = mishActivation(conv_output)
        else:
            raise ValueError('Wrong input activation mode')
    return conv_output
def setup_print_for_distributed(is_master):
    """Monkey-patch builtins.print so only the master process prints.

    Non-master ranks stay silent unless the call passes force=True, which is
    popped from kwargs before delegating to the real print.
    """
    import builtins
    original_print = builtins.print

    def rank_aware_print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            original_print(*args, **kwargs)

    builtins.print = rank_aware_print
def advance_api_run_func():
    """Integration test of atorch's coworker shared-memory dataloader API.

    Spins up a gloo process group with one coworker per node, streams a toy
    dataset through two shared-memory dataloaders, and asserts that every
    rank saw exactly the expected number of batches and element sums.
    """
    res = atorch.init_distributed('gloo', coworker_num_per_node=1)
    assert res
    data_size = 48
    batch_size = 4
    dataset = ToyDataset(data_size)
    dataloader_args = {'batch_size': batch_size, 'drop_last': True}
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
    # the coworker reads double-size batches and splits them for the trainers
    coworker_dataloader_args = {'sampler': sampler, 'batch_size': (batch_size * 2), 'drop_last': True}
    io_timeout = 5
    initialize_timeout = 15
    epoch_num = 2
    def split_batch(data, batch_size, index):
        # Recursively split a (possibly nested) batch into `batch_size` chunks
        # and return chunk `index`; preserves dict/tuple/list structure.
        if isinstance(data, torch.Tensor):
            res = torch.split(data, batch_size)
            res = res[index]
        elif isinstance(data, dict):
            res = {}
            for key in data:
                res[key] = split_batch(data[key], batch_size, index)
        elif (isinstance(data, tuple) or isinstance(data, list)):
            res = []
            for d in data:
                res.append(split_batch(d, batch_size, index))
            if isinstance(data, tuple):
                res = tuple(res)
        else:
            # unsupported leaf type
            return None
        return res
    class MyProcess():
        """Coworker-side producer: reads batches and feeds shared-memory contexts."""
        def __init__(self, dataset, dataloader_args, training_batch_size=4, epoch_num=2):
            self.dataset = dataset
            self.dataloader_args = dataloader_args
            self.epoch_num = epoch_num
            self.training_batch_size = training_batch_size
        def process_data(self, shm_context):
            dataloader = DataLoader(self.dataset, **self.dataloader_args)
            # shm_context may be a single context or a list of them
            num_shms = (len(shm_context) if (type(shm_context) == list) else 1)
            if (num_shms == 1):
                shm_context = [shm_context]
            should_stop = [False for _ in range(num_shms)]
            for _ in range(self.epoch_num):
                if all(should_stop):
                    break
                for batch in dataloader:
                    if all(should_stop):
                        break
                    # split each coworker batch into trainer-sized sub-batches
                    batches = [split_batch(batch, self.training_batch_size, i) for i in range((dataloader.batch_size // self.training_batch_size))]
                    for i in range(num_shms):
                        if should_stop[i]:
                            continue
                        shm_context[i].add_batch(batches)
                        stop_status = shm_context[i].get_stop_status()
                        if any(stop_status):
                            # consumer requested stop: send end-of-stream marker
                            should_stop[i] = True
                            shm_context[i].add_batch(None)
            for i in range(num_shms):
                if (not should_stop[i]):
                    # normal end of data: send end-of-stream marker
                    shm_context[i].add_batch(None)
    my_process = MyProcess(dataset, coworker_dataloader_args, epoch_num=epoch_num, training_batch_size=4)
    # duplicate everything for two independent shared-memory streams
    num_s = 2
    dataset = ([dataset] * num_s)
    dataloader_args = ([dataloader_args] * num_s)
    shm_data_size = ([4] * num_s)
    shm_name_prefix = [f'p_{x}' for x in range(num_s)]
    dataloader = create_shm_dataloader(dataset, dataloader_args, coworker_data_process_func=my_process.process_data, io_timeout=io_timeout, initialize_timeout=initialize_timeout, shm_data_size=shm_data_size, shm_name_prefix=shm_name_prefix, coworker_wait_worker_read=True, coworker_wait_worker_read_timeout=10)
    if (type(dataloader) != list):
        dataloader = [dataloader]
    itt = [iter(dataloader[idx]) for idx in range(num_s)]
    count = ([0] * num_s)    # batches consumed per stream
    total_0 = ([0] * num_s)  # sum of first-field values per stream
    total_1 = ([0] * num_s)  # sum of second-field values per stream
    doing = True
    bb = ([False] * num_s)   # per-stream "stopped" flags
    while (doing and (not all(bb))):
        for idx in range(num_s):
            if bb[idx]:
                continue
            try:
                data = next(itt[idx])
                count[idx] += 1
                for i in range(batch_size):
                    total_0[idx] += data[0][i][0].item()
                    total_1[idx] += data[1][i][0].item()
                if (count[idx] == (data_size // batch_size)):
                    # consumed exactly one epoch: ask the coworker to stop
                    dataloader[idx].stop()
                    bb[idx] = True
            except StopIteration:
                doing = False
    for idx in range(num_s):
        # each stream must have seen one full epoch: 12 batches, with field-0
        # values summing to 0+1+...+47 and field-1 values summing to data_size
        assert (count[idx] == (data_size // batch_size))
        assert (total_0[idx] == ((data_size * (data_size - 1)) // 2))
        assert (total_1[idx] == data_size)
    atorch.reset_distributed()
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float=0):
    """Run one training epoch (DETR-style multi-loss criterion).

    The criterion returns a dict of named losses; the optimized scalar is the
    weighted sum using criterion.weight_dict. Losses are all-reduced across
    processes for logging only. Exits the process if the reduced loss is
    non-finite. Returns a dict of globally averaged meter values.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for (k, v) in t.items()} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # only losses present in weight_dict contribute to the gradient
        losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        if (not math.isfinite(loss_value)):
            # abort on NaN/inf loss rather than poisoning the weights
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if (max_norm > 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def parse_args(argv=None):
    """Parse CLI arguments for MMDet PyTorch -> ONNX model conversion.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward-compatible generalization that makes the parser testable).
    Returns:
        argparse.Namespace with config, checkpoint, out, shape and passes.
    """
    parser = argparse.ArgumentParser(description='MMDet pytorch model conversion to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', type=str, required=True, help='output ONNX filename')
    parser.add_argument('--shape', type=int, nargs='+', default=[1280, 800], help='input image size')
    parser.add_argument('--passes', type=str, nargs='+', help='ONNX optimization passes')
    args = parser.parse_args(argv)
    return args
def run():
    """Train the LFFD-style face detector using the module-level `param_*` config.

    Builds the net symbol, the multithreaded train (and optional validation)
    data iterators from pickled providers, and hands everything to
    train_GOCD.start_train. All hyperparameters come from module-level
    `param_*` globals defined elsewhere in this file.
    """
    logging_GOCD.init_logging(log_file_path=param_log_file_path, log_file_mode=param_log_mode)
    logging.info('Preparing before training.')
    sys.path.append('..')
    from symbol_farm import symbol_10_160_17L_4scales_v1 as net
    (net_symbol, data_names, label_names) = net.get_net_symbol()
    net_initializer = mxnet.initializer.Xavier()
    logging.info('Get net symbol successfully.')
    from data_provider_farm.pickle_provider import PickleProvider
    from data_iterator_farm.multithread_dataiter_for_cross_entropy_v1 import Multithread_DataIter_for_CrossEntropy as DataIter
    # training iterator: augmentations and anchor/receptive-field layout all
    # come from the param_* globals
    train_data_provider = PickleProvider(param_trainset_pickle_file_path)
    train_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_train_dataiter, data_provider=train_data_provider, batch_size=param_train_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    # validation is optional: enabled only when all four val params are set
    val_dataiter = None
    if ((param_valset_pickle_file_path != '') and (param_val_batch_size != 0) and (param_num_val_loops != 0) and (param_num_thread_val_dataiter != 0)):
        val_data_provider = PickleProvider(param_valset_pickle_file_path)
        val_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_val_dataiter, data_provider=val_data_provider, batch_size=param_val_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    from metric_farm.metric_default import Metric
    train_metric = Metric(param_num_output_scales)
    val_metric = None
    if (val_dataiter is not None):
        val_metric = Metric(param_num_output_scales)
    train_GOCD.start_train(param_dict=param_dict, mxnet_module=mxnet, context=[mxnet.gpu(i) for i in param_GPU_idx_list], train_dataiter=train_dataiter, train_metric=train_metric, train_metric_update_frequency=param_train_metric_update_frequency, num_train_loops=param_num_train_loops, val_dataiter=val_dataiter, val_metric=val_metric, num_val_loops=param_num_val_loops, validation_interval=param_validation_interval, optimizer_name=param_optimizer_name, optimizer_params=param_optimizer_params, net_symbol=net_symbol, net_initializer=net_initializer, net_data_names=data_names, net_label_names=label_names, pretrained_model_param_path=param_pretrained_model_param_path, display_interval=param_display_interval, save_prefix=param_save_prefix, model_save_interval=param_model_save_interval, start_index=param_start_index)
# Register the model-level docstring for the QA head (decorator applied as a call).
_start_docstrings('\n    XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', XLM_ROBERTA_START_DOCSTRING)
class XLMRobertaForQuestionAnswering(RobertaForQuestionAnswering):
    """XLM-RoBERTa QA model: identical to RobertaForQuestionAnswering except for
    the config class, so checkpoints resolve to XLM-RoBERTa weights."""
    config_class = XLMRobertaConfig
class TFSeq2SeqLMOutput(ModelOutput):
    """Output container for TF sequence-to-sequence language models.

    NOTE(review): in upstream transformers this class carries a @dataclass
    decorator; none is visible here — confirm it was not lost upstream.
    """
    # language-modeling loss (present only when labels are provided)
    loss: Optional[tf.Tensor] = None
    # prediction scores of the LM head (before softmax)
    logits: tf.Tensor = None
    # pre-computed key/value states usable to speed up sequential decoding
    past_key_values: Optional[List[tf.Tensor]] = None
    # decoder hidden states, one per layer (plus embeddings) when requested
    decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
    # decoder self-attention weights when requested
    decoder_attentions: Optional[Tuple[tf.Tensor]] = None
    # final hidden states of the encoder
    encoder_last_hidden_state: Optional[tf.Tensor] = None
    # encoder hidden states, one per layer (plus embeddings) when requested
    encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
    # encoder self-attention weights when requested
    encoder_attentions: Optional[Tuple[tf.Tensor]] = None
def determine_ip() -> str:
    """Best-effort detection of this host's outbound IPv4 address.

    Connects a UDP socket towards 10.0.0.0 (no packet is actually sent) and
    reads the local address the OS chose; falls back to 127.0.0.1 on failure.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('10.0.0.0', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
def resnet34(pre_trained_dir=None):
    """Build a ResNet-34; optionally load pretrained weights cached in `pre_trained_dir`."""
    model = ResNet(BasicBlock, [3, 4, 6, 3])
    if pre_trained_dir is not None:
        state_dict = model_zoo.load_url(M_URLS['resnet34'], model_dir=pre_trained_dir)
        # strict=False tolerates missing/extra keys (e.g. a replaced classifier head)
        model.load_state_dict(state_dict, strict=False)
    return model
def precompute_stats(dataset, save_path, model=None, dims=2048):
    """Compute FID reference statistics (mu, sigma) for `dataset` and save as .npz."""
    from datasets import get_dataset_ref
    ref_dataset = get_dataset_ref(dataset)
    loader = DataLoader(ref_dataset, shuffle=False, batch_size=50)
    if model is None:
        # default feature extractor: the InceptionV3 block whose activations
        # have `dims` channels
        model = InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]]).to(device)
    mu, sigma = compute_stats_from_dataloader(loader, model)
    np.savez(save_path, mu=mu, sigma=sigma)
class CustomCallback(BaseCallback):
    """SB3 callback tracking per-episode cumulative reward and driving a progress bar."""

    def __init__(self, env: CityLearnEnv, loader: IntProgress):
        super().__init__(verbose=0)
        self.loader = loader
        self.env = env
        self.reward_history = [0]  # one running total per episode

    def _on_step(self) -> bool:
        # a time_step of 0 marks the start of a new episode
        new_episode = self.env.time_step == 0
        if new_episode:
            self.reward_history.append(0)
        else:
            step_reward = sum(self.env.rewards[-1])
            self.reward_history[-1] += step_reward
        self.loader.value += 1  # advance the progress widget
        return True
class AgentSupplyBattle():
    """Scripted supply-battle agent.

    Walks toward the first visible supply point (or wanders randomly), turns
    toward the first visible enemy, fires once roughly on target, always picks
    up items, and reloads when low on loaded ammo with spares available.
    """

    def __init__(self, episode_info) -> None:
        self.episode_info = episode_info

    @staticmethod
    def _unit_direction(src, dst):
        # normalized direction vector from src to dst
        delta = dst - src
        return delta / np.linalg.norm(delta)

    def act(self, ts: int, state: AgentState) -> SupplyBattleAction:
        pos = np.asarray(get_position(state))
        if state.supply_states:
            supply_info = list(state.supply_states.values())[0]
            # renamed from `dir`, which shadowed the builtin
            to_supply = self._unit_direction(pos, np.asarray(get_position(supply_info)))
            walk_dir = get_picth_yaw(*to_supply)[1] % 360
        else:
            # no supply visible: wander in a random direction
            walk_dir = random.randint(0, 360)
        turn_lr_delta = 0
        look_ud_delta = 0
        attack = False
        if state.enemy_states:
            enemy_info = list(state.enemy_states.values())[0]
            to_enemy = self._unit_direction(pos, np.asarray(get_position(enemy_info)))
            aim_pitch, aim_yaw = get_picth_yaw(*to_enemy)
            diff_pitch = aim_pitch - state.pitch
            diff_yaw = aim_yaw - state.yaw
            # fire only when roughly on target (within 5 degrees on both axes)
            if abs(diff_pitch) < 5 and abs(diff_yaw) < 5:
                attack = True
            # spread the remaining rotation over the frames of one action,
            # damped to avoid overshooting
            skip_frames = self.episode_info['time_step_per_action']
            rotate_speed_decay = 0.5
            turn_lr_delta = diff_yaw / skip_frames * rotate_speed_decay
            look_ud_delta = diff_pitch / skip_frames * rotate_speed_decay
        return SupplyBattleAction(walk_dir=walk_dir, walk_speed=5, turn_lr_delta=turn_lr_delta, look_ud_delta=look_ud_delta, jump=False, pickup=True, attack=attack, reload=((state.weapon_ammo < 5) and (state.spare_ammo > 0)))
def main():
    """CLI entry point: evaluate pool rankings on the CSFCube dev and test splits."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='subcommand', help='The action to perform.')
    eval_parser = subparsers.add_parser('eval_pool_ranking')
    eval_parser.add_argument('--gold_path', required=True, help='Path with gold data; Where the annotated files test-pid2anns-csfcube-{background/method/result}.json are.')
    eval_parser.add_argument('--ranked_path', help='Path with ranked candidates; Files named test-pid2pool-csfcube-{YOUR-MODEL-NAME}-{FACET}-ranked.json')
    eval_parser.add_argument('--experiment', help='Method name used to generate rankings.')
    eval_parser.add_argument('--facet', choices=['background', 'method', 'result', 'all'], help="Facet of abstract to read from. 'all' aggregates overthe other three facets, it does not represent a valid facet.")
    cl_args = parser.parse_args()
    if cl_args.subcommand != 'eval_pool_ranking':
        sys.stderr.write('Unknown action.')
        return
    # evaluate both splits with identical settings
    for split in ('dev', 'test'):
        graded_eval_pool_rerank(data_path=cl_args.gold_path, method_name=cl_args.experiment, facet=cl_args.facet, dataset='csfcube', run_path=cl_args.ranked_path, split=split)
def _is_chinese_char(cp):
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False |
def main(opts):
    """Create a fresh experiment directory, dump the options, and start training.

    Raises:
        FileExistsError: if `opts.exp_dir` already exists — refuses to
            overwrite a previous experiment. (Replaces the original bare
            `Exception`; FileExistsError is still caught by any existing
            `except Exception` caller.)
    """
    if os.path.exists(opts.exp_dir):
        raise FileExistsError('Oops... {} already exists'.format(opts.exp_dir))
    os.makedirs(opts.exp_dir, exist_ok=True)
    # persist the run configuration alongside the experiment outputs
    opts_dict = vars(opts)
    pprint.pprint(opts_dict)
    with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
        json.dump(opts_dict, f, indent=4, sort_keys=True)
    coach = Coach(opts)
    coach.train()
class SequentialWrapperTwice(SequentialWrapper):
    """Applies the parent transform pipeline twice to produce two augmented views.

    With total_freedom=True the two views use fully independent random seeds;
    otherwise the common and target transforms share seeds across views (only
    the image-only transform differs), keeping the geometric augmentation —
    and hence the labels — aligned between the two views.
    """
    def __init__(self, com_transform: _pil2pil_transform_type=None, image_transform: _pil2tensor_transform_type=pil_augment.ToTensor(), target_transform: _pil2tensor_transform_type=pil_augment.ToLabel(), total_freedom=True) -> None:
        super().__init__(com_transform, image_transform, target_transform)
        self._total_freedom = total_freedom
    def __call__(self, image_list: _pil_list, target_list: _pil_list=None, seed: int=None, **kwargs) -> Tuple[(List[Tensor], List[Tensor])]:
        """Return concatenated ([view1 images + view2 images], [view1 targets + view2 targets])."""
        seed = (seed or random_int())
        # draw all per-view seeds under one fixed master seed so the whole
        # call is reproducible given `seed`
        with fix_all_seed_for_transforms(seed):
            (comm_seed1, comm_seed2) = (random_int(), random_int())
            (img_seed1, img_seed2) = (random_int(), random_int())
            (target_seed1, target_seed2) = (random_int(), random_int())
            if self._total_freedom:
                # independent seeds for every transform stage of each view
                (image_list1, target_list1) = super(SequentialWrapperTwice, self).__call__(image_list, target_list, comm_seed1, img_seed1, target_seed1)
                (image_list2, target_list2) = super(SequentialWrapperTwice, self).__call__(image_list, target_list, comm_seed2, img_seed2, target_seed2)
                return ([*image_list1, *image_list2], [*target_list1, *target_list2])
            # constrained mode: reuse comm_seed1/target_seed1 for both views so
            # the shared (geometric) transform is identical; only img_seed differs
            (image_list1, target_list1) = super(SequentialWrapperTwice, self).__call__(image_list, target_list, comm_seed1, img_seed1, target_seed1)
            (image_list2, target_list2) = super(SequentialWrapperTwice, self).__call__(image_list, target_list, comm_seed1, img_seed2, target_seed1)
            return ([*image_list1, *image_list2], [*target_list1, *target_list2])
class FeedForwardNetwork(Layer):
    """Transformer position-wise feed-forward network (BigDL JVM-backed layer).

    Parameters are forwarded to the JVM-side constructor; the leading None is
    the jvalue placeholder expected by the BigDL Layer base class.
    """
    def __init__(self, hidden_size, filter_size, relu_dropout, bigdl_type='float'):
        super(FeedForwardNetwork, self).__init__(None, bigdl_type, hidden_size, filter_size, relu_dropout)
def log_results(result: Dataset, args: Dict[(str, str)]):
    """Compute WER/CER over `result` and write evaluation (and optional
    prediction/target) log files named after the dataset/config/split."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    wer_metric = load_metric('wer')
    cer_metric = load_metric('cer')
    wer_result = wer_metric.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer_metric.compute(references=result['target'], predictions=result['prediction'])
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)
    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            def write_to_file(batch, i):
                # interleave "<index>\n<text>\n" entries in both log files
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')
            result.map(write_to_file, with_indices=True)
def rm_key_from_odict(odict_obj, rm_suffix):
    """Return a new OrderedDict without entries whose key contains `rm_suffix`."""
    kept = ((key, value) for key, value in odict_obj.items() if rm_suffix not in key)
    return OrderedDict(kept)
class SEWDConfig(PretrainedConfig):
    """Configuration for the SEW-D speech model.

    Mirrors the constructor arguments onto attributes; the conv_* tuples
    describe the feature-extractor stack and must all have equal length.
    """
    model_type = 'sew-d'
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, position_biased_input=False, pos_att_type=('p2c', 'c2p'), norm_rel_ebd='layer_norm', hidden_act='gelu_python', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-07, feature_layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        # --- convolutional feature extractor ---
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # stored as lists so they are JSON-serializable
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # --- transformer encoder (DeBERTa-style relative attention) ---
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.position_biased_input = position_biased_input
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        # --- dropout / regularization ---
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        # the three conv specs must describe the same number of layers
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            raise ValueError(f'Configuration for convolutional layers is incorrect.It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # --- SpecAugment ---
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # --- CTC / classification heads ---
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
class CoreDiffusion(nn.Module):
    """Diffuse node features over a list of sparse adjacency matrices, then
    fuse the per-hop results with a single-layer RNN.

    Attributes:
        input_dim: feature dimension of the input node embeddings.
        output_dim: RNN hidden size / output embedding dimension.
        core_num: number of cores (stored for callers; unused in forward).
        bias: whether the RNN uses bias terms.
        rnn_type: 'LSTM' or 'GRU'.
    """
    input_dim: int
    output_dim: int
    # FIX: was annotated `layer_num`, an attribute this class never sets;
    # the actual constructor parameter/attribute is `core_num`.
    core_num: int
    bias: bool
    rnn_type: str

    def __init__(self, input_dim, output_dim, core_num=1, bias=True, rnn_type='GRU'):
        super(CoreDiffusion, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.bias = bias
        self.core_num = core_num
        self.rnn_type = rnn_type
        # NOTE(review): `self.linear` is registered but never used in forward();
        # kept so existing checkpoints still load — confirm before removing.
        self.linear = nn.Linear(input_dim, output_dim)
        assert (self.rnn_type in ['LSTM', 'GRU'])
        if (self.rnn_type == 'LSTM'):
            self.rnn = nn.LSTM(input_size=input_dim, hidden_size=output_dim, num_layers=1, bias=bias, batch_first=True)
        else:
            self.rnn = nn.GRU(input_size=input_dim, hidden_size=output_dim, num_layers=1, bias=bias, batch_first=True)
        self.norm = nn.LayerNorm(output_dim)

    def forward(self, x, adj_list):
        """Run cumulative diffusion steps over `adj_list`, then fuse with the RNN.

        Args:
            x: node feature matrix, shape (num_nodes, input_dim).
            adj_list: sequence of sparse adjacency matrices, each
                (num_nodes, num_nodes).

        Returns:
            Tensor of shape (num_nodes, output_dim).
        """
        hx_list = []
        for (i, adj) in enumerate(adj_list):
            # Each hop accumulates on top of the previous hop's pre-ReLU result.
            if (i == 0):
                res = torch.sparse.mm(adj, x)
            else:
                res = (hx_list[-1] + torch.sparse.mm(adj, x))
            hx_list.append(res)
        hx_list = [F.relu(res) for res in hx_list]
        # (num_hops, num_nodes, dim) -> (num_nodes, num_hops, dim) for batch_first RNN.
        hx = torch.stack(hx_list, dim=0).transpose(0, 1)
        (output, _) = self.rnn(hx)
        # Sum over the hop ("time") dimension, then layer-normalize.
        output = output.sum(dim=1)
        output = self.norm(output)
        return output
def main():
    """Train a DA-RNN model on the configured dataset and save loss/prediction plots."""
    args = parse_args()
    print('==> Load dataset ...')
    X, y = read_data(args.dataroot, debug=False)
    print('==> Initialize DA-RNN model ...')
    model = DA_RNN(X, y, args.ntimestep, args.nhidden_encoder, args.nhidden_decoder, args.batchsize, args.lr, args.epochs)
    print('==> Start training ...')
    model.train()
    y_pred = model.test()

    def _save_semilogy(series, fname):
        # Plot a loss curve on a log-y axis and write it to disk.
        fig = plt.figure()
        plt.semilogy(range(len(series)), series)
        plt.savefig(fname)
        plt.close(fig)

    _save_semilogy(model.iter_losses, '1.png')
    _save_semilogy(model.epoch_losses, '2.png')

    # Predicted vs. ground-truth series over the test window.
    fig = plt.figure()
    plt.plot(y_pred, label='Predicted')
    plt.plot(model.y[model.train_timesteps:], label='True')
    plt.legend(loc='upper left')
    plt.savefig('3.png')
    plt.close(fig)
    print('Finished Training')
class RuleRefitter():
    """Re-fits the numeric intervals in rule antecedents against the current
    values of a quantitative dataframe."""

    def __init__(self, quantitative_dataframe):
        # Dataframe providing per-attribute value vectors via .column(name).
        self.__dataframe = quantitative_dataframe

    def transform(self, rules):
        """Return copies of `rules` with every numeric interval re-fitted.

        The input rules are not mutated; each rule is copied first.
        """
        copied_rules = [rule.copy() for rule in rules]
        return [self.__refit(rule) for rule in copied_rules]

    def __refit(self, rule):
        """Re-fit each (attribute, interval) literal of `rule` in place and return it."""
        for (idx, literal) in enumerate(rule.antecedent):
            (attribute, interval) = literal
            # String "intervals" are categorical literals — nothing to refit.
            # FIX: use isinstance instead of `type(...) == str` (idiomatic, and
            # also covers str subclasses).
            if isinstance(interval, str):
                continue
            current_attribute_values = self.__dataframe.column(attribute)
            refitted_interval = interval.refit(current_attribute_values)
            rule.antecedent[idx] = (attribute, refitted_interval)
        return rule
def get_dataset(dataset):
    """Build train/eval datasets for a named dataset.

    Args:
        dataset: one of 'cifar10', 'cifar100', their '_lin'/'_hflip' variants,
            'celeba128', or 'afhq_cat'/'afhq_dog'/'afhq_wild'.

    Returns:
        (train_set, eval_set, image_size) where image_size is (H, W, C).

    Raises:
        ValueError: for an unrecognized dataset name.  (The previous code
            silently returned None, which only crashed callers much later.)
    """
    def _cifar_cls(name):
        # Check the longer prefix first: 'cifar100*' also starts with 'cifar10'.
        return datasets.CIFAR100 if name.startswith('cifar100') else datasets.CIFAR10

    def _cifar_pair(train_transform, test_transform, name):
        # Shared train/test construction for every CIFAR variant.
        data = _cifar_cls(name)
        train_set = data(DATA_PATH, train=True, transform=train_transform, download=True)
        test_set = data(DATA_PATH, train=False, transform=test_transform, download=True)
        return (train_set, test_set, (32, 32, 3))

    if dataset in ('cifar10', 'cifar100'):
        to_tensor = transforms.ToTensor()
        return _cifar_pair(to_tensor, to_tensor, dataset)
    if dataset in ('cifar10_lin', 'cifar100_lin'):
        # Crop/flip augmentation for the '_lin' (linear-eval style) variant.
        train_transform = transforms.Compose([transforms.RandomResizedCrop(size=32, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
        return _cifar_pair(train_transform, transforms.ToTensor(), dataset)
    if dataset in ('cifar10_hflip', 'cifar100_hflip'):
        train_transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
        return _cifar_pair(train_transform, transforms.ToTensor(), dataset)
    if dataset == 'celeba128':
        data_path = f'{DATA_PATH}/CelebAMask-HQ/CelebA-128-split'
        train_set = datasets.ImageFolder(os.path.join(data_path, 'train'), transforms.ToTensor())
        test_set = datasets.ImageFolder(os.path.join(data_path, 'test'), transforms.ToTensor())
        return (train_set, test_set, (128, 128, 3))
    if dataset in ('afhq_cat', 'afhq_dog', 'afhq_wild'):
        # The three AFHQ splits differ only in the animal subdirectory.
        animal = dataset.split('_', 1)[1]
        train_transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
        train_set = datasets.ImageFolder(os.path.join(DATA_PATH, f'afhq/{animal}/train'), train_transform)
        val_set = datasets.ImageFolder(os.path.join(DATA_PATH, f'afhq/{animal}/val'), transforms.ToTensor())
        return (train_set, val_set, (512, 512, 3))
    raise ValueError(f'Unknown dataset: {dataset!r}')
def _sorted(dict_):
    """Return the keys of *dict_* in sorted order.

    Delegates to invalidInputError when the keys are not mutually sortable.
    """
    try:
        keys = six.iterkeys(dict_)
        return sorted(keys)
    except TypeError:
        invalidInputError(False, 'nest only supports dicts with sortable keys.')
class TerminalRenderer(Renderer):
    """Renders a table as plain text for terminal output, using `sty`
    escape sequences for bold / foreground / background styling."""

    def __init__(self, col_sep=' '):
        super().__init__()
        # String inserted between adjacent columns.
        self.col_sep = col_sep

    def render_cell(self, table, row, col, widths):
        """Format one cell: apply the cell's printf-style format, truncate to
        the spanned width, wrap in style escapes, then pad per alignment."""
        cell = table.rows[row].cells[col]
        # FIX: renamed local `str` -> `text`; it shadowed the builtin.
        text = (cell.fmt.fmt % cell.data)
        text_width = len(text)
        # A cell may span several columns; its width includes the separators.
        cell_width = sum(widths[idx] for idx in range(col, (col + cell.span)))
        cell_width += (len(self.col_sep) * (cell.span - 1))
        if (len(text) > cell_width):
            text = text[:cell_width]
        if cell.fmt.bold:
            text = ((sty.bold + text) + sty.rs)
        if isinstance(cell.fmt.fgcolor, TerminalColor):
            text = ((cell.fmt.fgcolor.color + text) + sty.rs)
        # Pad using the pre-styling width so escape codes don't count as columns.
        if (text_width < cell_width):
            n_ws = (cell_width - text_width)
            if (table.get_cell_align(row, col) == 'r'):
                text = ((' ' * n_ws) + text)
            elif (table.get_cell_align(row, col) == 'l'):
                text = (text + (' ' * n_ws))
            elif (table.get_cell_align(row, col) == 'c'):
                n_ws1 = (n_ws // 2)
                n_ws0 = (n_ws - n_ws1)
                text = (((' ' * n_ws0) + text) + (' ' * n_ws1))
        if isinstance(cell.fmt.bgcolor, TerminalColor):
            text = ((cell.fmt.bgcolor.color + text) + sty.rs)
        return text

    def render_separator(self, separator, tab, col_widths, total_width):
        """Return the horizontal rule for a separator kind.

        FIX: returns '' (not None) for unknown kinds, so render()'s
        len() check cannot raise TypeError.
        """
        if (separator == Separator.HEAD):
            return ('=' * total_width)
        elif (separator == Separator.INNER):
            return ('-' * total_width)
        elif (separator == Separator.BOTTOM):
            return ('=' * total_width)
        return ''

    def render(self, table):
        """Render the whole table to a single newline-joined string."""
        widths = self.col_widths(table)
        total_width = (sum(widths) + (len(self.col_sep) * (table.n_cols - 1)))
        lines = []
        for (ridx, row) in enumerate(table.rows):
            if (row.pre_separator is not None):
                sepline = self.render_separator(row.pre_separator, table, widths, total_width)
                if (len(sepline) > 0):
                    lines.append(sepline)
            line = []
            for (cidx, cell) in enumerate(row.cells):
                line.append(self.render_cell(table, ridx, cidx, widths))
            lines.append(self.col_sep.join(line))
            if (row.post_separator is not None):
                sepline = self.render_separator(row.post_separator, table, widths, total_width)
                if (len(sepline) > 0):
                    lines.append(sepline)
        return '\n'.join(lines)
def video_from_sequence(input_dir, output_file, reference_file=None, ext=None, fps=None, bitrate=None, include_audio=False, lossless=None):
    """Encode the image sequence in `input_dir` into `output_file` via ffmpeg.

    Args:
        input_dir: directory containing the source frames.
        output_file: target video path; its parent directory is created if missing.
        reference_file: optional video whose fps (and audio track, when
            include_audio is True) are reused; a '.*' suffix matches by stem.
        ext: input image extension; prompted interactively when None.
        fps: output frame rate; taken from the reference or prompted when None.
        bitrate: video bitrate in MB/s (ignored for lossless); prompted when None.
        include_audio: mux the reference file's audio stream into the output.
        lossless: use CRF-0 x264; prompted interactively when None.
    """
    input_path = Path(input_dir)
    output_file_path = Path(output_file)
    reference_file_path = (Path(reference_file) if (reference_file is not None) else None)
    if (not input_path.exists()):
        io.log_err('input_dir not found.')
        return
    if (not output_file_path.parent.exists()):
        # BUGFIX: the original returned immediately after creating the output
        # directory, silently skipping the encode; just ensure the dir exists.
        output_file_path.parent.mkdir(parents=True, exist_ok=True)
    if (ext is None):
        ext = io.input_str('Input image format (extension)', 'png')
    if (lossless is None):
        lossless = io.input_bool('Use lossless codec', False)
    video_id = None
    audio_id = None
    ref_in_a = None
    if (reference_file_path is not None):
        # A '.*' suffix means "any file with this stem" in the reference directory.
        if (reference_file_path.suffix == '.*'):
            reference_file_path = pathex.get_first_file_by_stem(reference_file_path.parent, reference_file_path.stem)
        elif (not reference_file_path.exists()):
            reference_file_path = None
        if (reference_file_path is None):
            io.log_err('reference_file not found.')
            return
        # Take fps from the first video stream, audio from the first audio stream.
        probe = ffmpeg.probe(str(reference_file_path))
        for stream in probe['streams']:
            if ((video_id is None) and (stream['codec_type'] == 'video')):
                video_id = stream['index']
                fps = stream['r_frame_rate']
            if ((audio_id is None) and (stream['codec_type'] == 'audio')):
                audio_id = stream['index']
        if (audio_id is not None):
            ref_in_a = ffmpeg.input(str(reference_file_path))[str(audio_id)]
    if (fps is None):
        fps = max(1, io.input_int('Enter FPS', 25))
    if ((not lossless) and (bitrate is None)):
        bitrate = max(1, io.input_int('Bitrate of output file in MB/s', 16))
    input_image_paths = pathex.get_image_paths(input_path)
    # Frames are streamed to ffmpeg over stdin as an image2pipe source.
    i_in = ffmpeg.input('pipe:', format='image2pipe', r=fps)
    output_args = [i_in]
    if (include_audio and (ref_in_a is not None)):
        output_args += [ref_in_a]
    output_args += [str(output_file_path)]
    output_kwargs = {}
    if lossless:
        output_kwargs.update({'c:v': 'libx264', 'crf': '0', 'pix_fmt': 'yuv420p'})
    else:
        output_kwargs.update({'c:v': 'libx264', 'b:v': ('%dM' % bitrate), 'pix_fmt': 'yuv420p'})
    if (include_audio and (ref_in_a is not None)):
        output_kwargs.update({'c:a': 'aac', 'b:a': '192k', 'ar': '48000', 'strict': 'experimental'})
    job = ffmpeg.output(*output_args, **output_kwargs).overwrite_output()
    try:
        job_run = job.run_async(pipe_stdin=True)
        for image_path in input_image_paths:
            with open(image_path, 'rb') as f:
                image_bytes = f.read()
            job_run.stdin.write(image_bytes)
        job_run.stdin.close()
        job_run.wait()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
        io.log_err(('ffmpeg fail, job commandline:' + str(job.compile())))
def build_dataset_from_cfg(cfg, default_args=None):
    """Instantiate a dataset from a config via the DATASETS registry.

    Args:
        cfg: registry build config for the dataset.
        default_args: optional defaults forwarded to the registry build call.

    Returns:
        Whatever DATASETS.build constructs for this config.
    """
    return DATASETS.build(cfg, default_args=default_args)
def load_tests(loader, tests, ignore):
    """unittest `load_tests` hook: add the infinibatch.iterators doctests to the suite."""
    doctest_suite = doctest.DocTestSuite(infinibatch.iterators)
    tests.addTests(doctest_suite)
    return tests
def main(data_shape, config_file, mobile_name):
    """Report GFLOPs, parameter size, and CPU inference time for a recognizer
    built from `config_file`, using a random input of shape `data_shape`."""
    cfg = get_cfg_defaults()
    cfg.merge_from_file(config_file)
    # Seed numpy and torch for reproducible initialization and input noise.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    cpu_device = torch.device('cpu')
    model = build_recognizer(cfg, cpu_device)
    model.eval()
    data = torch.randn(data_shape).to(device=cpu_device, non_blocking=True)
    GFlops, params_size = compute_num_flops(model, data)
    print('\n'.join([
        f'{mobile_name} ' + '*' * 10,
        f'device: {cpu_device}',
        f'GFlops: {GFlops:.3f}G',
        f'Params Size: {params_size:.3f}MB',
    ]))
    # Rebuild a fresh model before timing — presumably so profiling hooks from
    # compute_num_flops don't affect the measurement; confirm with authors.
    model = build_recognizer(cfg, cpu_device)
    model.eval()
    print('compute cpu infer time')
    compute_model_time(data_shape, model, cpu_device)
    del model
    torch.cuda.empty_cache()
class SetPosition(abstract_action_space.AbstractActionSpace):
    """Action space that moves sprites toward a target position in [0, 1]^2,
    blending with the current position by an inertia factor."""

    def __init__(self, action_layers='agent', inertia=0.0):
        # Normalize a bare layer name into a tuple of layer names.
        if not isinstance(action_layers, (list, tuple)):
            action_layers = (action_layers,)
        self._action_layers = action_layers
        self._inertia = inertia
        self._action_spec = specs.BoundedArray(shape=(2,), dtype=np.float32, minimum=0, maximum=1)

    def step(self, state, action):
        """Blend every controlled sprite's position toward `action` (in place)."""
        keep = self._inertia
        for layer_name in self._action_layers:
            for sprite in state[layer_name]:
                sprite.position = keep * sprite.position + (1 - keep) * action

    def random_action(self):
        """Sample a uniform random target position in the unit square."""
        return np.random.uniform(0.0, 1.0, size=(2,))

    def action_spec(self):
        """Return the BoundedArray spec describing valid actions."""
        return self._action_spec
class TestOptions(BaseOptions):
    """Command-line options used at test time (extends BaseOptions)."""

    def initialize(self):
        """Register the test-time CLI flags on top of the base options."""
        BaseOptions.initialize(self)
        test_args = [
            ('--ntest', dict(type=int, default=float('inf'), help='# of test examples.')),
            ('--results_dir', dict(type=str, default='./results/', help='saves results here.')),
            ('--aspect_ratio', dict(type=float, default=1.0, help='aspect ratio of result images')),
            ('--phase', dict(type=str, default='test', help='train, val, test, etc')),
            ('--which_epoch', dict(type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')),
            ('--how_many', dict(type=int, default=10000, help='how many test images to run')),
        ]
        for flag, kwargs in test_args:
            self.parser.add_argument(flag, **kwargs)
        # Mark this options object as test-mode for downstream code.
        self.isTrain = False
class SeparableConv2d(nn.Module):
    """Depthwise-separable 2-D convolution: depthwise conv + norm, then
    1x1 pointwise conv + norm, with ReLUs placed per `relu_first`."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, relu_first=True, bias=False, norm_layer=nn.BatchNorm2d):
        super().__init__()
        # Depthwise stage: one filter per input channel (groups=inplanes).
        dw_conv = nn.Conv2d(inplanes, inplanes, kernel_size, stride=stride, padding=dilation, dilation=dilation, groups=inplanes, bias=bias)
        dw_norm = norm_layer(inplanes)
        # Pointwise stage: 1x1 conv mixing channels up to `planes`.
        pw_conv = nn.Conv2d(inplanes, planes, 1, bias=bias)
        pw_norm = norm_layer(planes)
        if relu_first:
            # Pre-activation variant: a single ReLU before the conv stack.
            stages = [('relu', nn.ReLU()), ('depthwise', dw_conv), ('bn_depth', dw_norm), ('pointwise', pw_conv), ('bn_point', pw_norm)]
        else:
            # Post-activation variant: ReLU after each norm.
            stages = [('depthwise', dw_conv), ('bn_depth', dw_norm), ('relu1', nn.ReLU(inplace=True)), ('pointwise', pw_conv), ('bn_point', pw_norm), ('relu2', nn.ReLU(inplace=True))]
        self.block = nn.Sequential(OrderedDict(stages))

    def forward(self, x):
        """Apply the separable-convolution stack to `x`."""
        return self.block(x)
class experiment_testcase(unittest.TestCase):
    """End-to-end smoke tests: run a debug experiment, then render its figures."""

    def _make_figures(self, results_dir, figures_dir):
        # Shared post-processing call used by all three tests.
        postprocessing.make_all_1d_figures([results_dir], figures_dir, rescale=False, data_folder='../data/debug/', skip_kernel_evaluation=False)

    def test_experiment_and_writeup_cp(self):
        """Changepoint debug experiment + figure generation."""
        experiment.run_experiment_file('../experiments/debug/debug_changepoint.py')
        self._make_figures('../results/debug-changepoint/', '../analyses/debug-changepoint/figures/')

    def test_experiment_and_writeup_cw(self):
        """Changewindow debug experiment + figure generation."""
        experiment.run_experiment_file('../experiments/debug/debug_changewindow.py')
        self._make_figures('../results/debug-changewindow/', '../analyses/debug-changewindow/figures/')

    def test_experiment_and_writeup(self):
        """K-fold debug experiment + figure generation."""
        experiment.run_debug_kfold()
        self._make_figures('../results/debug/', '../analyses/debug/figures/')
class TestLADHead(TestCase):
    """Tests for the LAD head's label-assignment-distillation loss."""

    def test_lad_head_loss(self):
        """Losses must be zero for box/iou with no GT and non-zero with one GT."""

        class mock_skm():
            """Minimal sklearn.mixture stand-in so no real GMM fitting is needed."""

            def GaussianMixture(self, *args, **kwargs):
                return self

            def fit(self, loss):
                pass

            def predict(self, loss):
                # FIX: np.long (alias of Python int) was removed in NumPy 1.24;
                # np.int64 matches the old behavior on 64-bit Linux platforms.
                components = np.zeros_like(loss, dtype=np.int64)
                return components.reshape((- 1))

            def score_samples(self, loss):
                scores = np.random.random(len(loss))
                return scores
        # Patch the head's sklearn module with the mock before building heads.
        lad_head.skm = mock_skm()
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1}]
        train_cfg = Config(dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=(- 1)), allowed_border=(- 1), pos_weight=(- 1), debug=False))
        lad = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        teacher_model = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16, 32, 64]]
        lad.init_weights()
        teacher_model.init_weights()
        # Case 1: no ground-truth boxes.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        batch_gt_instances_ignore = None
        outs_teacher = teacher_model(feat)
        label_assignment_results = teacher_model.get_label_assignment(*outs_teacher, [gt_instances], img_metas, batch_gt_instances_ignore)
        outs = teacher_model(feat)
        empty_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas, batch_gt_instances_ignore, label_assignment_results)
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_iou_loss = empty_gt_losses['loss_iou']
        self.assertGreater(empty_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        self.assertEqual(empty_iou_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        # Case 2: a single ground-truth box.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        batch_gt_instances_ignore = None
        label_assignment_results = teacher_model.get_label_assignment(*outs_teacher, [gt_instances], img_metas, batch_gt_instances_ignore)
        one_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas, batch_gt_instances_ignore, label_assignment_results)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_iou_loss = one_gt_losses['loss_iou']
        self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
        self.assertGreater(onegt_iou_loss.item(), 0, 'box loss should be non-zero')
        # levels_to_images should flatten 5 levels into per-image tensors.
        (n, c, h, w) = (10, 4, 20, 20)
        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
        results = levels_to_images(mlvl_tensor)
        self.assertEqual(len(results), n)
        self.assertEqual(results[0].size(), (((h * w) * 5), c))
        self.assertTrue(lad.with_score_voting)
        # Smoke-test predict_by_feat with a single-level anchor configuration.
        lad = LADHead(num_classes=4, in_channels=1, train_cfg=train_cfg, anchor_generator=dict(type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8]), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        cls_scores = [torch.ones(2, 4, 5, 5)]
        bbox_preds = [torch.ones(2, 4, 5, 5)]
        iou_preds = [torch.ones(2, 1, 5, 5)]
        cfg = Config(dict(nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
        rescale = False
        lad.predict_by_feat(cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
def add_journal_subfield(field, element, reference_format):
    """Attach journal metadata subfields from `element` to `field`.

    Missing keys yield None subfield values (via dict.get); the combined
    reference string is built with `reference_format`.
    """
    for subfield_name, source_key in (('journal_title', 'title'), ('journal_volume', 'volume'), ('journal_year', 'year'), ('journal_page', 'page')):
        add_subfield(field, subfield_name, element.get(source_key))
    add_subfield(field, 'journal_reference', reference_format.format(**element))
class GenerateGraphWithQDQPattern(GraphRewriterBase):
    def __init__(self, model, calibration_data, op_wise_config, fake_quant, fp32_ops, bf16_ops, quantized_nodes, device, performance_only, itex_mode, llm_weight_minmax):
        """Capture rewriter configuration and build a node name -> (node, consumers) map.

        Args:
            model: GraphDef to transform (forwarded to the base rewriter).
            calibration_data: calibration log strings carrying min/max values.
            op_wise_config: per-op tuples; indexes read elsewhere in this class
                are 0 = per_channel, 2 = is_asymmetric, 3 = weight_bit.
            fake_quant: when True, insert constant (-1, 1) min/max Q/DQ pairs.
            fp32_ops: ops to keep in fp32.
            bf16_ops: ops to keep in bf16.
            quantized_nodes: nodes already quantized.
            device: device string used when creating inserted nodes.
            performance_only: prefer performance-oriented pattern decisions.
            itex_mode: enable ITEX-specific behavior.
            llm_weight_minmax: precomputed weight min/max values.

        Raises:
            ValueError: if the graph contains duplicate node names.
        """
        super().__init__(model)
        self.data = calibration_data
        self.op_wise_config = op_wise_config
        self.fake_quant = fake_quant
        self.fp32_ops = fp32_ops
        self.bf16_ops = bf16_ops
        self.quantized_nodes = quantized_nodes
        self.device = device
        self.performance_only = performance_only
        self.itex_mode = itex_mode
        self.llm_weight_minmax = llm_weight_minmax
        # Lightweight record type: the node proto plus the names of its consumers.
        self.node_details = namedtuple('node_details', ['node', 'output'])
        self.node_name_mapping = {}
        # Op types that _check_op_list / _find_relu_node treat as pass-through
        # when tracing whether an input chain ends in a ReLU-like activation.
        self.check_op_list = {'ConcatV2', 'Conv2D', 'Conv3D', 'DepthwiseConv2D', 'QuantizeV2', 'DepthwiseConv2dNative', 'MaxPool', 'MaxPool3D', 'FusedBatchNormV3', 'Requantize', 'RequantizePerChannel', 'AvgPool', 'Pad', 'CropAndResize', 'Dequantize', 'Mean', 'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'FakeQuantWithMinMaxVars', '_MklFusedInstanceNorm', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2', 'Sigmoid'}
        # First pass: index every node by name.
        for node in self.model.node:
            if (node.name in self.node_name_mapping):
                raise ValueError('Duplicate Node Found when _parse_graph, the node name is {}'.format(node.name))
            self.node_name_mapping[node.name] = self.node_details(node=node, output=[])
        # Second pass: record each node as a consumer of all of its inputs.
        for node_name in self.node_name_mapping:
            for each_input in self.node_name_mapping[node_name].node.input:
                self.node_name_mapping[Helper.node_name_from_input(each_input)].output.append(node_name)
        _elapsed_time('Pass GenerateGraphWithQDQPattern')
    def do_transformation(self):
        """Insert QuantizeV2/Dequantize (QDQ) pairs into the graph.

        Three phases:
          1. Parse calibration data into per-tensor min/max lists and insert
             QDQ pairs on the inputs of each quantizable op.
          2. Insert QDQ pairs on the weight inputs of convolution/matmul ops.
          3. Duplicate any Dequantize node that feeds multiple consumers so
             each consumer gets its own Dequantize.

        Returns:
            The rewritten GraphDef.
        """
        # Phase 1a: parse calibration strings "<name>:<[value]>" into min/max lists,
        # skipping requantize entries.
        min_max_values = {}
        for i in self.data:
            if (i.find('_requant') == (- 1)):
                (key, value) = (i.rsplit(':', 1)[0], i.rsplit(':', 1)[1])
                # Strip the '_eightbit_' suffix and keep the trailing min/max tag.
                key = (key.split('_eightbit_')[0][1:] + key[(- 5):])
                if (key not in min_max_values):
                    min_max_values[key] = [float(value[1:(- 1)])]
                else:
                    min_max_values[key].append(float(value[1:(- 1)]))
        # Dedupe to the bare op names that have calibration data.
        quantizable_op_names = []
        for i in min_max_values:
            if (i.split('__')[0] not in quantizable_op_names):
                quantizable_op_names.append(i.split('__')[0])
        self.g = GraphAnalyzer()
        self.g.graph = copy.deepcopy(self.model)
        self.graph_info = self.g.parse_graph()
        self.g.get_frame_info()
        # Phase 1b: insert QDQ on activations of each quantizable op.
        for op_name in quantizable_op_names:
            if self._ignore_insert_qdq_pattern(op_name):
                continue
            if (op_name not in self.op_wise_config.keys()):
                is_asymmetric = False
            else:
                op_wise_cfg = self.op_wise_config[op_name]
                is_asymmetric = op_wise_cfg[2]
            if (self.graph_info[op_name].node.op == 'ConcatV2'):
                # ConcatV2 gets per-input QDQ only outside ITEX mode.
                if (not self.itex_mode):
                    self._insert_qdq_pattern_for_concatv2(self.graph_info[op_name].node, is_asymmetric)
            else:
                self._insert_qdq_pattern_for_common_ops(self.graph_info[op_name].node, is_asymmetric)
        # Phase 2: insert QDQ on the weight input of computational ops.
        self.g_weight = GraphAnalyzer()
        self.g_weight.graph = self.g.dump_graph()
        self.graph_info = self.g_weight.parse_graph()
        target_nodes = self.g_weight.query_fusion_pattern_nodes([['Conv2D', 'Conv3D', 'DepthwiseConv2dNative', 'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2']])
        for i in target_nodes:
            if (i[0] not in quantizable_op_names):
                continue
            computational_node_name = i[0]
            if self._ignore_insert_qdq_pattern(computational_node_name):
                continue
            computational_node = self.graph_info[computational_node_name].node
            weight_name = computational_node.input[1]
            # Strip a ':<port>' suffix from the weight tensor name if present.
            if re.search('\\w+:\\d+', weight_name):
                weight_node = self.graph_info[weight_name.rsplit(':', 1)[0]].node
            else:
                weight_node = self.graph_info[weight_name].node
            if (weight_node.op == 'Enter'):
                # Weights wrapped in an Enter (loop frame): only ITEX mode
                # follows through to the underlying Const.
                if self.itex_mode:
                    parent_node = self.graph_info[Helper.node_name_from_input(weight_node.input[0])].node
                    if (not (parent_node.op == 'Const')):
                        continue
                    weight_node = parent_node
                else:
                    continue
            if (computational_node_name in self.op_wise_config.keys()):
                op_wise_cfg = self.op_wise_config[computational_node_name]
                per_channel = op_wise_cfg[0]
                weight_bit = op_wise_cfg[3]
            else:
                # Defaults when the op has no explicit config.
                per_channel = False
                weight_bit = 7
            self._insert_qdq_pattern_for_weight_node(computational_node, weight_node, weight_name, min_max_values, per_channel, weight_bit, self.device)
        # Phase 3: give every extra consumer of a Dequantize its own copy.
        self.g_qdq = GraphAnalyzer()
        self.g_qdq.graph = self.g_weight.dump_graph()
        self.graph_info = self.g_qdq.parse_graph()
        patterns = [['QuantizeV2'], ['Dequantize']]
        matched_nodes = self.g_qdq.query_fusion_pattern_nodes(patterns)
        for i in matched_nodes:
            quantize_node_name = self.graph_info[i[0]].node.name
            deq_node_name = self.graph_info[i[1]].node.name
            deq_node = self.graph_info[i[1]].node
            len_deq_outputs = len(self.g_qdq.node_name_details[deq_node_name].outputs)
            if (len_deq_outputs == 1):
                continue
            for index in range((len_deq_outputs - 1)):
                # Clone the Dequantize (including T/mode/axis attrs) and rewire
                # the (index+1)-th consumer to the clone.
                rep_dequantize_node = Helper.create_node('Dequantize', ((deq_node_name + '_') + str((index + 1))), [quantize_node_name, (quantize_node_name + ':1'), (quantize_node_name + ':2')])
                rep_dequantize_node.attr['T'].CopyFrom(deq_node.attr['T'])
                rep_dequantize_node.attr['mode'].CopyFrom(deq_node.attr['mode'])
                if ('axis' in deq_node.attr):
                    rep_dequantize_node.attr['axis'].CopyFrom(deq_node.attr['axis'])
                next_node_name = self.g_qdq.node_name_details[deq_node_name].outputs[(index + 1)]
                self.g_qdq.add_node(rep_dequantize_node, quantize_node_name, [next_node_name])
                for (input_index, each_input) in enumerate(self.g_qdq.node_name_details[next_node_name].node.input):
                    if (each_input == deq_node_name):
                        self.g_qdq.node_name_details[next_node_name].node.input[input_index] = rep_dequantize_node.name
        return self.g_qdq.dump_graph()
def _check_op_list(self, node_type):
return any([(node_type.find(i) != (- 1)) for i in self.check_op_list])
    def _find_relu_node(self, node):
        """Trace backward/at `node` to decide whether its value chain is
        ReLU-activated (i.e. non-negative, so unsigned quantization applies).

        Returns:
            True if the chain ends in a ReLU-like activation (or an already
            unsigned-quantized tensor), False otherwise.
        """
        # MaxPool may sit between BiasAdd and the activation; allow BiasAdd
        # as pass-through from here on.
        if (node.op == 'MaxPool'):
            self.check_op_list.add('BiasAdd')
        # Direct activation: Relu/Relu6/Elu, or a fused '...AndRelu' op with
        # alpha == 0 (plain, not leaky). The second clause excludes a Relu
        # that directly follows an inference-mode FusedBatchNorm with a single
        # consumer when performance_only is set.
        if (((node.op in ('Relu', 'Relu6', 'Elu')) or ((node.op.find('AndRelu') != (- 1)) and (('alpha' not in node.attr) or (('alpha' in node.attr) and (node.attr['alpha'].f == 0))))) and ((node.op != 'Relu') or (not self.performance_only) or (self.node_name_mapping[Helper.node_name_from_input(node.input[0])].node.op.find('FusedBatchNorm') == (- 1)) or self.node_name_mapping[Helper.node_name_from_input(node.input[0])].node.attr['is_training'].b or (len(self.node_name_mapping[Helper.node_name_from_input(node.input[0])].output) > 1))):
            return True
        # Already-unsigned quantized tensors count as ReLU-activated.
        elif (('T' in node.attr) and (dtypes.DType(node.attr['T'].type) in (dtypes.quint8, dtypes.uint8))):
            return True
        # Quantized conv/matmul without a fused (plain) ReLU ends the search.
        elif (((node.op.find('QuantizedConv') != (- 1)) or (node.op.find('QuantizedDepthwiseConv') != (- 1)) or (node.op.find('QuantizedMatMul') != (- 1))) and (((node.op.find('Relu') == (- 1)) and (node.op.find('Elu') == (- 1))) or (('alpha' in node.attr) and (node.attr['alpha'].f > 0)))):
            return False
        # ITEX mode: an Add is ReLU-activated only if BOTH operands are, and
        # neither operand is itself an add/bias chain.
        elif (self.itex_mode and (node.op in ('Add', 'AddV2', 'AddN'))):
            if re.search('\\w+:\\d+', node.input[0]):
                input0_node = self.node_name_mapping[node.input[0].rsplit(':', 1)[0]].node
            else:
                input0_node = self.node_name_mapping[node.input[0]].node
            if re.search('\\w+:\\d+', node.input[1]):
                input1_node = self.node_name_mapping[node.input[1].rsplit(':', 1)[0]].node
            else:
                input1_node = self.node_name_mapping[node.input[1]].node
            if ((input0_node.op in ('BiasAdd', 'Add', 'AddV2', 'AddN')) or (input1_node.op in ('BiasAdd', 'Add', 'AddV2', 'AddN'))):
                return False
            return (self._find_relu_node(input0_node) and self._find_relu_node(input1_node))
        # Pass-through ops: recurse into the producer(s).
        elif (self._check_op_list(node.op) or (self.itex_mode and (node.op in ('Add', 'AddV2')))):
            if (node.op == 'ConcatV2'):
                # A concat is ReLU-activated if ANY data input is.
                find_relu = False
                for i in range(0, node.attr['N'].i):
                    if re.search('\\w+:\\d+', node.input[i]):
                        input_node = self.node_name_mapping[node.input[i].rsplit(':', 1)[0]].node
                    else:
                        input_node = self.node_name_mapping[node.input[i]].node
                    find_relu |= self._find_relu_node(input_node)
                return find_relu
            if re.search('\\w+:\\d+', node.input[0]):
                input_node = self.node_name_mapping[node.input[0].rsplit(':', 1)[0]].node
            else:
                input_node = self.node_name_mapping[node.input[0]].node
            return self._find_relu_node(input_node)
        else:
            return False
    def _insert_qdq_pattern_for_common_ops(self, original_node, is_asymmetric):
        """Insert a QDQ pair in front of the relevant input(s) of a non-concat op.

        For deconvolutions the QDQ goes on the LAST input (the data tensor);
        for everything else on the FIRST input. The quantized dtype is chosen
        per-op: quint8 for ReLU-activated chains, qint8 otherwise.
        """
        namespace_prefix = (original_node.name + '_eightbit')
        if (original_node.op in ('Conv2DBackpropInput', 'Conv3DBackpropInputV2')):
            # Deconvolution: the data tensor is the last input.
            all_inputs = self.node_name_mapping[original_node.name].node.input[(- 1):]
        else:
            all_inputs = self.node_name_mapping[original_node.name].node.input[:1]
        for each_input_name in all_inputs:
            # Skip control-dependency inputs ('^name').
            if (each_input_name[0] == '^'):
                continue
            # ITEX: a MaxPool fed by a Dequantize inherits that Dequantize's dtype.
            if (self.itex_mode and (each_input_name in self.node_name_mapping) and (self.node_name_mapping[each_input_name].node.op == 'MaxPool') and (self.graph_info[self.graph_info[each_input_name].node.input[0]].node.op == 'Dequantize')):
                maxpool_node = self.graph_info[each_input_name].node
                dtype = dtypes.DType(self.graph_info[maxpool_node.input[0]].node.attr['T'].type)
            elif (self.node_name_mapping[original_node.name].node.op == 'MatMul'):
                dtype = dtypes.quint8
            elif ((self.node_name_mapping[original_node.name].node.op == 'BatchMatMulV2') or (self.node_name_mapping[original_node.name].node.op == 'BatchMatMul')):
                dtype = dtypes.qint8
            elif (self.node_name_mapping[original_node.name].node.op == 'FusedBatchNormV3'):
                dtype = dtypes.qint8
            else:
                input_node_name = Helper.node_name_from_input(each_input_name)
                if (input_node_name in self.graph_info):
                    if (self.graph_info[input_node_name].node.op == 'Dequantize'):
                        # Reuse the dtype of an upstream Dequantize.
                        dtype = dtypes.DType(self.graph_info[input_node_name].node.attr['T'].type)
                    elif (self.graph_info[input_node_name].node.op == 'FusedBatchNormV3'):
                        dtype = dtypes.qint8
                    elif self._find_relu_node(self.node_name_mapping[original_node.name].node):
                        # ReLU-activated values are non-negative -> unsigned.
                        dtype = dtypes.quint8
                    else:
                        dtype = dtypes.qint8
                else:
                    dtype = (dtypes.quint8 if self._find_relu_node(self.node_name_mapping[original_node.name].node) else dtypes.qint8)
            self._insert_qdq_pattern_for_each_input(original_node.name, namespace_prefix, each_input_name, is_asymmetric, dtype, device=self.device)
def _insert_qdq_pattern_for_concatv2(self, original_node, is_asymmetric):
namespace_prefix = (original_node.name + '_eightbit')
normal_inputs = [i for i in original_node.input if (i[0] != '^')]
num_input = len(normal_inputs)
original_inputs = normal_inputs[0:(num_input - 1)]
input_idx = 0
for original_input_name in original_inputs:
self._insert_qdq_pattern_for_each_input(original_node.name, namespace_prefix, original_input_name, is_asymmetric, dtypes.quint8, input_idx, device=self.device)
input_idx += 1
def _insert_qdq_pattern_for_each_input(self, op_name, namespace_prefix, input_name, is_asymmetric, dtype=dtypes.quint8, input_index=0, device='cpu'):
    """Insert a QuantizeV2 -> Dequantize pair in front of one input of `op_name`.

    Two variants are emitted:
      * fake_quant: min/max are fixed constants (-1.0, 1.0);
      * otherwise: min/max are computed dynamically at runtime via a
        Reshape(-1) followed by Min/Max reduction over the flattened input.
    In both cases the target node's `input[input_index]` is rewired to the
    new Dequantize node.
    """
    # Input names may carry ':port' or '^' (control-input) markers; sanitize
    # them so they can be embedded in new node names.
    unique_input_name = input_name.replace(':', '__port__').replace('^', '__hat__')
    min_input_name = ((namespace_prefix + '_min_') + unique_input_name)
    max_input_name = ((namespace_prefix + '_max_') + unique_input_name)
    quantize_input_name = ((namespace_prefix + '_quantize_') + unique_input_name)
    reshape_dims_name = ((namespace_prefix + '_reshape_dims') + unique_input_name)
    reduction_dims_name = ((namespace_prefix + '_reduction_dims') + unique_input_name)
    if self.fake_quant:
        # Fixed quantization range [-1.0, 1.0] baked in as constants.
        min_node = Helper.create_constant_node(min_input_name, (- 1.0), dtypes.float32, device='cpu')
        max_node = Helper.create_constant_node(max_input_name, 1.0, dtypes.float32, device='cpu')
        quant_v2_node = Helper.create_node('QuantizeV2', quantize_input_name, [input_name, min_input_name, max_input_name])
        Helper.set_attr_dtype(quant_v2_node, 'T', dtype)
        if (not is_asymmetric):
            Helper.set_attr_string(quant_v2_node, 'round_mode', b'HALF_TO_EVEN')
        # BatchMatMul variants always use SCALED mode; others pick MIN_FIRST
        # only for asymmetric quantization.
        if ('BatchMatMul' in self.graph_info[op_name].node.op):
            Helper.set_attr_string(quant_v2_node, 'mode', b'SCALED')
        else:
            Helper.set_attr_string(quant_v2_node, 'mode', (b'MIN_FIRST' if is_asymmetric else b'SCALED'))
        # Concat nodes get one Dequantize per input, so the index is appended
        # to keep the generated names unique.
        if ('Concat' in self.graph_info[op_name].node.op):
            dequantize_node = Helper.create_node('Dequantize', ((op_name + '_dequantize_') + str(input_index)), [quant_v2_node.name, (quant_v2_node.name + ':1'), (quant_v2_node.name + ':2')])
        else:
            dequantize_node = Helper.create_node('Dequantize', (op_name + '_dequantize'), [quant_v2_node.name, (quant_v2_node.name + ':1'), (quant_v2_node.name + ':2')])
        Helper.set_attr_dtype(dequantize_node, 'T', dtype)
        if ('BatchMatMul' in self.graph_info[op_name].node.op):
            Helper.set_attr_string(dequantize_node, 'mode', b'SCALED')
        else:
            Helper.set_attr_string(dequantize_node, 'mode', (b'MIN_FIRST' if is_asymmetric else b'SCALED'))
        # Wire: original input -> QuantizeV2 -> Dequantize -> op_name.
        self.g.add_node(quant_v2_node, self.graph_info[op_name].node.input[0], [dequantize_node.name])
        self.g.add_node(dequantize_node, quant_v2_node.name, [op_name])
        self.g.add_node(min_node, None, [quant_v2_node.name])
        self.g.add_node(max_node, None, [quant_v2_node.name])
        self.graph_info[op_name].node.input[input_index] = dequantize_node.name
    else:
        # Dynamic range: flatten the input (Reshape to [-1]) and reduce with
        # Min/Max along axis 0 to obtain runtime min/max scalars.
        reshape_dims_node = Helper.create_constant_node(reshape_dims_name, (- 1), dtypes.int32, [1])
        reduction_dims_node = Helper.create_constant_node(reduction_dims_name, 0, dtypes.int32, [1])
        reshape_input_name = ((namespace_prefix + '_reshape_') + unique_input_name)
        # ITEX FusedBatchNormV3 expects fixed auxiliary input names for the
        # min/max/quantize nodes.
        if (self.itex_mode and (self.graph_info[op_name].node.op == 'FusedBatchNormV3')):
            min_input_name = (namespace_prefix + '_input7_output_min')
            max_input_name = (namespace_prefix + '_input8_output_max')
            quantize_input_name = (namespace_prefix + '_quantize_bn')
        else:
            min_input_name = ((namespace_prefix + '_min_') + unique_input_name)
            max_input_name = ((namespace_prefix + '_max_') + unique_input_name)
            quantize_input_name = ((namespace_prefix + '_quantize_') + unique_input_name)
        reshape_input_node = Helper.create_node('Reshape', reshape_input_name, [input_name, reshape_dims_name])
        Helper.set_attr_dtype(reshape_input_node, 'T', dtypes.float32)
        min_input_node = Helper.create_node('Min', min_input_name, [reshape_input_name, reduction_dims_name])
        Helper.set_attr_dtype(min_input_node, 'T', dtypes.float32)
        Helper.set_attr_dtype(min_input_node, 'Tidx', dtypes.int32)
        Helper.set_attr_bool(min_input_node, 'keep_dims', False)
        max_input_node = Helper.create_node('Max', max_input_name, [reshape_input_name, reduction_dims_name])
        Helper.set_attr_dtype(max_input_node, 'T', dtypes.float32)
        Helper.set_attr_dtype(max_input_node, 'Tidx', dtypes.int32)
        Helper.set_attr_bool(max_input_node, 'keep_dims', False)
        # Add a control dependency ('^' input) on the original input to the
        # Min/Max nodes for BatchMatMul and ITEX paths.
        # NOTE(review): when both conditions hold the control input is
        # appended twice -- confirm this duplication is intentional.
        if ('BatchMatMul' in self.graph_info[op_name].node.op):
            min_input_node.input.append(('^' + input_name))
            max_input_node.input.append(('^' + input_name))
        if self.itex_mode:
            min_input_node.input.append(('^' + input_name))
            max_input_node.input.append(('^' + input_name))
        quant_v2_node = Helper.create_node('QuantizeV2', quantize_input_name, [input_name, min_input_name, max_input_name])
        Helper.set_attr_dtype(quant_v2_node, 'T', dtype)
        if (not is_asymmetric):
            Helper.set_attr_string(quant_v2_node, 'round_mode', b'HALF_TO_EVEN')
        # performance_only and BatchMatMul both force SCALED mode.
        if (self.performance_only or ('BatchMatMul' in self.graph_info[op_name].node.op)):
            Helper.set_attr_string(quant_v2_node, 'mode', b'SCALED')
        else:
            Helper.set_attr_string(quant_v2_node, 'mode', (b'MIN_FIRST' if is_asymmetric else b'SCALED'))
        # Concat nodes need per-input Dequantize names (see fake_quant branch).
        if ('Concat' in self.graph_info[op_name].node.op):
            dequantize_node = Helper.create_node('Dequantize', ((op_name + '_dequantize_') + str(input_index)), [quant_v2_node.name, (quant_v2_node.name + ':1'), (quant_v2_node.name + ':2')])
        else:
            dequantize_node = Helper.create_node('Dequantize', (op_name + '_dequantize'), [quant_v2_node.name, (quant_v2_node.name + ':1'), (quant_v2_node.name + ':2')])
        Helper.set_attr_dtype(dequantize_node, 'T', dtype)
        if (self.performance_only or ('BatchMatMul' in self.graph_info[op_name].node.op)):
            Helper.set_attr_string(dequantize_node, 'mode', b'SCALED')
        else:
            Helper.set_attr_string(dequantize_node, 'mode', (b'MIN_FIRST' if is_asymmetric else b'SCALED'))
        # For input-gradient convolutions the data tensor lives at input 2,
        # so the rewiring targets that slot instead of the caller-supplied index.
        if (self.graph_info[op_name].node.op in ('Conv2DBackpropInput', 'Conv3DBackpropInputV2')):
            input_index = 2
        self.g.add_node(quant_v2_node, self.graph_info[op_name].node.input[input_index], [dequantize_node.name])
        self.g.add_node(dequantize_node, quant_v2_node.name, [op_name])
        self.g.add_node(reshape_dims_node, None, [reshape_input_name])
        self.g.add_node(reduction_dims_node, None, [min_input_name, max_input_name])
        self.g.add_node(reshape_input_node, reshape_dims_name, [min_input_name, max_input_name])
        self.g.add_node(min_input_node, reshape_input_name, [quant_v2_node.name])
        self.g.add_node(max_input_node, reshape_input_name, [quant_v2_node.name])
        self.graph_info[op_name].node.input[input_index] = dequantize_node.name
def _insert_qdq_pattern_for_weight_node(self, computational_node, weight_node, weight_name, min_max_values, per_channel, weight_bit=7.0, device='cpu'):
    """Insert a QuantizeV2 -> Dequantize pair around a weight tensor.

    The quantization range is derived from the weight values themselves:
    per-channel (axis depends on the consumer op) or per-tensor, symmetric
    (SCALED mode, qint8). Special paths exist for ReadVariableOp weights
    (range looked up in self.llm_weight_minmax), DepthwiseConv2dNative
    (with an optional 4D->3D reshape in ITEX mode), and nodes living inside
    a control-flow frame (Enter nodes are added). Finally
    `computational_node.input[1]` is rewired to the new Dequantize (or the
    reshape-back node).
    """
    host_op_type = computational_node.op
    base_name = (weight_node.name + '_')
    qint8_const_name = (base_name + 'qint8_const')
    min_name = (base_name + 'min')
    max_name = (base_name + 'max')
    epsilon = 0.0001
    # Scale ranges so that `weight_bit` bits of precision map onto the full
    # 7-bit qint8 magnitude (127).
    range_coefficent = (127 / ((2 ** weight_bit) - 1))
    min_value = 0
    max_value = 0
    insert_reshape = False
    shape_convert = None
    shape_revert = None
    # --- Case 1: constant weights consumed by conv/matmul ops.
    if (('value' in weight_node.attr) and (host_op_type in ('Conv2D', 'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'Conv3D', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2'))):
        float_tensor = tensor_util.MakeNdarray(weight_node.attr['value'].tensor)
        if per_channel:
            # Per-channel range: reduce |w| over all axes except the output
            # channel axis of the respective op's filter layout.
            if (host_op_type in ('Conv3D', 'Conv3DBackpropInputV2')):
                ranges = np.abs(float_tensor).max(axis=(0, 1, 2, 3))
            elif (host_op_type in ('Conv2D', 'Conv2DBackpropInput')):
                ranges = np.abs(float_tensor).max(axis=(0, 1, 2))
            elif (host_op_type in 'MatMul'):
                # NOTE(review): `host_op_type in 'MatMul'` is a substring
                # test, not equality (it also matches e.g. 'Mat') --
                # probably intended `== 'MatMul'`; confirm before changing.
                if (('transpose_b' in weight_node.attr) and weight_node.attr['transpose_b'].b):
                    ranges = np.abs(float_tensor).max(axis=1)
                else:
                    ranges = np.abs(float_tensor).max(axis=0)
            else:
                ranges = np.abs(float_tensor).max(axis=(0, 1))
            ranges *= range_coefficent
            # Symmetric range per channel; clamp tiny ranges to +/-epsilon
            # to avoid degenerate (zero-width) quantization intervals.
            min_value = (- ranges)
            max_value = ranges
            ranges[(ranges < epsilon)] = epsilon
            min_value[(np.abs(min_value) < epsilon)] = (- epsilon)
            max_value[(np.abs(max_value) < epsilon)] = epsilon
        else:
            # Per-tensor range from global min/max; force the range to
            # include 0 and symmetrize it around 0.
            min_value = np.min(float_tensor)
            max_value = np.max(float_tensor)
            min_value *= range_coefficent
            max_value *= range_coefficent
            min_value = min(min_value, 0.0)
            if (min_value == max_value):
                # Degenerate single-value range: widen it deterministically.
                if (abs(min_value) < 1e-06):
                    max_value = (min_value + 1.0)
                elif (min_value > 0):
                    max_value = (2 * min_value)
                else:
                    max_value = (min_value / 2.0)
            range_value = np.max(np.abs([min_value, max_value]))
            min_value = (- range_value)
            max_value = range_value
    # --- Case 2: variable weights -- range pre-collected in llm_weight_minmax.
    elif (weight_node.op == 'ReadVariableOp'):
        min_value = self.llm_weight_minmax[weight_node.name][0]
        max_value = self.llm_weight_minmax[weight_node.name][1]
        min_value *= range_coefficent
        max_value *= range_coefficent
        min_value = min(min_value, 0.0)
        if (min_value == max_value):
            if (abs(min_value) < 1e-06):
                max_value = (min_value + 1.0)
            elif (min_value > 0):
                max_value = (2 * min_value)
            else:
                max_value = (min_value / 2.0)
        range_value = np.max(np.abs([min_value, max_value]))
        min_value = (- range_value)
        max_value = range_value
    # --- Case 3: depthwise conv -- per-channel over flattened (H, W) axes;
    # ITEX needs a 4D->3D reshape around the QDQ when the multiplier d != 1.
    elif (host_op_type == 'DepthwiseConv2dNative'):
        float_tensor = tensor_util.MakeNdarray(weight_node.attr['value'].tensor)
        ranges = np.abs(float_tensor).max(axis=(0, 1))
        ranges = ranges.flatten()
        min_value = (- ranges)
        max_value = ranges
        ranges[(ranges < epsilon)] = epsilon
        min_value[(np.abs(min_value) < epsilon)] = (- epsilon)
        max_value[(np.abs(max_value) < epsilon)] = epsilon
        (a, b, c, d) = float_tensor.shape
        if (self.itex_mode and (d != 1)):
            insert_reshape = True
            shape_convert = [a, b, (c * d)]
            shape_revert = [a, b, c, d]
    # --- Case 4: fall back to externally collected min/max statistics.
    else:
        min_value = np.min(min_max_values[(computational_node.name + '__min')])
        max_value = np.max(min_max_values[(computational_node.name + '__max')])
    min_node = Helper.create_constant_node(min_name, min_value, dtypes.float32, device='cpu')
    max_node = Helper.create_constant_node(max_name, max_value, dtypes.float32, device='cpu')
    # For BatchMatMul with a non-BatchMatMul weight producer, order the
    # min/max constants after the weight via control dependencies.
    if (('BatchMatMul' in host_op_type) and ('BatchMatMul' not in weight_node.op)):
        min_node.input.append(('^' + weight_name))
        max_node.input.append(('^' + weight_name))
    min_enter_node = None
    max_enter_node = None
    if insert_reshape:
        # QuantizeV2 consumes the 3D-reshaped weight instead of the raw 4D one.
        reshape_dims_4to3_name = (qint8_const_name + '_reshape_dims_4to3_')
        reshape_dims_4to3_node = Helper.create_constant_node(reshape_dims_4to3_name, shape_convert, dtypes.int32)
        reshape_4to3_name = (qint8_const_name + '_reshape_4to3_')
        reshape_4to3_node = Helper.create_node('Reshape', reshape_4to3_name, [weight_node.name, reshape_dims_4to3_name])
        reshape_4to3_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
        quant_node = Helper.create_node('QuantizeV2', (qint8_const_name + '_quant'), [reshape_4to3_name, min_name, max_name])
    else:
        quant_node = Helper.create_node('QuantizeV2', (qint8_const_name + '_quant'), [weight_node.name, min_name, max_name])
    dequant_node = Helper.create_node('Dequantize', (base_name + '_dequant'), [quant_node.name, (quant_node.name + ':1'), (quant_node.name + ':2')])
    # Weights are always symmetric qint8 / SCALED.
    Helper.set_attr_dtype(quant_node, 'T', dtypes.qint8)
    Helper.set_attr_string(quant_node, 'mode', b'SCALED')
    Helper.set_attr_string(quant_node, 'round_mode', b'HALF_TO_EVEN')
    Helper.set_attr_dtype(dequant_node, 'T', dtypes.qint8)
    Helper.set_attr_string(dequant_node, 'mode', b'SCALED')
    if per_channel:
        # Per-channel axis follows the op's filter layout (output channels).
        if (host_op_type in ('Conv2D', 'Conv2DBackpropInput')):
            Helper.set_attr_int(quant_node, 'axis', 3)
            Helper.set_attr_int(dequant_node, 'axis', 3)
        elif (host_op_type in ('Conv3D', 'Conv3DBackpropInputV2')):
            Helper.set_attr_int(quant_node, 'axis', 4)
            Helper.set_attr_int(dequant_node, 'axis', 4)
        elif (host_op_type == 'MatMul'):
            Helper.set_attr_int(quant_node, 'axis', 1)
            Helper.set_attr_int(dequant_node, 'axis', 1)
        else:
            Helper.set_attr_int(quant_node, 'axis', (- 1))
            Helper.set_attr_int(dequant_node, 'axis', (- 1))
    if (host_op_type == 'DepthwiseConv2dNative'):
        # Depthwise filters quantize along axis 2 (after the H, W axes).
        Helper.set_attr_int(quant_node, 'axis', 2)
        Helper.set_attr_int(dequant_node, 'axis', 2)
    if insert_reshape:
        # Wire: weight -> Reshape(4->3) -> Quantize -> Dequantize ->
        # Reshape(3->4) -> computational_node.
        reshape_dims_3to4_name = (qint8_const_name + '_reshape_dims_3to4_')
        reshape_dims_3to4_node = Helper.create_constant_node(reshape_dims_3to4_name, shape_revert, dtypes.int32)
        reshape_3to4_name = (qint8_const_name + '_reshape_3to4_')
        reshape_3to4_node = Helper.create_node('Reshape', reshape_3to4_name, [dequant_node.name, reshape_dims_3to4_name])
        reshape_3to4_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))
        self.g_weight.add_node(reshape_dims_4to3_node, None, [reshape_4to3_name])
        self.g_weight.add_node(reshape_dims_3to4_node, None, [reshape_3to4_name])
        self.g_weight.add_node(reshape_4to3_node, weight_node.name, [quant_node.name])
        self.g_weight.add_node(quant_node, reshape_4to3_name, [])
        self.g_weight.add_node(min_node, None, [quant_node.name])
        self.g_weight.add_node(max_node, None, [quant_node.name])
        self.g_weight.add_node(dequant_node, quant_node.name, [reshape_3to4_name])
        self.g_weight.add_node(reshape_3to4_node, dequant_node.name, [computational_node.name])
        computational_node.input[1] = reshape_3to4_node.name
    elif ((computational_node.name in self.g.parent_frame_details) and self.g.parent_frame_details[computational_node.name]):
        # The consumer sits inside a control-flow frame (e.g. a while loop):
        # weight/min/max constants must cross the frame boundary through
        # Enter nodes that mirror the frame's attributes.
        weight_enter_node = Helper.create_node('Enter', (weight_node.name + '_enter'), [weight_node.name])
        Helper.set_attr_string(weight_enter_node, 'frame_name', self.g.parent_frame_details[computational_node.name].attr['frame_name'].s)
        Helper.set_attr_dtype(weight_enter_node, 'T', dtypes.float32)
        Helper.set_attr_bool(weight_enter_node, 'is_constant', True)
        Helper.set_attr_int(weight_enter_node, 'parallel_iterations', self.g.parent_frame_details[computational_node.name].attr['parallel_iterations'].i)
        min_enter_node = Helper.create_node('Enter', (min_name + '_enter'), [min_name])
        Helper.set_attr_string(min_enter_node, 'frame_name', self.g.parent_frame_details[computational_node.name].attr['frame_name'].s)
        Helper.set_attr_dtype(min_enter_node, 'T', dtypes.float32)
        Helper.set_attr_bool(min_enter_node, 'is_constant', True)
        Helper.set_attr_int(min_enter_node, 'parallel_iterations', self.g.parent_frame_details[computational_node.name].attr['parallel_iterations'].i)
        max_enter_node = Helper.create_node('Enter', (max_name + '_enter'), [max_name])
        Helper.set_attr_string(max_enter_node, 'frame_name', self.g.parent_frame_details[computational_node.name].attr['frame_name'].s)
        Helper.set_attr_dtype(max_enter_node, 'T', dtypes.float32)
        Helper.set_attr_bool(max_enter_node, 'is_constant', True)
        Helper.set_attr_int(max_enter_node, 'parallel_iterations', self.g.parent_frame_details[computational_node.name].attr['parallel_iterations'].i)
        self.g_weight.add_node(quant_node, weight_name, [])
        self.g_weight.add_node(min_node, None, [min_enter_node.name])
        self.g_weight.add_node(max_node, None, [max_enter_node.name])
        self.g_weight.add_node(min_enter_node, min_node.name, [quant_node.name])
        self.g_weight.add_node(max_enter_node, max_node.name, [quant_node.name])
        self.g_weight.add_node(weight_enter_node, weight_node.name, [quant_node.name])
        # Re-point the quantize node at the Enter outputs, then re-register it.
        quant_node.input[0] = weight_enter_node.name
        quant_node.input[1] = min_enter_node.name
        quant_node.input[2] = max_enter_node.name
        self.g_weight.add_node(quant_node, weight_enter_node.name, [])
        self.g_weight.add_node(dequant_node, quant_node.name, [computational_node.name])
        computational_node.input[1] = dequant_node.name
    else:
        # Plain case: weight -> Quantize -> Dequantize -> computational_node.
        self.g_weight.add_node(quant_node, weight_name, [])
        self.g_weight.add_node(min_node, None, [quant_node.name])
        self.g_weight.add_node(max_node, None, [quant_node.name])
        self.g_weight.add_node(dequant_node, quant_node.name, [computational_node.name])
        computational_node.input[1] = dequant_node.name
def _ignore_insert_qdq_pattern(self, matched_node_name):
    """Return True when no QDQ pattern should be inserted for this node."""
    not_yet_quantized = (matched_node_name,) not in self.quantized_nodes
    # Nodes pinned to fp32/bf16 that were not explicitly quantized are skipped.
    if not_yet_quantized and (matched_node_name in self.fp32_ops or matched_node_name in self.bf16_ops):
        return True
    # Nodes with no op-wise config entry and no quantized record are skipped.
    if not_yet_quantized and matched_node_name not in self.op_wise_config:
        return True
    node = self.graph_info[matched_node_name].node
    if not self.itex_mode:
        # Outside ITEX mode, a MatMul with transposed A cannot be quantized.
        if node.op == 'MatMul' and node.attr['transpose_a'].b is True:
            return True
        # FusedBatchNorm* and _MklFusedInstanceNorm only quantize under ITEX.
        if 'FusedBatchNorm' in node.op or node.op == '_MklFusedInstanceNorm':
            return True
    return False
def combine_predictions(fname_lst, fname_hard, fname_prob, thr=0.5):
    """Average Monte-Carlo prediction volumes, then save soft and hard maps.

    Args:
        fname_lst: list of NIfTI filenames holding individual predictions.
        fname_hard: output path for the thresholded (uint8) prediction.
        fname_prob: output path for the mean-probability prediction.
        thr: threshold applied to the mean probability map.
    """
    stacked = np.array([nib.load(fname).get_fdata() for fname in fname_lst])
    # Both outputs inherit the header/affine of the first input volume.
    ref_header = nib.load(fname_lst[0]).header
    mean_prob = np.mean(stacked, axis=0)
    prob_img = nib.Nifti1Image(dataobj=mean_prob, affine=ref_header.get_best_affine(), header=ref_header.copy())
    nib.save(prob_img, fname_prob)
    hard = imed_postpro.threshold_predictions(mean_prob, thr=thr).astype(np.uint8)
    hard_img = nib.Nifti1Image(dataobj=hard, affine=ref_header.get_best_affine(), header=ref_header.copy())
    nib.save(hard_img, fname_hard)
# NOTE(review): the bare '_config' below looks like a mangled decorator line
# (presumably '@<experiment>.config', sacred-style) -- confirm against the
# original file before editing.
_config
def model_pix_only_base():
    # Config body: the locals defined here describe a GenericSidetuneNetwork
    # with a TaskonomyEncoder base (trainable, normalized outputs), no side
    # network, and a TaskonomyDecoder head producing 3 output channels.
    n_channels_out = 3
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'n_channels_in': 3, 'n_channels_out': n_channels_out, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'train': True, 'eval_only': False, 'normalize_outputs': True}, 'use_baked_encoding': False, 'side_class': None, 'side_kwargs': {}, 'side_weights_path': None, 'decoder_class': 'TaskonomyDecoder', 'decoder_weights_path': None, 'decoder_kwargs': {'out_channels': n_channels_out, 'eval_only': False}}}}
    # Remove the helper local -- presumably so it is not captured as a config
    # entry by the config framework's local-variable introspection (TODO confirm).
    del n_channels_out
# NOTE(review): the bare '_metaclass(ABCMeta)' below looks like a mangled
# decorator line (presumably 'ated from six.add_metaclass') -- confirm against the original file.
_metaclass(ABCMeta)
class DatasetPredictorBase(object):
    """Base class for predictors that run a PredictConfig over a DataFlow."""

    def __init__(self, config, dataset):
        """Validate and store the prediction config and input dataset."""
        assert isinstance(dataset, DataFlow)
        assert isinstance(config, PredictConfig)
        self.config = config
        self.dataset = dataset

    def get_result(self):
        """Produce prediction results; subclasses are expected to override."""
        pass

    def get_all_result(self):
        """Materialize every result from get_result() into a list."""
        return [result for result in self.get_result()]
# NOTE(review): the bare '(DataGeneration)' below looks like a mangled
# decorator line (presumably '@requires(DataGeneration)' or similar from
# luigi) -- confirm against the original file.
(DataGeneration)
class EvaluateGradientVariance(AutoNamingTask):
    """Luigi task that records per-parameter gradients over repeated epochs."""

    # Dict of hyper-parameters consumed by get_initial_model / e_step_obj_func.
    EvaluateGradientVariance_params = luigi.DictParameter()
    # Seed used to initialize the trained model (seed+1 for the variational one).
    train_seed = luigi.IntParameter()

    def run_task(self, input_list):
        """Accumulate gradients of the E-step objective for n_epochs passes.

        Returns a dict mapping parameter name -> list of flattened gradient
        tensors, one entry per epoch.
        """
        # Only the partially-observed training histories from the first
        # upstream output are used here.
        (_, train_po_history_list, _, _, _) = input_list[0]
        train_model = get_initial_model(self.EvaluateGradientVariance_params, self.train_seed)
        # Optionally use a separately-initialized model as the variational
        # distribution; otherwise the model serves as its own.
        if self.EvaluateGradientVariance_params['use_variational']:
            var_model = get_initial_model(self.EvaluateGradientVariance_params, (self.train_seed + 1))
        else:
            var_model = train_model
        # One gradient list per trainable parameter.
        grad_list_dict = {}
        for each_param in train_model.params:
            if train_model.params[each_param].requires_grad:
                grad_list_dict[each_param] = []
        # NOTE(review): the model is never updated between epochs, so repeated
        # passes only differ if e_step_obj_func is stochastic -- confirm.
        for _ in range(self.EvaluateGradientVariance_params['n_epochs']):
            # Sum the objective over all training histories, then backprop once.
            obj_func = 0
            for each_history in train_po_history_list:
                obj_func = (obj_func + train_model.e_step_obj_func(each_history, variational_dist=var_model, **self.EvaluateGradientVariance_params['obj_func_kwargs']))
            obj_func.backward()
            # Snapshot (deepcopy) each flattened gradient before zeroing it.
            for each_param in train_model.params:
                if train_model.params[each_param].requires_grad:
                    grad_list_dict[each_param].append(deepcopy(train_model.params[each_param].grad.reshape((- 1))))
            train_model.zero_grad()
        return grad_list_dict
class FILTERS(object):
    """Facade over the per-framework filter registry.

    Validates the framework name, then exposes that framework's registered
    filters through dict-style item access.
    """

    def __init__(self, framework):
        """Resolve the filter registry for *framework* (AssertionError if unknown)."""
        supported = ['tensorflow', 'tensorflow_itex', 'keras', 'mxnet', 'onnxrt_qdq', 'pytorch', 'pytorch_ipex', 'pytorch_fx', 'onnxrt_integerops', 'onnxrt_qlinearops', 'onnxruntime']
        assert (framework in supported), 'framework support tensorflow pytorch mxnet onnxrt'
        self.filters = framework_filters[framework]().filters
        self.framework = framework

    def __getitem__(self, filter_type):
        """Return the filter registered under *filter_type* (AssertionError if absent)."""
        assert (filter_type in self.filters.keys()), 'filter support {}'.format(self.filters.keys())
        return self.filters[filter_type]
# NOTE(review): the bare '_cache()' below looks like a mangled decorator line
# (presumably '@functools.lru_cache()' or similar) -- confirm against the
# original file.
_cache()
def split_request(start_dt: str, end_dt: str, player_id: int, url: str) -> pd.DataFrame:
    """Fetch CSV data for a player over a date range, chunked by ~6-year spans.

    The window [start_dt, end_dt] is split into pieces of at most 2190 days;
    one HTTP request is issued per piece and the resulting frames are
    concatenated.
    """
    cursor = datetime.strptime(start_dt, '%Y-%m-%d')
    final_dt = datetime.strptime(end_dt, '%Y-%m-%d')
    frames = []
    pid = str(player_id)
    print('Gathering Player Data')
    max_span = timedelta(days=2190)
    while cursor <= final_dt:
        # Clamp each chunk to the remaining window or the 2190-day cap.
        chunk_end = cursor + min(final_dt - cursor, max_span)
        response = requests.get(url.format(cursor.strftime('%Y-%m-%d'), chunk_end.strftime('%Y-%m-%d'), pid))
        frames.append(pd.read_csv(io.StringIO(response.text)))
        # Resume the day after this chunk's last day to avoid overlap.
        cursor = chunk_end + timedelta(days=1)
    return pd.concat(frames)
def save_data(data, loc, header):
    """Write tabular data to CSV with missing values replaced by empty strings.

    Args:
        data: rows to tabulate (anything accepted by pd.DataFrame).
        loc: destination path or writable buffer for the CSV.
        header: column names.

    Returns:
        None.
    """
    df = pd.DataFrame(data=data, columns=header)
    # BUG FIX: fillna() returns a new frame; the original call discarded the
    # result, leaving NaNs in place. Assign it back so the replacement sticks.
    df = df.fillna('')
    df.to_csv(loc, index=False, encoding='utf-8')
    return None
def parse_opts(argv=None):
    """Build and parse the command-line options for 2-stream training.

    Args:
        argv: optional list of argument strings; None (the default) parses
            sys.argv as before, so existing callers are unaffected.

    Returns:
        argparse.Namespace with all options.
    """
    learning_policy = '2stream'
    validate_policy = '2stream'
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_path', default='./data/', type=str, help='Root directory path of data')
    parser.add_argument('--dataset_path', default='ori_data/', type=str, help='Directory path of Videos')
    # Result dir is timestamped at parse time so each run gets a fresh folder.
    parser.add_argument('--result_path', default=(('results/' + time.strftime('%m%d-%H:%M_', time.localtime(time.time()))) + learning_policy), type=str, help='Result directory path')
    parser.add_argument('--train_dataset', default='ucf_aug', type=str, help='')
    # BUG FIX: the default is a list but no nargs was declared, so passing the
    # flag on the command line silently replaced the list with a single string.
    parser.add_argument('--val_dataset', default=['ucf_aug', 'quva', 'yt_seg'], type=str, nargs='+', help='Used dataset (yt_seg | quva | ucf_aug)')
    parser.add_argument('--no_train', action='store_true', help='If true, training is not performed.')
    parser.set_defaults(no_train=False)
    parser.add_argument('--no_val', action='store_true', help='If true, validation is not performed.')
    parser.set_defaults(no_val=False)
    parser.add_argument('--sample_duration', default=300, type=int, help='Temporal duration of training sample')
    parser.add_argument('--mean_std_dataset', default='quva', type=str, help='')
    parser.add_argument('--sample_size', default=112, type=int, help='Height and width of inputs')
    parser.add_argument('--batch_size', default=24, type=int, help='Batch Size')
    parser.add_argument('--val_batch_size', default=5, type=int, help='Batch Size')
    parser.add_argument('--n_epochs', default=100, type=int, help='Number of total epochs to run')
    parser.add_argument('--lr_patience', default=1, type=int, help='Patience of LR scheduler. See documentation of ReduceLROnPlateau.')
    parser.add_argument('--begin_epoch', default=1, type=int, help='Training begins at this epoch. Previous trained model indicated by resume_path is loaded.')
    parser.add_argument('--pretrain_path', default='', type=str)
    parser.add_argument('--train_from_scratch', action='store_true')
    parser.set_defaults(train_from_scratch=True)
    parser.add_argument('--resume_path', default='', type=str, help='Save data (.pth) of previous training')
    parser.add_argument('--checkpoint', default=1, type=int, help='Trained model is saved at every this epochs.')
    parser.add_argument('--learning_policy', default=learning_policy, type=str, help='')
    parser.add_argument('--validate_policy', default=validate_policy, type=str)
    parser.add_argument('--optimizer', default='adam', type=str, help='Currently only support [adam, sgd]')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='Initial learning rate (divided by 10 while training by lr scheduler)')
    parser.add_argument('--momentum', default=0.9, type=float, help='Momentum')
    parser.add_argument('--weight_decay', default=0.001, type=float, help='Weight Decay')
    parser.add_argument('--nesterov', action='store_true', help='Nesterov momentum')
    parser.set_defaults(nesterov=False)
    parser.add_argument('--dampening', default=0.9, type=float, help='dampening of SGD')
    parser.add_argument('--basic_duration', default=32, type=float, help='Temporal duration of network input')
    parser.add_argument('--l_context_ratio', default=1.0, type=float, help='')
    parser.add_argument('--r_context_ratio', default=2.0, type=float, help='')
    parser.add_argument('--norm_value', default=255, type=int, help='If 1, range of inputs is [0-255]. If 255, range of inputs is [0-1].')
    parser.add_argument('--model', default='resnext', type=str, help='(resnet | resnext')
    parser.add_argument('--model_depth', default=101, type=int, help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
    parser.add_argument('--resnet_shortcut', default='B', type=str, help='Shortcut type of resnet (A | B)')
    parser.add_argument('--resnext_cardinality', default=32, type=int, help='ResNeXt cardinality')
    parser.add_argument('--n_classes', default=7, type=int, help='[count, enlarge, narrow, miss]')
    # BUG FIX: same missing-nargs problem -- a list default with a scalar
    # type=float; nargs='+' keeps the value a list when supplied on the CLI.
    parser.add_argument('--anchors', default=[0.5, 0.67, 0.8, 1.0, 1.25, 1.5, 2.0], type=float, nargs='+')
    parser.add_argument('--iou_ubound', default=0.5, type=float)
    parser.add_argument('--iou_lbound', default=0.5, type=float)
    parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
    parser.set_defaults(no_cuda=False)
    parser.add_argument('--n_threads', default=1, type=int, help='Number of threads for multi-thread loading')
    parser.add_argument('--manual_seed', default=1, type=int, help='Manually set random seed')
    parser.add_argument('--initial_scale', default=1.0, type=float, help='Initial scale for multiscale cropping')
    parser.add_argument('--n_scales', default=1, type=int, help='Number of scales for multiscale cropping')
    parser.add_argument('--scale_step', default=0., type=float, help='Scale step for multiscale cropping')
    parser.add_argument('--train_crop', default='center', type=str, help='Spatial cropping method in training. random is uniform. corner is selection from 4 corners and 1 center. (random | corner | center)')
    # argv=None preserves the original behavior of reading sys.argv.
    args = parser.parse_args(argv)
    return args
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.