(name='x_boundary')
def boundary_domain():
points = geo.sample_boundary(100, param_ranges=time_range)
constraints = sc.Variables({'u': 0})
return (points, constraints) |
class RendezvousManager(metaclass=ABCMeta):
def __init__(self):
self._lock = Lock()
self._alive_nodes = set()
self._released_workers = []
self._waiting_nodes: Dict[(int, int)] = {}
self._rdzv_nodes = {}
self._lastcall_time = 0
self._rdzv_params = RendezvousParameters(0, 0)
self._rdzv_round = 0
self._node_unit = 1
self._name = ''
self._latest_rdzv_nodes = []
self._start_rdzv_ts = 0
self._node_rdzv_times: Dict[(int, int)] = {}
self._latest_log_nodes_time = 0
def get_rdzv_round(self):
return self._rdzv_round
def clear_waiting_nodes(self):
self._waiting_nodes.clear()
def add_alive_node(self, node: Node):
self._alive_nodes.add(node.id)
def remove_alive_node(self, node: Node):
if (node.id in self._alive_nodes):
self._alive_nodes.remove(node.id)
logger.info(f'Remove exited worker {node.name} from {self._name} rendezvous.')
self._waiting_nodes.pop(node.rank_index, 0)
def update_rdzv_params(self, min_nodes, max_nodes, waiting_timeout, node_unit):
with self._lock:
if (self._rdzv_params.max_nodes == 0):
self._rdzv_params.min_nodes = min_nodes
self._rdzv_params.max_nodes = max_nodes
self._rdzv_params.waiting_timeout = waiting_timeout
self._node_unit = node_unit
logger.info(f'{self._name} manager updates rdzv params: min_nodes={min_nodes}, max_nodes={max_nodes}, waiting_timeout={waiting_timeout}, node_unit={node_unit}')
def _check_rdzv_completed(self):
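# Rendezvous completes when every expected node (max_nodes) is waiting, or when at
# least min_nodes are waiting and the waiting timeout has elapsed; the admitted node
# count is then rounded down to a multiple of node_unit.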
rdzv_completed = False
waiting_num = len(self._waiting_nodes)
if (len(self._waiting_nodes) == self._rdzv_params.max_nodes):
rdzv_completed = True
else:
waiting_time = (time.time() - self._lastcall_time)
if ((waiting_num >= self._rdzv_params.min_nodes) and (waiting_time >= self._rdzv_params.waiting_timeout)):
rdzv_completed = True
waiting_num = ((waiting_num // self._node_unit) * self._node_unit)
if rdzv_completed:
node_ids = sorted(self._waiting_nodes.keys())[0:waiting_num]
self._rdzv_nodes = {}
for i in node_ids:
self._rdzv_nodes[i] = self._waiting_nodes[i]
self._latest_rdzv_nodes = list(self._rdzv_nodes.keys())
self._waiting_nodes = dict((set(self._waiting_nodes.items()) - set(self._rdzv_nodes.items())))
self._lastcall_time = 0
self._log_rendezvous_info()
if self._waiting_nodes:
logger.warning(f'Waiting nodes not in {self._rdzv_round} rendezvous are {self._waiting_nodes}.')
elif ((time.time() - self._latest_log_nodes_time) > 60):
self._latest_log_nodes_time = time.time()
logger.info(f'Waiting nodes in rendezvous are {self._waiting_nodes}')
return rdzv_completed
def _log_rendezvous_info(self):
logger.info(f'''Completed {self._rdzv_round} round rendezvous of {self._name} is {self._rdzv_nodes}
The times of nodes to join rendezvous are {self._node_rdzv_times}.''')
self._node_rdzv_times.clear()
if (self._start_rdzv_ts > 0):
rdzv_time = round((time.time() - self._start_rdzv_ts), 2)
logger.info(f'Elapsed time to complete the {self._rdzv_round} round rendezvous is {rdzv_time}s')
self._start_rdzv_ts = 0
def not_joined_rdzv_nodes(self):
nodes = []
if self._rdzv_nodes:
for node_id in self._alive_nodes:
if (node_id not in self._rdzv_nodes):
nodes.append(node_id)
return nodes
def join_rendezvous(self, node_rank, local_world_size):
with self._lock:
if (not self._waiting_nodes):
self._start_rdzv_ts = time.time()
logger.info(f'Start the {self._rdzv_round} round rendezvous.')
if (node_rank in self._waiting_nodes):
return self._rdzv_round
self._waiting_nodes[node_rank] = local_world_size
self._rdzv_nodes = {}
self._lastcall_time = time.time()
self._node_rdzv_times[node_rank] = round((self._lastcall_time - self._start_rdzv_ts), 2)
return self._rdzv_round
def num_nodes_waiting(self):
if self._has_node_restart():
return len(self._waiting_nodes)
elif (len(self._waiting_nodes) >= self._node_unit):
return len(self._waiting_nodes)
return 0
def _has_node_restart(self):
for node_rank in self._waiting_nodes.keys():
if (node_rank in self._latest_rdzv_nodes):
return True
return False
def get_comm_world(self, node_rank):
pass
def report_network_check_result(self, node_id: int, normal: bool, elapsed_time: float):
pass |
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
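# Drain the source iterable into the queue from a background thread, stopping early
# after max_len items, then push the sentinel; any exception is forwarded through the
# queue so the consumer can re-raise it.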
try:
self._source_iter = iter(self._source)
for _ in range(len(self._source)):
item = next(self._source_iter)
self._queue.put(item)
self.count += 1
if ((self._max_len is not None) and (self.count >= self._max_len)):
break
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
del self._source_iter |
def iou_masks(mask1: Tensor, mask2: Tensor, n: int):
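# Mean IoU over n classes: build an n x n confusion matrix from the flattened masks via
# bincount, then average diag / (row_sum + col_sum - diag). mask1 acts as ground truth
# and mask2 as prediction; pixels whose label lies outside [0, n) are ignored.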
k = ((mask1 >= 0) & (mask1 < n))
inds = ((n * mask1[k].to(torch.int64)) + mask2[k])
mat = torch.bincount(inds, minlength=(n ** 2)).reshape(n, n)
iu = (torch.diag(mat) / (((mat.sum(1) + mat.sum(0)) - torch.diag(mat)) + 1e-06))
return iu.mean().item() |
def patch_norm_fp32(module):
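# Keep BatchNorm/GroupNorm layers in fp32 when the rest of the model runs in half
# precision: convert the norm layer to float and wrap its forward so half-precision
# inputs are cast to fp32 and outputs cast back. Applied recursively to all children.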
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
module.forward = patch_forward_method(module.forward, torch.half, torch.float)
for child in module.children():
patch_norm_fp32(child)
return module |
def collate(samples):
(graphs, labels, gt_adjs) = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return (batched_graph, torch.cat(tuple(labels), 0), gt_adjs) |
class SplitBias(nn.Module):
def __init__(self, module):
super(SplitBias, self).__init__()
self.module = module
self.add_bias = AddBias(module.bias.data)
self.module.bias = None
def forward(self, input):
x = self.module(input)
x = self.add_bias(x)
return x |
def run_experiment(config):
exp_dir = ((((os.getcwd() + '/data/') + EXP_NAME) + '/') + config.get('exp_name', ''))
logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
json.dump(config, open((exp_dir + '/params.json'), 'w'), indent=2, sort_keys=True, cls=ClassEncoder)
env = normalize(config['env'](reset_every_episode=True, task=config['task']))
dynamics_model = MLPDynamicsModel(name='dyn_model', env=env, learning_rate=config['learning_rate'], hidden_sizes=config['hidden_sizes'], valid_split_ratio=config['valid_split_ratio'], rolling_average_persitency=config['rolling_average_persitency'], hidden_nonlinearity=config['hidden_nonlinearity'], batch_size=config['batch_size'])
policy = MPCController(name='policy', env=env, dynamics_model=dynamics_model, discount=config['discount'], n_candidates=config['n_candidates'], horizon=config['horizon'], use_cem=config['use_cem'], num_cem_iters=config['num_cem_iters'])
sampler = Sampler(env=env, policy=policy, num_rollouts=config['num_rollouts'], max_path_length=config['max_path_length'], n_parallel=config['n_parallel'])
sample_processor = ModelSampleProcessor(recurrent=False)
algo = Trainer(env=env, policy=policy, dynamics_model=dynamics_model, sampler=sampler, sample_processor=sample_processor, n_itr=config['n_itr'], initial_random_samples=config['initial_random_samples'], dynamics_model_max_epochs=config['dynamic_model_epochs'])
algo.train() |
_module()
class Collect3D(object):
def __init__(self, keys, meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img', 'pad_shape', 'scale_factor', 'flip', 'cam_intrinsic', 'pcd_horizontal_flip', 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', 'img_norm_cfg', 'rect', 'Trv2c', 'P2', 'pcd_trans', 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', 'pts_filename', 'transformation_3d_flow')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_metas = {}
for key in self.meta_keys:
if (key in results):
img_metas[key] = results[key]
data['img_metas'] = DC(img_metas, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return (self.__class__.__name__ + f'(keys={self.keys}, meta_keys={self.meta_keys})') |
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print('{}'.format(args).replace(', ', ',\n'))
device = torch.device(args.device)
seed = (args.seed + misc.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train = build_dataset(is_train=True, args=args)
dataset_val = build_dataset(is_train=False, args=args)
if True:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
print(('Sampler_train = %s' % str(sampler_train)))
if args.dist_eval:
if ((len(dataset_val) % num_tasks) != 0):
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if ((global_rank == 0) and (args.log_dir is not None) and (not args.eval)):
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
mixup_fn = None
mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
if mixup_active:
print('Mixup is activated!')
mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes)
if ('pvt' in args.model):
model = models_pvt.__dict__[args.model](num_classes=args.nb_classes, drop_path_rate=args.drop_path)
elif ('swin' in args.model):
model = models_swin.__dict__[args.model](num_classes=args.nb_classes, drop_path_rate=args.drop_path)
else:
model = models_vit.__dict__[args.model](num_classes=args.nb_classes, drop_path_rate=args.drop_path, global_pool=args.global_pool)
if (args.finetune and (not args.eval)):
checkpoint = torch.load(args.finetune, map_location='cpu')
print(('Load pre-trained checkpoint from: %s' % args.finetune))
if ('model' in checkpoint):
checkpoint_model = checkpoint['model']
else:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if ((k in checkpoint_model) and (checkpoint_model[k].shape != state_dict[k].shape)):
print(f'Removing key {k} from pretrained checkpoint')
del checkpoint_model[k]
interpolate_pos_embed(model, checkpoint_model)
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
if args.global_pool:
assert (set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'})
else:
assert (set(msg.missing_keys) == {'head.weight', 'head.bias'})
trunc_normal_(model.head.weight, std=2e-05)
model.to(device)
model_without_ddp = model
n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
print(('Model = %s' % str(model_without_ddp)))
print(('number of params (M): %.2f' % (n_parameters / 1000000.0)))
eff_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
if (args.lr is None):
args.lr = ((args.blr * eff_batch_size) / 256)
print(('base lr: %.2e' % ((args.lr * 256) / eff_batch_size)))
print(('actual lr: %.2e' % args.lr))
print(('accumulate grad iterations: %d' % args.accum_iter))
print(('effective batch size: %d' % eff_batch_size))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=model_without_ddp.no_weight_decay(), layer_decay=args.layer_decay)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
loss_scaler = NativeScaler()
if (mixup_fn is not None):
criterion = SoftTargetCrossEntropy()
elif (args.smoothing > 0.0):
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print(('criterion = %s' % str(criterion)))
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
exit(0)
print(f'Start training for {args.epochs} epochs')
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, mixup_fn, log_writer=log_writer, args=args)
if (args.output_dir and (((epoch % 25) == 0) or ((epoch + 1) == args.epochs))):
misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats['acc1'])
print(f'Max accuracy: {max_accuracy:.2f}%')
if (log_writer is not None):
log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
if (args.output_dir and misc.is_main_process()):
if (log_writer is not None):
log_writer.flush()
with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
f.write((json.dumps(log_stats) + '\n'))
total_time = (time.time() - start_time)
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str)) |
def search_absorbe_bn(model, prev=None, remove_bn=True, verbose=False):
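# Recursively walk the module tree and fold each BatchNorm layer into the preceding
# absorbing layer (e.g. a convolution), optionally removing the BN afterwards.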
with torch.no_grad():
for m in model.children():
if (is_bn(m) and is_absorbing(prev)):
absorb_bn(prev, m, remove_bn=remove_bn, verbose=verbose)
search_absorbe_bn(m, remove_bn=remove_bn, verbose=verbose)
prev = m |
class DMCPInvertedResidual(USInvertedResidual):
def __init__(self, inplanes, outplanes, stride, t, expand):
super(DMCPInvertedResidual, self).__init__(inplanes, outplanes, stride, t, expand)
global Alpha
self.alpha1 = (Alpha((inplanes * t)) if (t != 1) else None)
self.alpha2 = (Alpha(outplanes) if (not ((stride == 1) and (inplanes == outplanes))) else None)
self.alpha_training = False
def forward(self, x):
residual = x
if (self.t != 1):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.alpha_training:
out = self.alpha1(out)
else:
out = x
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.alpha_training:
out = self.alpha1(out)
out = self.conv3(out)
out = self.bn3(out)
if self.alpha_training:
out = self.alpha2(out)
if ((self.stride == 1) and (self.inplanes == self.outplanes)):
out += residual
return out
def expected_flops(self, input_alpha, in_height, in_width):
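# Expected FLOPs of the block under the current Alpha channel distributions: expected
# input/output channel counts from the Alpha modules are fed into conv_compute_flops
# for each conv; blocks with an identity shortcut (or t == 1) reuse the incoming alpha
# for the missing stage.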
if ((self.stride == 1) and (self.inplanes == self.outplanes) and (self.alpha2 is None)):
self.alpha2 = input_alpha
if ((self.t == 1) and (self.alpha1 is None)):
self.alpha1 = input_alpha
e_in_channel = input_alpha.expected_channel()
if (not (self.t == 1)):
e_conv1_out = self.alpha1.expected_channel()
(e_conv1_flops, out_height, out_width) = conv_compute_flops(self.conv1, in_height, in_width, e_in_channel, e_conv1_out)
else:
e_conv1_out = e_in_channel
e_conv1_flops = 0
out_height = in_height
out_width = in_width
e_conv3_out = self.alpha2.expected_channel()
(e_conv2_flops, out_height, out_width) = conv_compute_flops(self.conv2, out_height, out_width, e_conv1_out, e_conv1_out)
(e_conv3_flops, out_height, out_width) = conv_compute_flops(self.conv3, out_height, out_width, e_conv1_out, e_conv3_out)
e_flops = ((e_conv1_flops + e_conv2_flops) + e_conv3_flops)
return (e_flops, out_height, out_width)
def direct_sampling(self, ch_in):
if (self.t == 1):
ch_out = ch_in
else:
self.conv1.set_input_width(specific_ch=ch_in)
ch_out = self.alpha1.direct_sampling()
self.conv1.set_output_width(specific_ch=ch_out)
self.bn1.set_output_width(specific_ch=ch_out)
self.conv2.set_input_width(specific_ch=ch_out)
self.conv2.set_output_width(specific_ch=ch_out)
self.bn2.set_output_width(specific_ch=ch_out)
self.conv3.set_input_width(specific_ch=ch_out)
if ((self.stride == 1) and (self.inplanes == self.outplanes)):
ch_out = ch_in
else:
ch_out = self.alpha2.direct_sampling()
self.conv3.set_output_width(specific_ch=ch_out)
self.bn3.set_output_width(specific_ch=ch_out)
return ch_out
def expected_sampling(self, ch_in, e_ch_in):
if (self.t == 1):
(ch_out, e_ch) = (ch_in, e_ch_in)
else:
self.conv1.set_input_width(specific_ch=e_ch_in)
(ch_out, e_ch) = self.alpha1.expected_sampling()
self.conv1.set_output_width(specific_ch=e_ch)
self.bn1.set_output_width(specific_ch=e_ch)
self.conv2.set_input_width(specific_ch=e_ch)
self.conv2.set_output_width(specific_ch=e_ch)
self.bn2.set_output_width(specific_ch=e_ch)
self.conv3.set_input_width(specific_ch=e_ch)
if ((self.stride == 1) and (self.inplanes == self.outplanes)):
(ch_out, e_ch) = (ch_in, e_ch_in)
else:
(ch_out, e_ch) = self.alpha2.expected_sampling()
self.conv3.set_output_width(specific_ch=e_ch)
self.bn3.set_output_width(specific_ch=e_ch)
return (ch_out, e_ch) |
def ProcessReplaceIndexDescriptor(segment, parent_node_name, affix, edge_attributes=None):
dot_graph = []
label = 'ReplaceIndex({0}, {1})'.format(segment['arguments'][1], segment['arguments'][2])
style = None
if (edge_attributes is not None):
if ('label' in edge_attributes):
label = '{0} {1}'.format(edge_attributes['label'], label)
if ('style' in edge_attributes):
style = 'style={0}'.format(edge_attributes['style'])
attr_string = 'label="{0}"'.format(label)
if (style is not None):
attr_string += ' {0}'.format(style)
dot_graph.append('{0}->{1} [{2}]'.format(GetDotNodeName(segment['arguments'][0])['node'], GetDotNodeName(parent_node_name)['node'], attr_string))
if segment['sub_segments']:
raise Exception('ReplaceIndex can only handle a forwarding descriptor; sub-segments are not allowed')
return dot_graph |
def read_e2e_files(path, tokenizer, lowdata_token=None):
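# Parse an E2E-style file with one "src||tgt" pair per line: prefix each source with an
# optional low-data control token, append the tokenizer's BOS token, and group all
# target strings under their source.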
file_dict = {}
with open(path, 'r') as f:
for line in f:
(src, tgt) = line.strip().split('||')
if (lowdata_token is None):
src = ' {} {}'.format(src, tokenizer.bos_token)
else:
src = ' {} {} {}'.format(lowdata_token, src, tokenizer.bos_token)
if (src not in file_dict):
file_dict[src] = []
file_dict[src].append(tgt)
return file_dict |
def _replace_relu(module):
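# Recursively swap every ReLU/ReLU6 child module for a fresh out-of-place nn.ReLU.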
reassign = {}
for (name, mod) in module.named_children():
_replace_relu(mod)
if ((type(mod) == nn.ReLU) or (type(mod) == nn.ReLU6)):
reassign[name] = nn.ReLU(inplace=False)
for (key, value) in reassign.items():
module._modules[key] = value |
def test_neither_x0_nor_initial_solutions_provided(archive_fixture):
(archive, _) = archive_fixture
sigma_g = 0.05
with pytest.raises(ValueError):
GradientOperatorEmitter(archive, sigma=1.0, sigma_g=sigma_g) |
def evaluate(trainer, datamodule, cfg, stage, is_eval_train=False):
test_res = dict()
try:
trainer.lightning_module.stage = cfg.stage
eval_dataloader = datamodule.eval_dataloader(cfg.evaluation.is_eval_on_test)
ckpt_path = cfg.evaluation[stage].ckpt_path
if (isinstance(datamodule.test_dataset, GalaxyDataset) and (stage == 'predictor')):
logger.warning('Testing on Galaxy test set.')
test_dataloader = datamodule.eval_dataloader(True)
model = load_best_ckpt(trainer, ckpt_path=ckpt_path)
test_preds = trainer.predict(model=model, dataloaders=test_dataloader)
test_res = kaggle_eval(predictions=test_preds, message='test_pred_{}'.format(datetime.now()))
else:
test_res = trainer.test(test_dataloaders=eval_dataloader, ckpt_path=ckpt_path)[0]
test_res = {k: v for (k, v) in test_res.items() if (f'/{cfg.stage}/' in k)}
log_dict(trainer, test_res, is_param=False)
test_res_rep = replace_keys(test_res, 'test/', '')
tosave = dict(test=test_res_rep)
if is_eval_train:
try:
train_dataloader = datamodule.train_dataloader()
train_res = trainer.test(test_dataloaders=train_dataloader, ckpt_path=ckpt_path)[0]
train_res = {k: v for (k, v) in train_res.items() if (f'/{cfg.stage}/' in k)}
tosave['train'] = replace_keys(train_res, 'test/', '')
except:
logger.exception('Failed to evaluate training set. Skipping this error:')
results = pd.DataFrame.from_dict(tosave)
filename = RESULTS_FILE.format(stage=stage)
path = (Path(cfg.paths.results) / filename)
results.to_csv(path, header=True, index=True)
logger.info(f'Logging results to {path}.')
except:
logger.exception('Failed to evaluate. Skipping this error:')
return test_res |
class _LRSchedulerStep(object):
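# Step-based (rather than epoch-based) learning-rate scheduler base class: it records
# each param group's initial_lr into base_lrs, and every call to step() writes the
# values computed by get_lr() / _get_lr_per_group() back into the optimizer's groups.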
def __init__(self, optimizer, last_step=(- 1)):
if (not isinstance(optimizer, Optimizer)):
raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
self.optimizer = optimizer
if (last_step == (- 1)):
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for (i, group) in enumerate(optimizer.param_groups):
if ('initial_lr' not in group):
raise KeyError("param 'initial_lr' is not specified in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
self.step((last_step + 1))
self.last_step = last_step
def get_lr(self):
ret = [self._get_lr_per_group(base_lr) for base_lr in self.base_lrs]
return ret
def _get_lr_per_group(self, base_lr):
raise NotImplementedError
def step(self, step=None):
if (step is None):
step = (self.last_step + 1)
self.last_step = step
for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr |
_checkable
class TransformerLike(EstimatorLikeFit1, Protocol):
def fit(self, X: List[str], y: Optional[str]=None, **fit_params: Any) -> None:
pass
def transform(self, X: DataLike) -> DataLike:
return X
def fit_transform(self, X: DataLike, y: Optional[DataLike]=None) -> DataLike:
return X |
def keys_mapping_old_tea(checkpoint, k_new, replace_dict=[]):
k_old = checkpoint.keys()
for i in k_old:
if ('iSQRT' in i):
i_new = i.replace('iSQRT.', 'TCP.')
i_candidates = [i_new.replace('att_module.conv_1', 'TCP_att.TCA.g1'), i_new.replace('att_module.conv_2', 'TCP_att.TCA.g2'), i_new.replace('att_module.sp_att.conv_theta', 'TCP_att.TSA.conv_phi_1'), i_new.replace('att_module.sp_att.conv_phi', 'TCP_att.TSA.conv_phi_2'), i_new.replace('att_module.sp_att.conv_g', 'TCP_att.TSA.conv_phi0'), i_new.replace('att_module.sp_att', 'TCP_att.TSA'), i_new.replace('att_module', 'TCP_att'), i_new.replace('1.1.', '_bn1.'), i_new.replace('2.1.', '_bn2.'), i_new.replace('.1.', '.'), i_new.replace('.0.', '.')]
i_new = [i for i in i_candidates if (i in k_new)]
assert (len(i_new) != 0), ('invalid TCP layers ' + i)
i_new = i_new[0]
replace_dict.append((i, i_new))
elif (i in k_new):
replace_dict.append((i, i))
else:
raise KeyError(('invalid resume layer ' + i))
for (k, k_new_i) in replace_dict:
v = checkpoint.pop(k)
checkpoint[k_new_i] = v
return checkpoint |
def sklearn_logistic_regression(dataname, train_embeds, train_labels, valid_embeds, valid_labels, test_embeds, test_labels, max_iter=None, tol=0.001, alpha=0.0001):
if (not isinstance(train_embeds, np.ndarray)):
train_embeds = train_embeds.asnumpy()
if (not isinstance(valid_embeds, np.ndarray)):
valid_embeds = valid_embeds.asnumpy()
if (not isinstance(test_embeds, np.ndarray)):
test_embeds = test_embeds.asnumpy()
if (dataname == 'ppi'):
classifier = MultiOutputClassifier(SGDClassifier(loss='log', alpha=alpha, n_jobs=(- 1), max_iter=max_iter, tol=tol))
classifier.fit(train_embeds, train_labels)
elif ((dataname == 'cora') or (dataname == 'reddit')):
classifier = SGDClassifier(loss='log', alpha=alpha, n_jobs=(- 1), max_iter=max_iter, tol=tol)
classifier.fit(train_embeds, train_labels)
else:
raise NotImplementedError
train_pred = classifier.predict(train_embeds)
valid_pred = classifier.predict(valid_embeds)
test_pred = classifier.predict(test_embeds)
train_acc = accuracy_score(y_true=train_labels.reshape(((- 1),)), y_pred=train_pred.reshape(((- 1),)))
valid_acc = accuracy_score(y_true=valid_labels.reshape(((- 1),)), y_pred=valid_pred.reshape(((- 1),)))
test_acc = accuracy_score(y_true=test_labels.reshape(((- 1),)), y_pred=test_pred.reshape(((- 1),)))
train_f1 = f1_score(y_true=train_labels, y_pred=train_pred, average='micro')
valid_f1 = f1_score(y_true=valid_labels, y_pred=valid_pred, average='micro')
test_f1 = f1_score(y_true=test_labels, y_pred=test_pred, average='micro')
return (train_acc, train_f1, valid_acc, valid_f1, test_acc, test_f1) |
def create_model(existing='', is_twohundred=False, is_halffeatures=True):
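# Either build a fresh DenseNet-169/201 encoder with an up-projection decoder (halving
# the filter count at each stage when is_halffeatures is set, with skip connections to
# the encoder's pooling layers and a final single-channel conv), or reload an existing
# .h5 model together with its custom upsampling layer and depth loss.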
if (len(existing) == 0):
print('Loading base model (DenseNet)..')
if is_twohundred:
base_model = applications.DenseNet201(input_shape=(None, None, 3), include_top=False)
else:
base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False)
print('Base model loaded.')
base_model_output_shape = base_model.layers[(- 1)].output.shape
for layer in base_model.layers:
layer.trainable = True
if is_halffeatures:
decode_filters = int((int(base_model_output_shape[(- 1)]) / 2))
else:
decode_filters = int(base_model_output_shape[(- 1)])
def upproject(tensor, filters, name, concat_with):
up_i = BilinearUpSampling2D((2, 2), name=(name + '_upsampling2d'))(tensor)
up_i = Concatenate(name=(name + '_concat'))([up_i, base_model.get_layer(concat_with).output])
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convA'))(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convB'))(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
return up_i
decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
decoder = upproject(decoder, int((decode_filters / 2)), 'up1', concat_with='pool3_pool')
decoder = upproject(decoder, int((decode_filters / 4)), 'up2', concat_with='pool2_pool')
decoder = upproject(decoder, int((decode_filters / 8)), 'up3', concat_with='pool1')
decoder = upproject(decoder, int((decode_filters / 16)), 'up4', concat_with='conv1/relu')
if False:
decoder = upproject(decoder, int((decode_filters / 32)), 'up5', concat_with='input_1')
conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)
model = Model(inputs=base_model.input, outputs=conv3)
else:
if (not existing.endswith('.h5')):
sys.exit('Please provide a correct model file when using [existing] argument.')
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
model = load_model(existing, custom_objects=custom_objects)
print('\nExisting model loaded.\n')
print('Model created.')
return model |
def require_datasets(test_case):
if (not _datasets_available):
return unittest.skip('test requires `datasets`')(test_case)
else:
return test_case |
def compute_error_rate(hyp_wrd_path, ref_wrd_path, unit='word'):
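# Compute per-utterance word/character error rates: strip the trailing "(utt-id)" tag,
# tokenize hypotheses and references, and map the numeric id parsed from each
# hypothesis line to its edit-distance error rate.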
tokenize_line = {'word': (lambda x: re.sub(' \\(.*\\)$', '', x.rstrip()).split()), 'char': (lambda x: list(re.sub(' \\(.*\\)$', '', x.rstrip())))}.get(unit)
if (tokenize_line is None):
raise ValueError(f'{unit} not supported')
inds = [int(re.sub('\\D*(\\d*)\\D*', '\\1', line)) for line in open(hyp_wrd_path)]
hyps = [tokenize_line(line) for line in open(hyp_wrd_path)]
refs = [tokenize_line(line) for line in open(ref_wrd_path)]
assert (len(hyps) == len(refs))
err_rates = [(editdistance.eval(hyp, ref) / len(ref)) for (hyp, ref) in zip(hyps, refs)]
ind_to_err_rates = {i: e for (i, e) in zip(inds, err_rates)}
return ind_to_err_rates |
class FastFocalLoss(nn.Module):
def __init__(self):
super(FastFocalLoss, self).__init__()
def forward(self, out, target, ind, mask, cat):
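# Focal loss for heatmap outputs: the negative loss over all pixels is down-weighted by
# (1 - target)^4, while the positive loss is gathered only at the 'ind' locations of
# the annotated category 'cat' and normalized by the number of positives.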
mask = mask.float()
gt = torch.pow((1 - target), 4)
neg_loss = ((torch.log((1 - out)) * torch.pow(out, 2)) * gt)
neg_loss = neg_loss.sum()
pos_pred_pix = _transpose_and_gather_feat(out, ind)
pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2))
num_pos = mask.sum()
pos_loss = ((torch.log(pos_pred) * torch.pow((1 - pos_pred), 2)) * mask.unsqueeze(2))
pos_loss = pos_loss.sum()
if (num_pos == 0):
return (- neg_loss)
return ((- (pos_loss + neg_loss)) / num_pos) |
def compute_integrated_gradients(inp, baseline, net, target, n_steps=100):
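# Approximate integrated gradients: sample n_steps points on the straight line from the
# baseline to the input, average the gradients along that path (excluding the final
# point), and scale by (inp - baseline); the gradient at the input is also returned.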
path = [(baseline + (a * (inp - baseline))) for a in np.linspace(0, 1, n_steps)]
grads = [compute_gradient(func, x, net=net, target=target) for x in path]
ig = ((inp - baseline) * torch.cat(grads[:(- 1)]).mean(dim=0, keepdims=True))
return (ig, grads[(- 1)]) |
class EnvironmentCommand(BaseDiffusersCLICommand):
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser('env')
download_parser.set_defaults(func=info_command_factory)
def run(self):
hub_version = huggingface_hub.__version__
pt_version = 'not installed'
pt_cuda_available = 'NA'
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
transformers_version = 'not installed'
if is_transformers_available():
import transformers
transformers_version = transformers.__version__
accelerate_version = 'not installed'
if is_accelerate_available():
import accelerate
accelerate_version = accelerate.__version__
xformers_version = 'not installed'
if is_xformers_available():
import xformers
xformers_version = xformers.__version__
info = {'`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})', 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>'}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(info))
return info
def format_dict(d):
return ('\n'.join([f'- {prop}: {val}' for (prop, val) in d.items()]) + '\n') |
class JavaValue(object):
def jvm_class_constructor(self):
name = ('create' + self.__class__.__name__)
print(('creating: ' + name))
return name
def __init__(self, jvalue, bigdl_type, *args):
self.value = (jvalue if jvalue else callBigDlFunc(bigdl_type, self.jvm_class_constructor(), *args))
self.bigdl_type = bigdl_type
def __str__(self):
return self.value.toString() |
def WriteToFile(file_path, locations, scales, descriptors, attention, orientations=None):
serialized_data = SerializeToString(locations, scales, descriptors, attention, orientations)
with tf.gfile.FastGFile(file_path, 'w') as f:
f.write(serialized_data) |
def test_amateur_draft(bref_get_monkeypatch: Callable, sample_html: str, sample_processed_result: pd.DataFrame) -> None:
expected_url = _URL.format(year=2019, draft_round=1)
bref_get_monkeypatch(sample_html, expected_url)
result = amateur_draft(2019, 1)
assert (result is not None)
assert (not result.empty)
pd.testing.assert_frame_equal(result, sample_processed_result, check_dtype=False) |
def _check_spark_version(sc, report_warn):
version_info = _get_bigdl_verion_conf()
(c_major, c_feature, c_maintenance) = _split_full_version(version_info['spark_version'])
(r_major, r_feature, r_maintenance) = _split_full_version(sc.version)
error_message = ('\n The compile time spark version is not compatible with the spark runtime version.\n Compile time version is %s, runtime version is %s. If you want to bypass this check,\n please set spark.analytics.zoo.versionCheck to false, and if you want to only report\n a warning message, please set spark.analytics.zoo.versionCheck.warning to true.\n ' % (version_info['spark_version'], sc.version))
if (c_major != r_major):
if (not report_warn):
invalidInputError(False, error_message)
else:
warnings.warn(error_message)
elif (not ((c_maintenance == r_maintenance) and (c_feature == r_feature))):
warnings.warn(((('The compile time spark version may not be compatible with ' + 'the Spark runtime version. ') + ('Compile time version is %s, ' % version_info['spark_version'])) + ('runtime version is %s' % sc.version)))
def expand_dim(x: ty.T, /, num: ty.U[(int, ty.S[int])], dim: ty.U[(int, ty.S[int])]=0, insert: bool=False) -> ty.T:
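# Expand tensor x by `num` along each dimension in `dim`: scalar arguments are
# broadcast to lists, and with insert=True the target dimensions are first created via
# unsqueeze before expanding.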
if isinstance(num, int):
if isinstance(dim, int):
(num, dim) = ([num], [dim])
else:
num = ([num] * len(dim))
elif (len(num) != len(dim)):
raise ValueError(f'Non-matching expansion and dims. ({len(num)} vs. {len(dim)})')
for d in (dim if insert else ()):
x = x.unsqueeze(d)
sizes = ([(- 1)] * x.ndim)
for (n, d) in zip(num, dim):
sizes[d] = n
return x.expand(sizes) |
class PR(ExplainerMixin):
available_explanations = ['perf']
explainer_type = 'perf'
def __init__(self, model, feature_names=None, feature_types=None):
self.model = model
self.feature_names = feature_names
self.feature_types = feature_types
def explain_perf(self, X, y, name=None):
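# Precision-recall explanation for a binary classifier: validate and unify X/y, score
# the model, then package precision/recall/thresholds, average precision, and a
# histogram of absolute residuals into a PRExplanation object.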
if (name is None):
name = gen_name_from_class(self)
y = clean_dimensions(y, 'y')
if (y.ndim != 1):
raise ValueError('y must be 1 dimensional')
(X, n_samples) = preclean_X(X, self.feature_names, self.feature_types, len(y))
(predict_fn, n_classes, classes) = determine_classes(self.model, X, n_samples)
if (n_classes != 2):
raise Exception('Only binary classification supported in the PR class')
predict_fn = unify_predict_fn(predict_fn, X, 1)
(X, feature_names, feature_types) = unify_data(X, n_samples, self.feature_names, self.feature_types, True, 0)
y = typify_classification(y)
if (classes is None):
(classes, y) = np.unique(y, return_inverse=True)
if (len(classes) != n_classes):
raise ValueError('class number mismatch')
else:
invert_classes = dict(zip(classes, count()))
y = np.array([invert_classes[el] for el in y], dtype=np.int64)
scores = predict_fn(X)
(precision, recall, thresh) = precision_recall_curve(y, scores)
ap = average_precision_score(y, scores)
abs_residuals = np.abs((y - scores))
(counts, values) = np.histogram(abs_residuals, bins='doane')
overall_dict = {'type': 'perf_curve', 'density': {'names': values, 'scores': counts}, 'scores': scores, 'x_values': recall, 'y_values': precision, 'threshold': thresh, 'auc': ap}
internal_obj = {'overall': overall_dict, 'specific': None}
return PRExplanation('perf', internal_obj, feature_names=feature_names, feature_types=feature_types, name=name) |
def nms(dets, thresh, force_cpu=False):
if (dets.shape[0] == 0):
return []
if force_cpu:
return cpu_nms(dets, thresh)
return gpu_nms(dets, thresh) |
class ProcessorMixin(PushToHubMixin):
attributes = ['feature_extractor', 'tokenizer']
feature_extractor_class = None
tokenizer_class = None
_auto_class = None
def __init__(self, *args, **kwargs):
for key in kwargs:
if (key not in self.attributes):
raise TypeError(f'Unexpected keyword argument {key}.')
for (arg, attribute_name) in zip(args, self.attributes):
if (attribute_name in kwargs):
raise TypeError(f'Got multiple values for argument {attribute_name}.')
else:
kwargs[attribute_name] = arg
if (len(kwargs) != len(self.attributes)):
raise ValueError(f"This processor requires {len(self.attributes)} arguments: {', '.join(self.attributes)}. Got {len(args)} arguments instead.")
for (attribute_name, arg) in kwargs.items():
class_name = getattr(self, f'{attribute_name}_class')
class_name = AUTO_TO_BASE_CLASS_MAPPING.get(class_name, class_name)
if isinstance(class_name, tuple):
proper_class = tuple((getattr(transformers_module, n) for n in class_name if (n is not None)))
else:
proper_class = getattr(transformers_module, class_name)
if (not isinstance(arg, proper_class)):
raise ValueError(f'Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected.')
setattr(self, attribute_name, arg)
def __repr__(self):
attributes_repr = [f'- {name}: {repr(getattr(self, name))}' for name in self.attributes]
attributes_repr = '\n'.join(attributes_repr)
return f'''{self.__class__.__name__}:
{attributes_repr}'''
def save_pretrained(self, save_directory, push_to_hub: bool=False, **kwargs):
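# Save each wrapped attribute (e.g. tokenizer, feature extractor) into save_directory
# after tagging it with this processor's class name; when a custom auto class is
# registered, also save the custom-object config and strip the temporary 'auto_map'
# entry from tokenizer init kwargs; optionally create a Hub repo and upload the files.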
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop('commit_message', None)
repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[(- 1)])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
if (self._auto_class is not None):
attrs = [getattr(self, attribute_name) for attribute_name in self.attributes]
configs = [(a.init_kwargs if isinstance(a, PreTrainedTokenizerBase) else a) for a in attrs]
custom_object_save(self, save_directory, config=configs)
for attribute_name in self.attributes:
attribute = getattr(self, attribute_name)
if hasattr(attribute, '_set_processor_class'):
attribute._set_processor_class(self.__class__.__name__)
attribute.save_pretrained(save_directory)
if (self._auto_class is not None):
for attribute_name in self.attributes:
attribute = getattr(self, attribute_name)
if isinstance(attribute, PreTrainedTokenizerBase):
del attribute.init_kwargs['auto_map']
if push_to_hub:
self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('use_auth_token'))
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
return cls(*args)
def register_for_auto_class(cls, auto_class='AutoProcessor'):
if (not isinstance(auto_class, str)):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if (not hasattr(auto_module, auto_class)):
raise ValueError(f'{auto_class} is not a valid auto class.')
cls._auto_class = auto_class
def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
args = []
for attribute_name in cls.attributes:
class_name = getattr(cls, f'{attribute_name}_class')
if isinstance(class_name, tuple):
classes = tuple(((getattr(transformers_module, n) if (n is not None) else None) for n in class_name))
use_fast = kwargs.get('use_fast', True)
if (use_fast and (classes[1] is not None)):
attribute_class = classes[1]
else:
attribute_class = classes[0]
else:
attribute_class = getattr(transformers_module, class_name)
args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs))
return args
def model_input_names(self):
first_attribute = getattr(self, self.attributes[0])
return getattr(first_attribute, 'model_input_names', None) |
def normalize_angle_deg(angle):
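# Wrap an angle given in degrees into the range [0, 360).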
import math
while (angle < 0):
angle += 360
angle = math.fmod(angle, 360.0)
return angle |
def generate(prompt, topic, affect, knob):
knob /= 100
print('Received request\n', 'Prompt: ', prompt, 'topic: ', topic, 'affect: ', affect, 'knob: ', knob)
if ((prompt == 'Enter prefix') or (prompt == '')):
return ('', False)
emit('word', {'value': 'Generating...'}, broadcast=True)
result = run_pplm_example(affect_weight=1, knob=knob, cond_text=prompt, num_samples=1, bag_of_words=topic, bag_of_words_affect=affect, length=500, stepsize=0.01, sample=True, num_iterations=3, window_length=5, gamma=1.5, gm_scale=0.95, kl_scale=0.01, verbosity='quiet')
print(result)
return (result, True) |
class Speedometer(object):
def __init__(self, batch_size, frequent=50):
self.batch_size = batch_size
self.frequent = frequent
self.init = False
self.tic = 0
self.last_count = 0
def __call__(self, param):
count = param.nbatch
if (self.last_count > count):
self.init = False
self.last_count = count
if self.init:
if ((count % self.frequent) == 0):
speed = ((self.frequent * self.batch_size) / (time.time() - self.tic))
s = ''
if (param.eval_metric is not None):
(name, value) = param.eval_metric.get()
s = ('Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-' % (param.epoch, count, speed))
for (n, v) in zip(name, value):
s += ('%s=%f,\t' % (n, v))
else:
s = ('Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec' % (param.epoch, count, speed))
logging.info(s)
print(s)
self.tic = time.time()
else:
self.init = True
self.tic = time.time() |
class ZDT1Modified(FloatProblem):
def __init__(self, number_of_variables: int=30):
super(ZDT1Modified, self).__init__()
self.number_of_variables = number_of_variables
self.number_of_objectives = 2
self.number_of_constraints = 0
self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
self.obj_labels = ['f(x)', 'f(y)']
self.lower_bound = (self.number_of_variables * [0.0])
self.upper_bound = (self.number_of_variables * [1.0])
def evaluate(self, solution: FloatSolution) -> FloatSolution:
g = self.__eval_g(solution)
h = self.__eval_h(solution.variables[0], g)
solution.objectives[0] = solution.variables[0]
solution.objectives[1] = (h * g)
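# The nested loop below only burns CPU time (its result s is discarded), making each
# evaluation artificially expensive, e.g. for exercising parallel evaluators.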
s: float = 0.0
for i in range(1000):
for j in range(1000):
s += (((i * 0.235) / 1.234) + (1.23525 * j))
return solution
def __eval_g(self, solution: FloatSolution):
g = (sum(solution.variables) - solution.variables[0])
constant = (9.0 / (solution.number_of_variables - 1))
g = (constant * g)
g = (g + 1.0)
return g
def __eval_h(self, f: float, g: float) -> float:
return (1.0 - sqrt((f / g)))
def get_name(self):
return 'ZDT1' |
class Angrop():
def __init__(self, binary, input, job, ropchain, bad_chars):
self.binary = binary
self.input = input
self.job = job
self.logger = job.logger
self.ropchain = ropchain
self.bad_chars = bad_chars
def run(self, timeout):
from os.path import abspath, dirname, join
runner = abspath(join(dirname(__file__), 'angrop_runner.py'))
cmd = ['/usr/bin/python3', runner, self.binary, self.ropchain]
if self.bad_chars:
cmd += [self.bad_chars]
self.logger.debug('RUN angrop runner {}'.format(' '.join(cmd)))
process = Popen(cmd, stderr=STDOUT, stdout=PIPE)
try:
stdout = process.communicate(timeout=timeout)[0]
self.logger.debug('angrop runner output:')
self.logger.debug(stdout.decode(errors='ignore'))
except TimeoutExpired:
process.kill()
self.logger.critical('FAIL TIMEOUT')
exit(3)
if (process.returncode != 0):
self.logger.error('Compilation ERROR with {} (angrop)'.format(process.returncode))
exit(1) |
.dataclass
class FlaxImagePipelineOutput(BaseOutput):
images: Union[(List[PIL.Image.Image], np.ndarray)] |
class Entry(object):
def __init__(self, value, new_value_type):
self.value = value
self.new_value_type = new_value_type
def update(self, new_value):
if self.new_value_type:
assert isinstance(new_value, self.new_value_type), f'{new_value}, {self.new_value_type}'
self.value += self.preprocess(new_value)
def preprocess(self, value):
return value |
def simxUnpackInts(intsPackedInString):
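# Unpack a packed byte string into a list of little-endian signed 32-bit integers.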
b = []
for i in range(int((len(intsPackedInString) / 4))):
b.append(struct.unpack('<i', intsPackedInString[(4 * i):(4 * (i + 1))])[0])
return b |
def make_output_directory(output_dir, model_name, multiple_model_mode):
if multiple_model_mode:
prediction_dir = os.path.join(output_dir, 'predictions', model_name)
else:
prediction_dir = os.path.join(output_dir, 'predictions')
os.makedirs(prediction_dir, exist_ok=True)
return prediction_dir |
def main():
args = parse_args()
handler = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[handler])
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.info(accelerator.state)
logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
if (args.seed is not None):
set_seed(args.seed)
if accelerator.is_main_process:
if args.push_to_hub:
if (args.hub_model_id is None):
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
if (args.dataset_name is not None):
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if (args.train_file is not None):
data_files['train'] = args.train_file
if (args.validation_file is not None):
data_files['validation'] = args.validation_file
extension = args.train_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files)
if args.debug:
for split in raw_datasets.keys():
raw_datasets[split] = raw_datasets[split].select(range(100))
if (raw_datasets['train'] is not None):
column_names = raw_datasets['train'].column_names
features = raw_datasets['train'].features
else:
column_names = raw_datasets['validation'].column_names
features = raw_datasets['validation'].features
if (args.text_column_name is not None):
text_column_name = args.text_column_name
elif ('tokens' in column_names):
text_column_name = 'tokens'
else:
text_column_name = column_names[0]
if (args.label_column_name is not None):
label_column_name = args.label_column_name
elif (f'{args.task_name}_tags' in column_names):
label_column_name = f'{args.task_name}_tags'
else:
label_column_name = column_names[1]
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = (unique_labels | set(label))
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
else:
label_list = get_label_list(raw_datasets['train'][label_column_name])
num_labels = len(label_list)
b_to_i_label = []
for (idx, label) in enumerate(label_list):
if (label.startswith('B-') and (label.replace('B-', 'I-') in label_list)):
b_to_i_label.append(label_list.index(label.replace('B-', 'I-')))
else:
b_to_i_label.append(idx)
if args.config_name:
config = LukeConfig.from_pretrained(args.config_name, num_labels=num_labels)
elif args.model_name_or_path:
config = LukeConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
else:
logger.warning('You are instantiating a new config instance from scratch.')
tokenizer_name_or_path = (args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
if (not tokenizer_name_or_path):
raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script. You can do it from another script, save it, and load it from here, using --tokenizer_name.')
tokenizer = LukeTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=False, task='entity_span_classification', max_entity_length=args.max_entity_length, max_mention_length=args.max_mention_length)
if args.model_name_or_path:
model = LukeForEntitySpanClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
else:
logger.info('Training new model from scratch')
model = LukeForEntitySpanClassification.from_config(config)
model.resize_token_embeddings(len(tokenizer))
padding = ('max_length' if args.pad_to_max_length else False)
def compute_sentence_boundaries_for_luke(examples):
sentence_boundaries = []
for tokens in examples[text_column_name]:
sentence_boundaries.append([0, len(tokens)])
examples['sentence_boundaries'] = sentence_boundaries
return examples
def compute_entity_spans_for_luke(examples):
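# Build LUKE entity-span candidates: convert each token sequence to a raw text string,
# record character-level start/end positions per word, enumerate all word spans whose
# subword length fits max_mention_length (up to max_entity_length spans), and attach
# the gold label where a span's character boundaries match an annotated word (0
# otherwise). Sentences are truncated so the subword count stays within max_length - 2.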
all_entity_spans = []
texts = []
all_labels_entity_spans = []
all_original_entity_spans = []
for (labels, tokens, sentence_boundaries) in zip(examples[label_column_name], examples[text_column_name], examples['sentence_boundaries']):
subword_lengths = [len(tokenizer.tokenize(token)) for token in tokens]
total_subword_length = sum(subword_lengths)
(_, context_end) = sentence_boundaries
if (total_subword_length > (args.max_length - 2)):
cur_length = sum(subword_lengths[:context_end])
idx = (context_end - 1)
while (cur_length > (args.max_length - 2)):
cur_length -= subword_lengths[idx]
context_end -= 1
idx -= 1
text = ''
sentence_words = tokens[:context_end]
sentence_subword_lengths = subword_lengths[:context_end]
word_start_char_positions = []
word_end_char_positions = []
labels_positions = {}
for (word, label) in zip(sentence_words, labels):
if ((word[0] == "'") or ((len(word) == 1) and is_punctuation(word))):
text = text.rstrip()
word_start_char_positions.append(len(text))
text += word
word_end_char_positions.append(len(text))
text += ' '
labels_positions[(word_start_char_positions[(- 1)], word_end_char_positions[(- 1)])] = label
text = text.rstrip()
texts.append(text)
entity_spans = []
labels_entity_spans = []
original_entity_spans = []
for word_start in range(len(sentence_words)):
for word_end in range(word_start, len(sentence_words)):
if ((sum(sentence_subword_lengths[word_start:word_end]) <= tokenizer.max_mention_length) and (len(entity_spans) < tokenizer.max_entity_length)):
entity_spans.append((word_start_char_positions[word_start], word_end_char_positions[word_end]))
original_entity_spans.append((word_start, (word_end + 1)))
if ((word_start_char_positions[word_start], word_end_char_positions[word_end]) in labels_positions):
labels_entity_spans.append(labels_positions[(word_start_char_positions[word_start], word_end_char_positions[word_end])])
else:
labels_entity_spans.append(0)
all_entity_spans.append(entity_spans)
all_labels_entity_spans.append(labels_entity_spans)
all_original_entity_spans.append(original_entity_spans)
examples['entity_spans'] = all_entity_spans
examples['text'] = texts
examples['labels_entity_spans'] = all_labels_entity_spans
examples['original_entity_spans'] = all_original_entity_spans
return examples
def tokenize_and_align_labels(examples):
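# Tokenize each text together with its entity-span candidates; when padding to
# max_length, the span labels and original spans are padded up to max_entity_length,
# otherwise they are truncated to at most max_entity_length entries.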
entity_spans = []
for v in examples['entity_spans']:
entity_spans.append(list(map(tuple, v)))
tokenized_inputs = tokenizer(examples['text'], entity_spans=entity_spans, max_length=args.max_length, padding=padding, truncation=True)
if (padding == 'max_length'):
tokenized_inputs['labels'] = padding_tensor(examples['labels_entity_spans'], (- 100), tokenizer.padding_side, tokenizer.max_entity_length)
tokenized_inputs['original_entity_spans'] = padding_tensor(examples['original_entity_spans'], ((- 1), (- 1)), tokenizer.padding_side, tokenizer.max_entity_length)
tokenized_inputs[label_column_name] = padding_tensor(examples[label_column_name], (- 1), tokenizer.padding_side, tokenizer.max_entity_length)
else:
tokenized_inputs['labels'] = [ex[:tokenizer.max_entity_length] for ex in examples['labels_entity_spans']]
tokenized_inputs['original_entity_spans'] = [ex[:tokenizer.max_entity_length] for ex in examples['original_entity_spans']]
tokenized_inputs[label_column_name] = [ex[:tokenizer.max_entity_length] for ex in examples[label_column_name]]
return tokenized_inputs
with accelerator.main_process_first():
raw_datasets = raw_datasets.map(compute_sentence_boundaries_for_luke, batched=True, desc='Adding sentence boundaries')
raw_datasets = raw_datasets.map(compute_entity_spans_for_luke, batched=True, desc='Adding sentence spans')
processed_raw_datasets = raw_datasets.map(tokenize_and_align_labels, batched=True, remove_columns=raw_datasets['train'].column_names, desc='Running tokenizer on dataset')
train_dataset = processed_raw_datasets['train']
eval_dataset = processed_raw_datasets['validation']
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForLukeTokenClassification(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
device = accelerator.device
model.to(device)
(model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
else:
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
metric = load_metric('seqeval')
def get_luke_labels(outputs, ner_tags, original_entity_spans):
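# Convert LUKE span logits into token-level tag sequences: pick the best non-background
# label per candidate span, then greedily assign spans in order of decreasing logit,
# skipping any span that overlaps an already-labelled position (label_list[0] is the
# background/'O' label).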
true_predictions = []
true_labels = []
for (output, original_spans, tags) in zip(outputs.logits, original_entity_spans, ner_tags):
true_tags = [val for val in tags if (val != (- 1))]
true_original_spans = [val for val in original_spans if (val != ((- 1), (- 1)))]
max_indices = torch.argmax(output, axis=1)
max_logits = torch.max(output, axis=1).values
predictions = []
for (logit, index, span) in zip(max_logits, max_indices, true_original_spans):
if (index != 0):
predictions.append((logit, span, label_list[index]))
predicted_sequence = ([label_list[0]] * len(true_tags))
for (_, span, label) in sorted(predictions, key=(lambda o: o[0]), reverse=True):
if all([(o == label_list[0]) for o in predicted_sequence[span[0]:span[1]]]):
predicted_sequence[span[0]] = label
if ((span[1] - span[0]) > 1):
predicted_sequence[(span[0] + 1):span[1]] = ([label] * ((span[1] - span[0]) - 1))
true_predictions.append(predicted_sequence)
true_labels.append([label_list[tag_id] for tag_id in true_tags])
return (true_predictions, true_labels)
def compute_metrics():
results = metric.compute()
if args.return_entity_level_metrics:
final_results = {}
for (key, value) in results.items():
if isinstance(value, dict):
for (n, v) in value.items():
final_results[f'{key}_{n}'] = v
else:
final_results[key] = value
return final_results
else:
return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for (step, batch) in enumerate(train_dataloader):
_ = batch.pop('original_entity_spans')
outputs = model(**batch)
loss = outputs.loss
loss = (loss / args.gradient_accumulation_steps)
accelerator.backward(loss)
if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if (completed_steps >= args.max_train_steps):
break
model.eval()
for (step, batch) in enumerate(eval_dataloader):
original_entity_spans = batch.pop('original_entity_spans')
with torch.no_grad():
outputs = model(**batch)
(preds, refs) = get_luke_labels(outputs, batch[label_column_name], original_entity_spans)
metric.add_batch(predictions=preds, references=refs)
eval_metric = compute_metrics()
accelerator.print(f'epoch {epoch}:', eval_metric)
if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
if (args.output_dir is not None):
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True) |
def load_test_data(args, logger, processor, task_name, label_list, tokenizer, output_mode, k=None):
if (task_name == 'vua'):
eval_examples = processor.get_test_examples(args.data_dir)
elif (task_name == 'trofi'):
eval_examples = processor.get_test_examples(args.data_dir, k)
else:
raise "task_name should be 'vua' or 'trofi'!"
if (args.model_type == 'BERT_BASE'):
eval_features = convert_two_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if (args.model_type in ['BERT_SEQ', 'MELBERT_SPV']):
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode, args)
if (args.model_type in ['MELBERT_MIP', 'MELBERT']):
eval_features = convert_examples_to_two_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode, args)
logger.info('***** Running evaluation *****')
if (args.model_type in ['MELBERT_MIP', 'MELBERT']):
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_guids = [f.guid for f in eval_features]
all_idx = torch.tensor([i for i in range(len(eval_features))], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
all_input_ids_2 = torch.tensor([f.input_ids_2 for f in eval_features], dtype=torch.long)
all_input_mask_2 = torch.tensor([f.input_mask_2 for f in eval_features], dtype=torch.long)
all_segment_ids_2 = torch.tensor([f.segment_ids_2 for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_idx, all_input_ids_2, all_input_mask_2, all_segment_ids_2)
else:
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_guids = [f.guid for f in eval_features]
all_idx = torch.tensor([i for i in range(len(eval_features))], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_idx)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
return (all_guids, eval_dataloader) |
class Videodatasets_RGBD(Dataset):
def __init__(self, dataset_root, ground_truth1, typ1, ground_truth2, typ2, sample_duration=16, phase='train'):
def get_data_list_and_label(data_df, typ):
T = 0
return [(lambda arr: ('/'.join(arr[T].split('/')[1:]), int(arr[1]), int(arr[2])))(i[:(- 1)].split(' ')) for i in open(data_df).readlines()]
self.dataset_root = dataset_root
self.sample_duration = sample_duration
self.phase = phase
(self.typ1, self.typ2) = (typ1, typ2)
self.transform = transforms.Compose([Normaliztion(), transforms.ToTensor()])
lines = filter((lambda x: (x[1] > 7)), get_data_list_and_label(ground_truth1, typ1))
lines2 = filter((lambda x: (x[1] > 7)), get_data_list_and_label(ground_truth2, typ2))
self.inputs = list(lines)
self.inputs2 = list(lines2)
def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):
if (self.phase == 'train'):
(left, top) = (random.randint(0, (resize[0] - crop_size)), random.randint(0, (resize[1] - crop_size)))
is_flip = (True if (random.uniform(0, 1) > flip) else False)
else:
(left, top) = (((resize[0] - crop_size) // 2), ((resize[1] - crop_size) // 2))
is_flip = False
return ((left, top, (left + crop_size), (top + crop_size)), is_flip)
def __getitem__(self, index):
resize = (320, 240)
(crop_rect, is_flip) = self.transform_params(resize=resize, flip=1.0)
def image_to_np(image):
image_np = np.empty([image.channels, image.height, image.width], dtype=np.uint8)
image.copyto(image_np)
image_np = np.transpose(image_np, (1, 2, 0))
return image_np
def transform(img):
img = img.resize(resize)
img = img.crop(crop_rect)
if is_flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return np.array(img.resize((112, 112)))
def Sample_Image(imgs_path, sl):
frams = []
for a in sl:
img = transform(Image.open(os.path.join(imgs_path, ('%06d.jpg' % a))))
frams.append(self.transform(img).view(3, 112, 112, 1))
return torch.cat(frams, dim=3).type(torch.FloatTensor)
sn = self.sample_duration
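# Pick sample_duration frame indices: a random frame from each temporal segment during training, the segment-center frame otherwise.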
if (self.phase == 'train'):
f = (lambda n: [(lambda n, arr: (n if (arr == []) else random.choice(arr)))(((n * i) / sn), range(int(((n * i) / sn)), max((int(((n * i) / sn)) + 1), int(((n * (i + 1)) / sn))))) for i in range(sn)])
else:
f = (lambda n: [(lambda n, arr: (n if (arr == []) else int(np.mean(arr))))(((n * i) / sn), range(int(((n * i) / sn)), max((int(((n * i) / sn)) + 1), int(((n * (i + 1)) / sn))))) for i in range(sn)])
sl = f(self.inputs2[index][1])
data_path = os.path.join('/'.join(self.dataset_root.split('/')[:(- 1)]), self.typ1, self.phase, '/'.join(self.inputs[index][0].split('/')[(- 3):]))
clip = Sample_Image(data_path, sl)
data_path2 = os.path.join('/'.join(self.dataset_root.split('/')[:(- 1)]), self.typ2, self.phase, '/'.join(self.inputs2[index][0].split('/')[(- 3):]))
clip2 = Sample_Image(data_path2, sl)
assert (self.inputs[index][2] == self.inputs2[index][2])
return (clip.permute(0, 3, 1, 2), self.inputs[index][2], clip2.permute(0, 3, 1, 2))
def __len__(self):
return len(self.inputs2) |
def WideResNet(nbfilters, nbblocks, dropout, weight_decay, nb_classes, batchnorm_training=True, use_bias=True):
if (K.image_data_format() == 'channels_last'):
input_model = Input(shape=(32, 32, 3))
channel_axis = (- 1)
elif (K.image_data_format() == 'channels_first'):
input_model = Input(shape=(3, 32, 32))
channel_axis = 1
x = ZeroPadding2D((1, 1))(input_model)
x = Conv2D(16, (3, 3), kernel_regularizer=l2(weight_decay), use_bias=use_bias)(x)
x = stage(x, nbfilters[0], nbblocks[0], dropout, weight_decay, channel_axis, subsample=False, batchnorm_training=batchnorm_training, use_bias=use_bias)
x = stage(x, nbfilters[1], nbblocks[1], dropout, weight_decay, channel_axis, batchnorm_training=batchnorm_training, use_bias=use_bias)
x = stage(x, nbfilters[2], nbblocks[2], dropout, weight_decay, channel_axis, batchnorm_training=batchnorm_training, use_bias=use_bias)
x = BatchNormalization(axis=channel_axis, center=batchnorm_training, scale=batchnorm_training)(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, kernel_regularizer=l2(weight_decay), use_bias=use_bias)(x)
x = Activation('softmax')(x)
return Model(input_model, x) |
class CTCTrainer(Trainer):
def training_step(self, model: nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])]) -> torch.Tensor:
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if (self.args.n_gpu > 1):
if (model.module.config.ctc_loss_reduction == 'mean'):
loss = loss.mean()
elif (model.module.config.ctc_loss_reduction == 'sum'):
loss = (loss.sum() / (inputs['labels'] >= 0).sum())
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if (self.args.gradient_accumulation_steps > 1):
loss = (loss / self.args.gradient_accumulation_steps)
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach() |
@tf.RegisterGradient('Correlation')
def _correlation_grad_cc(op, grad):
return correlation_grad_module.correlation_grad(grad, op.inputs[0], op.inputs[1], stride=op.get_attr('stride'), max_displacement=op.get_attr('max_displacement')) |
class PSPModule(nn.Module):
def __init__(self, features, out_features=1024, sizes=(1, 2, 4, 8)):
super().__init__()
self.stages = []
self.stages = nn.ModuleList([C(features, features, 3, 1, groups=features) for size in sizes])
self.project = CBR((features * (len(sizes) + 1)), out_features, 1, 1)
def forward(self, feats):
(h, w) = (feats.size(2), feats.size(3))
out = [feats]
for stage in self.stages:
feats = F.avg_pool2d(feats, kernel_size=3, stride=2, padding=1)
upsampled = F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True)
out.append(upsampled)
return self.project(torch.cat(out, dim=1)) |
@sc.datanode(name='left_boundary2')
class LeftBoundary2(sc.SampleDomain):
def sampling(self, *args, **kwargs):
return (Line.sample_boundary(100, sieve=sp.Eq(x, 0)), {'d_y': 0}) |
def _wrapper_count_operators(model: nn.Module, inputs: list, mode: str, **kwargs) -> typing.DefaultDict[(str, float)]:
supported_ops = {k: (lambda *args, **kwargs: {}) for k in _IGNORED_OPS}
supported_ops.update(kwargs.pop('supported_ops', {}))
kwargs['supported_ops'] = supported_ops
assert (len(inputs) == 1), 'Please use batch size=1'
tensor_input = inputs[0]['image']
class WrapModel(nn.Module):
def __init__(self, model):
super().__init__()
if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
self.model = model.module
else:
self.model = model
def forward(self, image):
inputs = [{'image': image}]
outputs = self.model.forward(inputs)
return _flatten_to_tuple(outputs)
old_train = model.training
with torch.no_grad():
if (mode == FLOPS_MODE):
ret = flop_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
elif (mode == ACTIVATIONS_MODE):
ret = activation_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
else:
raise NotImplementedError('Count for mode {} is not supported yet.'.format(mode))
if isinstance(ret, tuple):
ret = ret[0]
model.train(old_train)
return ret |
def gaussian_noise(tensor, mean=0, stddev=0.1):
# Add element-wise Gaussian noise drawn with the requested mean and standard deviation.
noise = torch.empty(tensor.size()).normal_(mean, stddev)
return Variable(tensor + noise) |
def add_preds(df_county, NUM_DAYS_LIST=[1, 2, 3], verbose=False, cached_dir=None, outcomes=['Deaths', 'Cases'], discard=False, d=datetime.datetime.today(), add_predict_interval=True, interval_target_days=[], expanded_shared_time_truncation=0.1, expanded_shared_max_days=365, force_predict=False):
print('adding preds....')
advanced_model = {'model_type': 'advanced_shared_model'}
linear = {'model_type': 'linear'}
BEST_MODEL = [advanced_model, linear]
if (cached_dir is not None):
print('\tcached dir', cached_dir)
if (not discard):
cached_fname = oj(cached_dir, f'preds_{d.month}_{d.day}_cached.pkl')
else:
cached_fname = oj(cached_dir, f'preds_{d.month}_{d.day}_cached_discard1day.pkl')
if os.path.exists(cached_fname):
return pd.read_pickle(cached_fname)
elif (not force_predict):
delta = 0
while (not os.path.exists(cached_fname)):
d = (datetime.datetime.today() - datetime.timedelta(days=delta))
cached_fname = oj(cached_dir, f'preds_{d.month}_{d.day}_cached.pkl')
delta += 1
return pd.read_pickle(cached_fname)
print('cached fname', cached_fname)
print('predictions not cached, now calculating (might take a while)')
for outcome in outcomes:
print(f'predicting {outcome}...')
tmp = [0 for _ in range(df_county.shape[0])]
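# tmp carries the previous horizon's prediction so that predicted counts are non-decreasing across target days (and never below the last observed value).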
for num_days_in_future in tqdm(NUM_DAYS_LIST):
print('')
print(f'Begin fit and predict for target day {num_days_in_future}')
output_key = f'Predicted {outcome} {num_days_in_future}-day'
df_county = fit_and_predict_ensemble(df_county, methods=BEST_MODEL, outcome=outcome.lower(), mode='predict_future', target_day=np.array([num_days_in_future]), output_key=output_key, verbose=verbose, expanded_shared_time_truncation=expanded_shared_time_truncation, expanded_shared_max_days=expanded_shared_max_days)
vals = df_county[output_key].values
out = []
for i in range(vals.shape[0]):
if np.isnan(vals[i]):
out.append(0)
else:
out.append(max(vals[i][0], list(df_county[outcome.lower()])[i][(- 1)], tmp[i]))
df_county[output_key] = out
tmp = out
output_key = f'Predicted {outcome} Intervals'
if add_predict_interval:
if (not interval_target_days):
interval_target_days = NUM_DAYS_LIST
print('prediction intervals...')
print(interval_target_days)
df_county = add_prediction_intervals(df_county, target_day=np.array(interval_target_days), outcome=outcome.lower(), methods=BEST_MODEL, interval_type='local', output_key=output_key, expanded_shared_time_truncation=expanded_shared_time_truncation, expanded_shared_max_days=expanded_shared_max_days)
print('predicting 3-day lagged deaths...')
try:
output_key = 'Predicted Deaths 3-day Lagged'
df_county = fit_and_predict_ensemble(df_county, methods=BEST_MODEL, outcome='deaths', mode='eval_mode', target_day=np.array([3]), output_key=output_key, verbose=verbose, expanded_shared_time_truncation=expanded_shared_time_truncation, expanded_shared_max_days=expanded_shared_max_days)
except Exception:
print('err predicting 3-day lagged deaths')
if (cached_dir is not None):
cached_fname_temp = (cached_fname[:(- 4)] + '_temp.pkl')
print('caching to', cached_fname_temp)
df_county.to_pickle(cached_fname_temp)
df_county[output_key] = [v[0] for v in df_county[output_key].values]
print('add recent deaths....')
DATA_DATE_FORMAT = '%m-%d-%Y'
one_week_ago = (datetime.datetime.today() - datetime.timedelta(days=8))
most_recent_str = ''
delta = 1
while (not (most_recent_str in df_county.keys())):
most_recent = (datetime.datetime.today() - datetime.timedelta(days=delta))
most_recent_str = ('#Deaths_' + most_recent.strftime(DATA_DATE_FORMAT))
delta += 1
if (most_recent <= one_week_ago):
print('problem: the most recent data in df_county is older than one week!')
df_county['recent_deaths'] = 0
else:
one_week_ago_str = ('#Deaths_' + one_week_ago.strftime(DATA_DATE_FORMAT))
df_county['recent_deaths'] = (df_county[most_recent_str] - df_county[one_week_ago_str])
if (cached_dir is not None):
print('recaching to', cached_fname)
df_county.to_pickle(cached_fname)
return df_county |
@hypothesis.given(inp1=arrays(shape=(3, 2, 10), dtype=np.float64, elements=hypothesis.strategies.floats((- 100), 100)), inp2=arrays(shape=(3, 2, 10), dtype=np.float64, elements=hypothesis.strategies.floats((- 100), 100)), intersection_temperature=floats(1e-05, 1.0), approximation_mode=sampled_from(['clipping', 'clipping_forward']), box_type=sampled_from([MinDeltaBoxTensor, BoxTensor]))
@hypothesis.settings(print_blob=True, max_examples=1000)
def test_intersection_all_input_ranges(inp1, inp2, intersection_temperature, approximation_mode, box_type) -> None:
box1 = box_type(torch.tensor(inp1).float())
box2 = box_type(torch.tensor(inp2).float())
res = gumbel_intersection(box1, box2, intersection_temperature=intersection_temperature, approximation_mode=approximation_mode)
assert torch.isfinite(res.z).all()
assert torch.isfinite(res.Z).all()
hard_res = hard_intersection(box1, box2)
assert (res.z >= hard_res.z).all()
assert (res.Z <= hard_res.Z).all() |
def main():
global args, config, best_mota
args = parser.parse_args()
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config = EasyDict(config['common'])
config.save_path = os.path.dirname(args.config)
model = build_model(config)
model.cuda()
optimizer = build_optim(model, config)
criterion = build_criterion(config.loss)
last_iter = (- 1)
best_mota = 0
if args.load_path:
if args.recover:
(best_mota, last_iter) = load_state(args.load_path, model, optimizer=optimizer)
else:
load_state(args.load_path, model)
cudnn.benchmark = True
(train_transform, valid_transform) = build_augmentation(config.augmentation)
train_dataset = build_dataset(config, set_source='train', evaluate=False, train_transform=train_transform)
trainval_dataset = build_dataset(config, set_source='train', evaluate=True, valid_transform=valid_transform)
val_dataset = build_dataset(config, set_source='val', evaluate=True, valid_transform=valid_transform)
train_sampler = DistributedGivenIterationSampler(train_dataset, config.lr_scheduler.max_iter, config.batch_size, world_size=1, rank=0, last_iter=last_iter)
train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=False, num_workers=config.workers, pin_memory=True, sampler=train_sampler)
lr_scheduler = build_lr_scheduler(config.lr_scheduler, optimizer)
tb_logger = SummaryWriter((config.save_path + '/events'))
logger = create_logger('global_logger', (config.save_path + '/log.txt'))
logger.info('args: {}'.format(pprint.pformat(args)))
logger.info('config: {}'.format(pprint.pformat(config)))
tracking_module = TrackingModule(model, optimizer, criterion, config.det_type)
if args.evaluate:
logger.info('Evaluation on traing set:')
validate(trainval_dataset, tracking_module, 'last', part='train')
logger.info('Evaluation on validation set:')
validate(val_dataset, tracking_module, 'last', part='val')
return
train(train_loader, val_dataset, trainval_dataset, tracking_module, lr_scheduler, (last_iter + 1), tb_logger) |
def self_disc_net(args, data=None):
model = self_D_net(args)
model.D.load_state_dict(data)
return model |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--audio-dirs', nargs='+', default=['-'], required=True, help='input directories with audio files')
parser.add_argument('--labels', required=True, help='aggregated input labels with format <ID LABEL> per line', type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument('--spm-model', required=True, help='sentencepiece model to use for encoding', type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument('--dictionary', required=True, help='file to load fairseq dictionary from', type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument('--audio-format', choices=['flac', 'wav'], default='wav')
parser.add_argument('--output', required=True, type=argparse.FileType('w'), help='path to save json output')
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(' ', 1)
labels[utt_id] = label
if (len(labels) == 0):
raise Exception('No labels found in ' + args.labels.name)
Sample = namedtuple('Sample', 'aud_path utt_id')
samples = []
for (path, _, files) in chain.from_iterable((os.walk(path) for path in args.audio_dirs)):
for f in files:
if f.endswith(args.audio_format):
if (len(os.path.splitext(f)) != 2):
raise Exception('Expect <utt_id.extension> file name. Got: ', f)
utt_id = os.path.splitext(f)[0]
if (utt_id not in labels):
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {executor.submit(process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict): s for s in samples}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print('generated an exception: ', exc)
else:
utts.update(data)
json.dump({'utts': utts}, args.output, indent=4) |
def test_kernel_tuning(random_data):
(X, Y) = random_data
param_grid = {'kernel': ['poly'], 'c': [[0.1], [0.1, 0.2]], 'degree': [[2], [2, 3]]}
kernel_reg = GridSearchCV(KCCA(latent_dimensions=1), param_grid=param_grid, cv=2, verbose=True).fit([X, Y])
assert hasattr(kernel_reg, 'best_estimator_') |
class Trainer(object):
def __init__(self, args):
self.args = args
filehandler = logging.FileHandler(args.logging_file)
streamhandler = logging.StreamHandler()
self.logger = logging.getLogger('')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(filehandler)
self.logger.addHandler(streamhandler)
self.logger.info(args)
input_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
if (platform.system() == 'Darwin'):
data_root = os.path.join('~', 'Nutstore Files', 'Dataset')
elif (platform.system() == 'Linux'):
data_root = os.path.join('~', 'datasets')
if args.colab:
data_root = '/content/datasets'
else:
raise ValueError('Notice Dataset Path')
data_kwargs = {'base_size': args.base_size, 'transform': input_transform, 'crop_size': args.crop_size, 'root': data_root, 'base_dir': args.dataset}
trainset = IceContrast(split=args.train_split, mode='train', **data_kwargs)
valset = IceContrast(split=args.val_split, mode='testval', **data_kwargs)
self.train_data = gluon.data.DataLoader(trainset, args.batch_size, shuffle=True, last_batch='rollover', num_workers=args.workers)
self.eval_data = gluon.data.DataLoader(valset, args.test_batch_size, last_batch='rollover', num_workers=args.workers)
layers = ([args.blocks] * 3)
channels = [(x * args.channel_times) for x in [8, 16, 32, 64]]
if (args.model == 'ResNetFPN'):
model = ASKCResNetFPN(layers=layers, channels=channels, fuse_mode=args.fuse_mode, tiny=args.tiny, classes=trainset.NUM_CLASS)
elif (args.model == 'ResUNet'):
model = ASKCResUNet(layers=layers, channels=channels, fuse_mode=args.fuse_mode, tiny=args.tiny, classes=trainset.NUM_CLASS)
print('layers: ', layers)
print('channels: ', channels)
print('fuse_mode: ', args.fuse_mode)
print('tiny: ', args.tiny)
print('classes: ', trainset.NUM_CLASS)
if (args.host == 'xxx'):
self.host_name = socket.gethostname()
else:
self.host_name = args.host
self.save_prefix = '_'.join([args.model, args.fuse_mode, args.dataset, self.host_name, 'GPU', args.gpus])
model.cast(args.dtype)
if (args.resume is not None):
if os.path.isfile(args.resume):
model.load_parameters(args.resume, ctx=args.ctx)
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
else:
model.initialize(init=init.MSRAPrelu(), ctx=args.ctx, force_reinit=True)
print('Model Initializing')
print('args.ctx: ', args.ctx)
self.net = model
if args.summary:
summary(self.net, mx.nd.zeros((1, 3, args.crop_size, args.crop_size), ctx=args.ctx[0]))
sys.exit()
self.criterion = SoftIoULoss()
self.lr_scheduler = LRSequential([LRScheduler('linear', base_lr=0, target_lr=args.lr, nepochs=args.warmup_epochs, iters_per_epoch=len(self.train_data)), LRScheduler(mode='poly', base_lr=args.lr, nepochs=(args.epochs - args.warmup_epochs), iters_per_epoch=len(self.train_data), power=0.9)])
kv = mx.kv.create(args.kvstore)
if (args.optimizer == 'sgd'):
optimizer_params = {'lr_scheduler': self.lr_scheduler, 'wd': args.weight_decay, 'momentum': args.momentum, 'learning_rate': args.lr}
elif (args.optimizer == 'adam'):
optimizer_params = {'lr_scheduler': self.lr_scheduler, 'wd': args.weight_decay, 'learning_rate': args.lr}
elif (args.optimizer == 'adagrad'):
optimizer_params = {'wd': args.weight_decay, 'learning_rate': args.lr}
else:
raise ValueError('Unsupported optimizer {} used'.format(args.optimizer))
if (args.dtype == 'float16'):
optimizer_params['multi_precision'] = True
if args.no_wd:
for (k, v) in self.net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
self.optimizer = gluon.Trainer(self.net.collect_params(), args.optimizer, optimizer_params, kvstore=kv)
self.iou_metric = SigmoidMetric(1)
self.nIoU_metric = SamplewiseSigmoidMetric(1, score_thresh=self.args.score_thresh)
self.best_iou = 0
self.best_nIoU = 0
self.is_best = False
def training(self, epoch):
tbar = tqdm(self.train_data)
train_loss = 0.0
for (i, batch) in enumerate(tbar):
data = gluon.utils.split_and_load(batch[0], ctx_list=self.args.ctx, batch_axis=0)
labels = gluon.utils.split_and_load(batch[1], ctx_list=self.args.ctx, batch_axis=0)
losses = []
with autograd.record(True):
for (x, y) in zip(data, labels):
pred = self.net(x)
loss = self.criterion(pred, y.astype(self.args.dtype, copy=False))
losses.append(loss)
mx.nd.waitall()
autograd.backward(losses)
self.optimizer.step(self.args.batch_size)
for loss in losses:
train_loss += (np.mean(loss.asnumpy()) / len(losses))
tbar.set_description(('Epoch %d, training loss %.4f' % (epoch, (train_loss / (i + 1)))))
if ((i != 0) and ((i % self.args.log_interval) == 0)):
self.logger.info(('Epoch %d iteration %04d/%04d: training loss %.3f' % (epoch, i, len(self.train_data), (train_loss / (i + 1)))))
mx.nd.waitall()
def validation(self, epoch):
self.iou_metric.reset()
self.nIoU_metric.reset()
tbar = tqdm(self.eval_data)
for (i, batch) in enumerate(tbar):
data = gluon.utils.split_and_load(batch[0], ctx_list=self.args.ctx, batch_axis=0)
labels = gluon.utils.split_and_load(batch[1], ctx_list=self.args.ctx, batch_axis=0)
preds = []
for (x, y) in zip(data, labels):
pred = self.net(x)
preds.append(pred)
self.iou_metric.update(preds, labels)
self.nIoU_metric.update(preds, labels)
(_, IoU) = self.iou_metric.get()
(_, nIoU) = self.nIoU_metric.get()
tbar.set_description(('Epoch %d, IoU: %.4f, nIoU: %.4f' % (epoch, IoU, nIoU)))
if (IoU > self.best_iou):
self.best_iou = IoU
self.net.save_parameters(('%s/%.4f-%s-%s-%d-best.params' % (self.args.save_dir, IoU, self.save_prefix, 'IoU', epoch)))
with open((self.save_prefix + '_best_IoU.log'), 'a') as f:
now = datetime.now()
dt_string = now.strftime('%d/%m/%Y %H:%M:%S')
f.write('{} - {:04d}:\t{:.4f}\n'.format(dt_string, epoch, IoU))
if (nIoU > self.best_nIoU):
self.best_nIoU = nIoU
self.net.save_parameters(('%s/%.4f-%s-%s-%d-best.params' % (self.args.save_dir, nIoU, self.save_prefix, 'nIoU', epoch)))
with open((self.save_prefix + '_best_nIoU.log'), 'a') as f:
now = datetime.now()
dt_string = now.strftime('%d/%m/%Y %H:%M:%S')
f.write('{} - {:04d}:\t{:.4f}\n'.format(dt_string, epoch, nIoU))
if (epoch >= (args.epochs - 1)):
with open((self.save_prefix + '_best_IoU.log'), 'a') as f:
f.write('Finished\n')
with open((self.save_prefix + '_best_nIoU.log'), 'a') as f:
f.write('Finished\n')
print('best_iou: ', self.best_iou)
print('best_nIoU: ', self.best_nIoU) |
def test_categorical_from_structure(X):
structure = ((), (0,), (1, 3), ())
distributions = _from_structure(X, structure=structure)
model = BayesianNetwork(distributions, structure=structure)
assert isinstance(model.distributions[0], Categorical)
assert isinstance(model.distributions[1], ConditionalCategorical)
assert isinstance(model.distributions[2], ConditionalCategorical)
assert isinstance(model.distributions[3], Categorical)
p0 = model.distributions[0].probs
p1 = model.distributions[1].probs[0]
p2 = model.distributions[2].probs[0]
p3 = model.distributions[3].probs
assert_array_almost_equal(p0, [[0.454545, 0.545455]])
assert_array_almost_equal(p1, [[0.4, 0.4, 0.2], [0.166667, 0.5, 0.333333]])
assert_array_almost_equal(p2, [[[0.0, 1.0], [0.5, 0.5]], [[0.5, 0.5], [1.0, 0.0]], [[0.5, 0.5], [0.0, 1.0]]])
assert_array_almost_equal(p3, [[0.4545, 0.5455]], 4) |
class FireReset(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
assert (env.unwrapped.get_action_meanings()[1] == 'FIRE'), 'Only use fire reset wrapper for suitable environment!'
assert (len(env.unwrapped.get_action_meanings()) >= 3), 'Only use fire reset wrapper for suitable environment!'
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
self.env.reset(**kwargs)
(obs, _, done, _) = self.env.step(1)
if done:
obs = self.env.reset(**kwargs)
return obs |
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss') |
def test():
print('Loading toy dataset from JSON...')
loader = DatasetLoader()
gtDataset = loader.read_json('data/toydata/gt.json')
print('>> {}'.format(gtDataset.phrases))
gtBoxList = gtDataset.boxes
print('Loading toy predictions from JSON...')
predDataset = loader.read_json('data/toydata/pred.json')
predictedBoxList = predDataset.boxes
iouThreshold = 0.5
assert (predDataset.size == gtDataset.size)
print('Evaluating toy dataset...')
evaluator = Evaluator()
(accuracy, iouList) = evaluator.evaluate(predictedBoxList, gtBoxList, iouThreshold)
print('>> Accuracy: {}'.format(accuracy))
for (pred, gt, iou) in zip(predictedBoxList, gtBoxList, iouList):
print('>>>> GT: {}, PRED: {}, IoU: {}'.format(gt, pred, iou)) |
def get_bboxes_scores(result):
bboxes = result['bbox'][0]
gt_bbox = result['gt_bbox'][0]
bbox_lengths = result['bbox'][1][0]
gt_lengths = result['gt_bbox'][1][0]
bbox_list = []
gt_box_list = []
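# Boxes are stored in normalized coordinates; rescale them to pixel space using each image's shape.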
for i in range(len(bbox_lengths)):
num = bbox_lengths[i]
for j in range(num):
dt = bboxes[j]
(clsid, score, xmin, ymin, xmax, ymax) = dt.tolist()
im_shape = result['im_shape'][0][i].tolist()
(im_height, im_width) = (int(im_shape[0]), int(im_shape[1]))
xmin *= im_width
ymin *= im_height
xmax *= im_width
ymax *= im_height
bbox_list.append([xmin, ymin, xmax, ymax, score])
faces_num_gt = 0
for i in range(len(gt_lengths)):
num = gt_lengths[i]
for j in range(num):
gt = gt_bbox[j]
(xmin, ymin, xmax, ymax) = gt.tolist()
im_shape = result['im_shape'][0][i].tolist()
(im_height, im_width) = (int(im_shape[0]), int(im_shape[1]))
xmin *= im_width
ymin *= im_height
xmax *= im_width
ymax *= im_height
gt_box_list.append([xmin, ymin, xmax, ymax])
faces_num_gt += 1
return (gt_box_list, bbox_list, faces_num_gt) |
class ECAPA_TDNN(nn.Module):
def __init__(self, C):
super(ECAPA_TDNN, self).__init__()
self.torchfbank = torch.nn.Sequential(PreEmphasis(), torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=512, win_length=400, hop_length=160, f_min=20, f_max=7600, window_fn=torch.hamming_window, n_mels=80))
self.specaug = FbankAug()
self.conv1 = nn.Conv1d(80, C, kernel_size=5, stride=1, padding=2)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(C)
self.layer1 = Bottle2neck(C, C, kernel_size=3, dilation=2, scale=8)
self.layer2 = Bottle2neck(C, C, kernel_size=3, dilation=3, scale=8)
self.layer3 = Bottle2neck(C, C, kernel_size=3, dilation=4, scale=8)
self.layer4 = nn.Conv1d((3 * C), 1536, kernel_size=1)
self.attention = nn.Sequential(nn.Conv1d(4608, 256, kernel_size=1), nn.ReLU(), nn.BatchNorm1d(256), nn.Tanh(), nn.Conv1d(256, 1536, kernel_size=1), nn.Softmax(dim=2))
self.bn5 = nn.BatchNorm1d(3072)
self.fc6 = nn.Linear(3072, 192)
self.bn6 = nn.BatchNorm1d(192)
def forward(self, x, aug=None):
with torch.no_grad():
x = (self.torchfbank(x) + 1e-06)
x = x.log()
x = (x - torch.mean(x, dim=(- 1), keepdim=True))
if aug:
x = self.specaug(x)
x = self.conv1(x)
x = self.relu(x)
x = self.bn1(x)
x1 = self.layer1(x)
x2 = self.layer2((x + x1))
x3 = self.layer3(((x + x1) + x2))
x = self.layer4(torch.cat((x1, x2, x3), dim=1))
x = self.relu(x)
t = x.size()[(- 1)]
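# Attentive statistics pooling: append the per-utterance mean and std to every frame before computing attention weights.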
global_x = torch.cat((x, torch.mean(x, dim=2, keepdim=True).repeat(1, 1, t), torch.sqrt(torch.var(x, dim=2, keepdim=True).clamp(min=0.0001)).repeat(1, 1, t)), dim=1)
w = self.attention(global_x)
mu = torch.sum((x * w), dim=2)
sg = torch.sqrt((torch.sum(((x ** 2) * w), dim=2) - (mu ** 2)).clamp(min=0.0001))
x = torch.cat((mu, sg), 1)
x = self.bn5(x)
x = self.fc6(x)
x = self.bn6(x)
return x |
class GC_Block(nn.Module):
def __init__(self, in_features, p_dropout, output_nodes=48, bias=False):
super(GC_Block, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.gc1 = GraphConvolution(in_features, in_features, output_nodes=output_nodes, bias=bias)
self.bn1 = nn.BatchNorm1d((output_nodes * in_features))
self.gc2 = GraphConvolution(in_features, in_features, output_nodes=output_nodes, bias=bias)
self.bn2 = nn.BatchNorm1d((output_nodes * in_features))
self.do = nn.Dropout(p_dropout)
self.act_f = nn.Tanh()
def forward(self, x):
y = self.gc1(x)
(b, n, f) = y.shape
y = self.bn1(y.view(b, (- 1))).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
y = self.gc2(y)
(b, n, f) = y.shape
y = self.bn2(y.view(b, (- 1))).view(b, n, f)
y = self.act_f(y)
y = self.do(y)
return (y + x)
def __repr__(self):
return (((((self.__class__.__name__ + ' (') + str(self.in_features)) + ' -> ') + str(self.out_features)) + ')') |
def mahalanobis_metric(p, S, L, U, pos_U, neg_U, args, encoder=None):
if (encoder is not None):
p = encoder(p)
S = encoder(S)
neg_index = (L == 0).nonzero()
pos_index = (L == 1).nonzero()
neg_index = neg_index.expand(neg_index.size(0), S.data.size(1))
pos_index = pos_index.expand(pos_index.size(0), S.data.size(1))
neg_S = torch.gather(S, 0, neg_index)
pos_S = torch.gather(S, 0, pos_index)
neg_mu = torch.mean(neg_S, dim=0, keepdim=True)
pos_mu = torch.mean(pos_S, dim=0, keepdim=True)
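# Mahalanobis-style distances to the class means, with pos_U @ pos_U.T and neg_U @ neg_U.T acting as the metric matrices.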
pos_mahalanobis_distance = (p - pos_mu).mm(pos_U.mm(pos_U.t())).mm((p - pos_mu).t()).diag().sqrt()
neg_mahalanobis_distance = (p - neg_mu).mm(neg_U.mm(neg_U.t())).mm((p - neg_mu).t()).diag().sqrt()
mahalanobis_ratio1 = (pos_mahalanobis_distance - neg_mahalanobis_distance)
mahalanobis_ratio2 = (neg_mahalanobis_distance - pos_mahalanobis_distance)
max_ratio = torch.max(mahalanobis_ratio1, mahalanobis_ratio2)
return max_ratio.clamp(0.01, 2) |
def batch_broadcast(tens_list: Sequence[Tensor], num_nonbatch: Sequence[int]):
assert (not isinstance(tens_list, Tensor))
assert (len(tens_list) == len(num_nonbatch))
assert all(((i >= 0) for i in num_nonbatch))
assert all(((t.ndim >= nnb) for (t, nnb) in zip(tens_list, num_nonbatch)))
if (len(tens_list) < 2):
return tens_list
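# Split each tensor's shape into leading batch dims (broadcast together) and its trailing num_nonbatch dims.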
b_shapes = [t.shape[:(t.ndim - nnb)] for (t, nnb) in zip(tens_list, num_nonbatch)]
try:
full_batch = shape_broadcast(b_shapes)
bdims = len(full_batch)
except ValueError:
raise ValueError(f"Following batch shapes couldn't be broadcast: {tuple(b_shapes)}")
def safe_expand(t, shp):
return (t if (len(shp) == 0) else t.expand(*shp))
tens_list = [t[((None,) * ((bdims + nnb) - t.ndim))] for (t, nnb) in zip(tens_list, num_nonbatch)]
shapes = [(full_batch + t.shape[bdims:]) for t in tens_list]
out_list = tuple((safe_expand(t, shp) for (t, shp) in zip(tens_list, shapes)))
return out_list |
def tensor2plotable(tensor) -> np.ndarray:
if isinstance(tensor, np.ndarray):
return tensor
elif isinstance(tensor, torch.Tensor):
return tensor.detach().cpu().numpy()
else:
raise TypeError(f'tensor should be an instance of Tensor, given {type(tensor)}') |
def train(model_path: str, train_dataset_path: str, pretrained_vectors: str='', lr: float=0.1, epochs: int=5) -> fasttext.FastText:
model = fasttext.train_supervised(input=train_dataset_path, pretrained_vectors=pretrained_vectors, dim=300, lr=lr, epoch=epochs, wordNgrams=3)
model.save_model(model_path)
return model |
class TFEncoderLayer(BaseModule):
def __init__(self, d_model=512, d_inner=256, n_head=8, d_k=64, d_v=64, dropout=0.1, qkv_bias=False, act_cfg=dict(type='mmcv.GELU'), operation_order=None):
super().__init__()
self.attn = MultiHeadAttention(n_head, d_model, d_k, d_v, qkv_bias=qkv_bias, dropout=dropout)
self.norm1 = nn.LayerNorm(d_model)
self.mlp = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, act_cfg=act_cfg)
self.norm2 = nn.LayerNorm(d_model)
self.operation_order = operation_order
if (self.operation_order is None):
self.operation_order = ('norm', 'self_attn', 'norm', 'ffn')
assert (self.operation_order in [('norm', 'self_attn', 'norm', 'ffn'), ('self_attn', 'norm', 'ffn', 'norm')])
def forward(self, x, mask=None):
if (self.operation_order == ('self_attn', 'norm', 'ffn', 'norm')):
residual = x
x = (residual + self.attn(x, x, x, mask))
x = self.norm1(x)
residual = x
x = (residual + self.mlp(x))
x = self.norm2(x)
elif (self.operation_order == ('norm', 'self_attn', 'norm', 'ffn')):
residual = x
x = self.norm1(x)
x = (residual + self.attn(x, x, x, mask))
residual = x
x = self.norm2(x)
x = (residual + self.mlp(x))
return x |
class XLNetLMHeadModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def save_progress(text_encoder, placeholder_token_id, accelerator, config, save_path):
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
learned_embeds_dict = {config.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path) |
def rsinc1_dt_csc(t):
e = 0.01
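# For |t| below the threshold e, use a Taylor expansion to avoid dividing by sin(t) near zero; otherwise use the closed form.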
r = torch.zeros_like(t)
a = torch.abs(t)
s = (a < e)
c = (s == 0)
t2 = (t[s] ** 2)
r[s] = ((t2 * ((t2 * (((4 * t2) / 675) + (2 / 63))) + (2 / 15))) + (1 / 3))
r[c] = (((1 / sin(t[c])) - ((t[c] * cos(t[c])) / (sin(t[c]) * sin(t[c])))) / sin(t[c]))
return r |
def test_run_exception_in_completed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.completed_event.side_effect = TypeError
run()
assert observer.completed_event.called
assert observer2.completed_event.called |
@register_task('audio_pretraining', dataclass=AudioPretrainingConfig)
class AudioPretrainingTask(FairseqTask):
cfg: AudioPretrainingConfig
@classmethod
def setup_task(cls, cfg: AudioPretrainingConfig, **kwargs):
return cls(cfg)
def load_dataset(self, split: str, task_cfg: FairseqDataclass=None, **kwargs):
data_path = self.cfg.data
task_cfg = (task_cfg or self.cfg)
if isinstance(task_cfg, Namespace):
if (not hasattr(task_cfg, 'autoregressive')):
task_cfg.autoregressive = (not (task_cfg.criterion == 'ctc'))
text_compression_level = getattr(TextCompressionLevel, str(self.cfg.text_compression_level))
compute_mask = (task_cfg.precompute_mask_config is not None)
mask_args = {}
if compute_mask:
mask_args = task_cfg.precompute_mask_config
if getattr(task_cfg, 'binarized_dataset', False):
self.datasets[split] = BinarizedAudioDataset(data_path, split=split, sample_rate=task_cfg.get('sample_rate', self.cfg.sample_rate), max_sample_size=self.cfg.max_sample_size, min_sample_size=self.cfg.min_sample_size, pad=((task_cfg.labels is not None) or task_cfg.enable_padding), normalize=task_cfg.normalize, num_buckets=(self.cfg.num_batch_buckets or int(self.cfg.tpu)), compute_mask=compute_mask, **mask_args)
else:
manifest_path = os.path.join(data_path, '{}.tsv'.format(split))
self.datasets[split] = FileAudioDataset(manifest_path=manifest_path, sample_rate=task_cfg.get('sample_rate', self.cfg.sample_rate), max_sample_size=self.cfg.max_sample_size, min_sample_size=self.cfg.min_sample_size, pad=((task_cfg.labels is not None) or task_cfg.enable_padding), normalize=task_cfg.normalize, num_buckets=(self.cfg.num_batch_buckets or int(self.cfg.tpu)), text_compression_level=text_compression_level, compute_mask=compute_mask, **mask_args)
if (getattr(task_cfg, 'subsample', 1) < 1):
self.datasets[split] = SubsampleDataset(self.datasets[split], task_cfg.subsample, shuffle=True, seed=task_cfg.seed)
if (self.cfg.tpu and (task_cfg.inferred_w2v_config.mask_channel_prob == 0.0)):
logger.info('Pretraining on TPUs may suffer convergence issues when training with `mask_channel_prob` value of 0. You may want to set this to a low value close to 0.')
def max_positions(self):
return (sys.maxsize, sys.maxsize)
def build_model(self, model_cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(model_cfg, from_checkpoint)
actualized_cfg = getattr(model, 'cfg', None)
if (actualized_cfg is not None):
if hasattr(actualized_cfg, 'w2v_args'):
model_cfg.w2v_args = actualized_cfg.w2v_args
return model
def post_save(self, cp_path, num_updates):
if (self.cfg.post_save_script is not None):
logger.info(f'launching {self.cfg.post_save_script}')
import os.path as osp
from fairseq.file_io import PathManager
eval_cp_path = osp.join(osp.dirname(cp_path), f'checkpoint_eval_{num_updates}.pt')
print(cp_path, eval_cp_path, osp.dirname(cp_path))
assert PathManager.copy(cp_path, eval_cp_path, overwrite=True), f'Failed to copy {cp_path} to {eval_cp_path}'
import subprocess
import shlex
subprocess.call(shlex.split(f'{self.cfg.post_save_script} {eval_cp_path}')) |
def make_multiple_dataset_real(dir, max_dataset_size=float('inf')):
subdir = ['faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faces/celebahq/real-tfr-1024-resized128', 'faceforensics_aligned/Deepfakes/original', 'faceforensics_aligned/Face2Face/original', 'faceforensics_aligned/FaceSwap/original', 'faceforensics_aligned/NeuralTextures/original']
total_image_list = []
(last_dir, dir) = (dir.split('/')[(- 1)], '/'.join(dir.split('/')[:(- 1)]))
print(dir)
for sdir in subdir:
curr_dir = (((((dir + '/') + sdir) + '/') + last_dir) + '/')
print(curr_dir)
cache = (curr_dir.rstrip('/') + '.txt')
if os.path.isfile(cache):
print(('Using filelist cached at %s' % cache))
with open(cache) as f:
images = [line.strip() for line in f]
if images[0].startswith(curr_dir):
print('Using image list from older version')
image_list = []
for image in images:
image_list.append(image)
else:
print('Adding prefix to saved image list')
image_list = []
prefix = os.path.dirname(curr_dir.rstrip('/'))
for image in images:
image_list.append(os.path.join(prefix, image))
image_list = random.sample(image_list, min(max_dataset_size, len(image_list)))
total_image_list += image_list
else:
print('Walking directory ...')
images = []
assert os.path.isdir(curr_dir), ('%s is not a valid directory' % curr_dir)
for (root, _, fnames) in sorted(os.walk(curr_dir, followlinks=True)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
image_list = random.sample(images, min(max_dataset_size, len(images)))
with open(cache, 'w') as f:
prefix = (os.path.dirname(curr_dir.rstrip('/')) + '/')
for i in image_list:
f.write(('%s\n' % util.remove_prefix(i, prefix)))
total_image_list += image_list
return total_image_list |
def save_image_array_as_png(image, output_path):
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG') |
def test_baseline_realnvp_config():
config = get_config(dataset='mnist', model='realnvp', use_baseline=True)
true_config = {'schema_type': 'multiscale-realnvp', 'use_cond_affine': False, 'pure_cond_affine': False, 'g_hidden_channels': [64, 64, 64, 64, 64, 64, 64, 64], 'num_u_channels': 0, 'early_stopping': True, 'train_batch_size': 100, 'valid_batch_size': 500, 'test_batch_size': 500, 'opt': 'adam', 'lr': 0.0001, 'weight_decay': 0.0, 'logit_tf_lambda': 1e-06, 'logit_tf_scale': 256, 'dequantize': True, 'act_norm': False, 'batch_norm': True, 'batch_norm_apply_affine': True, 'batch_norm_use_running_averages': True, 'batch_norm_momentum': 0.1, 'lr_schedule': 'none', 'max_bad_valid_epochs': 50, 'max_grad_norm': None, 'max_epochs': 1000, 'epochs_per_test': 1, 'train_objective': 'iwae', 'num_train_importance_samples': 1, 'num_valid_importance_samples': 1, 'num_test_importance_samples': 1}
assert (true_config == config) |
class ConvertBCHWtoCBHW(nn.Module):
def forward(self, vid: torch.Tensor) -> torch.Tensor:
return vid.permute(1, 0, 2, 3) |
def batch_mae_frame_float(gen_frames, gt_frames):
if (gen_frames.ndim == 3):
axis = (1, 2)
elif (gen_frames.ndim == 4):
axis = (1, 2, 3)
x = np.float32(gen_frames)
y = np.float32(gt_frames)
mae = np.sum(np.absolute((x - y)), axis=axis, dtype=np.float32)
return np.mean(mae) |
def sensor_callback(sensor_data, sensor_queue, sensor_name):
sensor_queue.put((sensor_data.frame, sensor_name)) |
class Search():
def __init__(self, forward_predictor: ForwardPredictor, forward_enumerator: ForwardEnumerator, value_heuristic: ValueHeuristic, action_enumerator: ActionEnumerator, random_state_enumerator: RandomStateEnumerator, random_state_predictor: RandomStatePredictor, opponent_action_enumerator: OpponentActionEnumerator, opponent_action_predictor: OpponentActionPredictor):
self.forward_predictor = forward_predictor
self.forward_enumerator = forward_enumerator
self.value_heuristic = value_heuristic
self.action_enumerator = action_enumerator
self.random_state_enumerator = random_state_enumerator
self.random_state_predictor = random_state_predictor
self.opponent_action_enumerator = opponent_action_enumerator
self.opponent_action_predictor = opponent_action_predictor
def expand(self, node_id):
raise NotImplementedError |
def _assess_dimension_(spectrum, unscaled_vhat, rank, n_samples, n_features, alpha=1, beta=1):
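# Returns an approximate log-likelihood score for modelling the spectrum with the given rank (used for automatic PCA dimensionality selection).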
if (rank > len(spectrum)):
raise ValueError('The tested rank cannot exceed the rank of the dataset')
pu = ((- rank) * np.log(2.0))
for i in range(rank):
pu += (gammaln(((n_features - i) / 2.0)) - ((np.log(np.pi) * (n_features - i)) / 2.0))
pl = np.sum(np.log(spectrum[:rank]))
pl = (((- pl) * n_samples) / 2.0)
if (rank == n_features):
pv = 0
v = 1
else:
v = (unscaled_vhat / (n_features - rank))
pv = ((((- np.log(v)) * n_samples) * (n_features - rank)) / 2.0)
m = ((n_features * rank) - ((rank * (rank + 1.0)) / 2.0))
pp = ((np.log((2.0 * np.pi)) * ((m + rank) + 1.0)) / 2.0)
pa = 0.0
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range((i + 1), len(spectrum)):
pa += (np.log(((spectrum[i] - spectrum[j]) * ((1.0 / spectrum_[j]) - (1.0 / spectrum_[i])))) + np.log(n_samples))
ll = (((((pu + pl) + pv) + pp) - (pa / 2.0)) - ((((rank + m) * np.log(n_samples)) * 3) / 2.0))
return ll |
def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs):
return (1 - sensitivity(test, reference, confusion_matrix, nan_for_nonexisting)) |
class Dense(Model):
def initialize(self, outsize, usebias=True, batch_norm=False, activation=(- 1)):
self.fclayer = L.fcLayer(outsize, usebias=usebias)
self.batch_norm = batch_norm
self.activation_ = activation
if batch_norm:
self.bn = L.batch_norm()
if (activation != (- 1)):
self.activation = L.activation(activation)
def forward(self, x):
x = self.fclayer(x)
if self.batch_norm:
x = self.bn(x)
if (self.activation_ != (- 1)):
x = self.activation(x)
return x |
def wrap(text, cols):
lines = []
while (len(text) > cols):
end = text.rfind(' ', 0, (cols + 1))
if (end == (- 1)):
end = cols
(line, text) = (text[:end], text[end:])
lines.append(line)
return lines |
def test_watershed_nodams():
nodam_watershed_result = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4])
nodam_watershed = morpho.watershed(landscape, dams=False)
assert_array_equal(nodam_watershed, nodam_watershed_result) |
class SimulatedAnnealing(Algorithm[(S, R)], threading.Thread):
def __init__(self, problem: Problem[S], mutation: Mutation, termination_criterion: TerminationCriterion, solution_generator: Generator=store.default_generator):
super(SimulatedAnnealing, self).__init__()
self.problem = problem
self.mutation = mutation
self.termination_criterion = termination_criterion
self.solution_generator = solution_generator
self.observable.register(termination_criterion)
self.temperature = 1.0
self.minimum_temperature = 1e-06
self.alpha = 0.95
self.counter = 0
def create_initial_solutions(self) -> List[S]:
return [self.solution_generator.new(self.problem)]
def evaluate(self, solutions: List[S]) -> List[S]:
return [self.problem.evaluate(solutions[0])]
def stopping_condition_is_met(self) -> bool:
return self.termination_criterion.is_met
def init_progress(self) -> None:
self.evaluations = 0
def step(self) -> None:
mutated_solution = copy.deepcopy(self.solutions[0])
mutated_solution: Solution = self.mutation.execute(mutated_solution)
mutated_solution = self.evaluate([mutated_solution])[0]
acceptance_probability = self.compute_acceptance_probability(self.solutions[0].objectives[0], mutated_solution.objectives[0], self.temperature)
if (acceptance_probability > random.random()):
self.solutions[0] = mutated_solution
self.temperature *= self.alpha
def compute_acceptance_probability(self, current: float, new: float, temperature: float) -> float:
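# Metropolis criterion: always accept an improvement; accept a worse solution with probability exp(-(new - current) / T).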
if (new < current):
return 1.0
else:
t = (temperature if (temperature > self.minimum_temperature) else self.minimum_temperature)
value = ((new - current) / t)
return numpy.exp(((- 1.0) * value))
def update_progress(self) -> None:
self.evaluations += 1
observable_data = self.observable_data()
self.observable.notify_all(**observable_data)
def observable_data(self) -> dict:
ctime = (time.time() - self.start_computing_time)
return {'PROBLEM': self.problem, 'EVALUATIONS': self.evaluations, 'SOLUTIONS': self.get_result(), 'COMPUTING_TIME': ctime}
def get_result(self) -> R:
return self.solutions[0]
def get_name(self) -> str:
return 'Simulated Annealing' |
def main():
parser = ArgumentParser(usage='python parse_trace_json.py trace_1.json')
parser.add_argument('json_files', nargs='*')
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
kernel_start_times = []
for json_file in args.json_files:
with open(json_file, 'r') as fin:
json_obj = load(fin)
df = prepare_df(json_obj)
kernel_start_times.append(df.query("cat=='kernel'").head(1)['ts'].values[0])
print('kernel 5 sample:\n', df.query("cat=='kernel'").head(n=5))
ret = analyze_gpu_kernel(df)
print('compute summary:', ret)
ret = analyze_communicate_overlap(df, args.verbose)
nooverlap_comm_df = ret.pop('nooverlap_comm_df')
print('communicate summary:', ret)
print('no overlap comm op:\n', nooverlap_comm_df)
kernel_start_times = np.asarray(kernel_start_times)
min_start_time = np.min(kernel_start_times)
print((kernel_start_times - min_start_time)) |
def simxRemoveUI(clientID, uiHandle, operationMode):
return c_RemoveUI(clientID, uiHandle, operationMode) |
def load_result(path):
fullpath = os.path.join(path, 'rollout.json')
if (not os.path.exists(fullpath)):
return None
results = json.load(open(fullpath, 'rb'))
score = (results['score'] * 100)
return score |
def save_model(args, model, is_best):
print('Saving model...')
model_name = 'match_{}_cycle_{}_trans_{}_coseg_{}_task_{}.pth.tar'.format(args.w_match, args.w_cycle, args.w_trans, args.w_coseg, args.w_task)
model_path = os.path.join(args.result_model_dir, model_name)
torch.save(model.state_dict(), model_path)
if is_best:
best_model_path = os.path.join(args.result_model_dir, 'best_{}'.format(model_name))
shutil.copyfile(model_path, best_model_path)
return |
class BackendTestCase(QiskitTestCase):
backend_cls = None
circuit = ReferenceCircuits.bell()
def setUp(self):
super().setUp()
self.backend = self._get_backend()
@classmethod
def setUpClass(cls):
if (cls is BackendTestCase):
raise SkipTest('Skipping base class tests')
super().setUpClass()
def _get_backend(self):
return self.backend_cls()
def test_configuration(self):
configuration = self.backend.configuration()
return configuration
def test_properties(self):
properties = self.backend.properties()
if self.backend.configuration().simulator:
self.assertEqual(properties, None)
return properties
def test_status(self):
status = self.backend.status()
return status
def test_run_circuit(self):
job = execute(self.circuit, self.backend)
result = job.result()
self.assertEqual(result.success, True)
return result |
class DemoFeatures(AbstractFeatures):
def __init__(self, kdl_kin, config):
self.config = config
self.kdl_kin = kdl_kin
self.features = RobotFeatures(self.config, self.kdl_kin)
def compute(self, world, state):
if (state.reference is not None):
ee = pm.fromMatrix(self.kdl_kin.forward(state.q))
if state.gripper_closed:
gripper = 1.0
else:
gripper = 0.0
f = np.array(self.features.GetFeatures(ee, (state.seq / len(state.traj.points)), world.observation, ['time', state.reference.goal], gripper))
return f
else:
return None
def updateBounds(self, world):
raise Exception('feature.updateBounds not yet implemented!')
def getBounds(self):
raise Exception('feature.getBounds not yet implemented!') |
class LayerNormalization(Layer):
def __init__(self, hidden_size, bigdl_type='float'):
super(LayerNormalization, self).__init__(None, bigdl_type, hidden_size) |
class GPT2LMHeadModel():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def get_acc_from_qid_dicts(qid2preds, qid2targets):
qids = qid2preds.keys()
preds = np.asarray([int(qid2preds[ele]) for ele in qids])
targets = np.asarray([int(qid2targets[ele]) for ele in qids])
acc = (sum((preds == targets)) / float(len(preds)))
return acc |