code stringlengths 101 5.91M |
|---|
def test_cache_directory_set(mkdir: MagicMock) -> None:
    """Setting cache_directory stores the path and creates the directory.

    BUG FIX: the original ended with ``assert mkdir.called_once_with(my_dir)``.
    ``called_once_with`` is not a Mock assertion method — attribute access on a
    MagicMock auto-creates a child mock and calling it returns a (truthy) mock,
    so that assert could never fail.  Use ``assert_called_once_with``.
    NOTE(review): if the setter expands '~' before calling mkdir, the expected
    argument must be the expanded path — confirm against the setter.
    """
    my_dir: str = '~/my_dir'
    cache.config.cache_directory = my_dir
    assert cache.config.cache_directory == my_dir
    mkdir.assert_called_once_with(my_dir)
def clip_by_tensor(t, t_min, t_max):
    """Clamp every element of `t` into [t_min, t_max].

    Implemented with float masks (rather than `clamp`) so the same arithmetic
    form as the original is kept; `t` is cast to float first.
    """
    t = t.float()
    below = (t < t_min).float()
    clipped_low = below * t_min + (1.0 - below) * t
    above = (clipped_low > t_max).float()
    return above * t_max + (1.0 - above) * clipped_low
def init_model(model, opt, argv):
    """Initialise `model` weights per `opt.weight_init` and apply optional
    BatchNorm overrides (`opt.bn_momentum`, `opt.bn_eps`).

    `argv` is unused; kept for interface compatibility.
    Raises ValueError when `opt.weight_init` is missing or unrecognised
    (same behaviour as the original hasattr chain).
    """
    # Read the attribute once instead of repeating hasattr() in every branch.
    # A missing attribute yields None, which matches no scheme and falls
    # through to the ValueError — identical to the original behaviour.
    weight_init = getattr(opt, 'weight_init', None)
    if weight_init == 'xavier':
        network_weight_xavier_init(model)
    elif weight_init == 'MSRAPrelu':
        network_weight_MSRAPrelu_init(model)
    elif weight_init == 'stupid':
        network_weight_stupid_init(model)
    elif weight_init == 'zero':
        network_weight_zero_init(model)
    elif weight_init == '01':
        network_weight_01_init(model)
    elif weight_init == 'custom':
        # 'custom' delegates to the model's own initializer when present.
        if not hasattr(model, 'init_parameters'):
            logging.info('Warning! No init_parameters found')
        else:
            model.init_parameters()
    elif weight_init == 'None':
        logging.info('Warning!!! model loaded without initialization !')
    else:
        raise ValueError('Unknown weight_init')
    # Apply both BatchNorm overrides in a single pass over the modules
    # (the original walked model.modules() twice).
    bn_momentum = getattr(opt, 'bn_momentum', None)
    bn_eps = getattr(opt, 'bn_eps', None)
    if bn_momentum is not None or bn_eps is not None:
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                if bn_momentum is not None:
                    layer.momentum = bn_momentum
                if bn_eps is not None:
                    layer.eps = bn_eps
    return model
def mhgls_params_from_sums(sums, YY, ybar):
    """Solve the multi-harmonic GLS normal equations from precomputed sums.

    `sums` holds (C, S, CC, CS, SS, YC, YS); returns the cosine coefficients,
    sine coefficients and the constant offset.  `YY` is unused here (kept for
    interface compatibility).
    """
    C, S, CC, CS, SS, YC, YS = sums
    nharms = len(C)
    # Assemble the symmetric block normal matrix and its right-hand side.
    design = np.block([[CC, CS], [CS.T, SS]])
    rhs = np.concatenate((YC, YS))
    coeffs = np.linalg.solve(design, rhs)
    cn, sn = coeffs[:nharms], coeffs[nharms:]
    offset = ybar - (cn.dot(C) + sn.dot(S))
    return (cn, sn, offset)
def get_loader(image_root, gt_root, batchsize, trainsize, test_root, test_gt_root, shuffle=True, num_workers=12, pin_memory=True):
    """Build the training DataLoader (batched, optionally shuffled) and the
    evaluation DataLoader (batch size 1, never shuffled)."""
    train_set = SalObjDataset(image_root, gt_root, trainsize)
    train_loader = data.DataLoader(
        dataset=train_set,
        batch_size=batchsize,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    eval_set = test_dataset(test_root, test_gt_root, trainsize)
    eval_loader = data.DataLoader(
        dataset=eval_set,
        batch_size=1,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    return (train_loader, eval_loader)
def main():
    """CLI entry point: initialise ivadomed, parse args, run the comparison."""
    init_ivadomed()
    arguments = get_parser().parse_args()
    visualize_and_compare_models(arguments.ofolders, arguments.metric, arguments.metadata)
def wrap_if_pmap(p_func: Callable) -> Callable:
    """Wrap `p_func` so it only runs when the named pmap axis exists.

    `core.axis_frame(axis_name)` raises NameError outside a pmap with that
    axis; in that case the input is returned unchanged.
    """
    def wrapped(obj, axis_name):
        try:
            core.axis_frame(axis_name)
            return p_func(obj, axis_name)
        except NameError:
            # Not inside a pmap over `axis_name` — pass through untouched.
            return obj
    return wrapped
class Experiment(object):
    """Bundles a loader `L` and evaluator `E`, prepares the model for the
    requested mode ('test' loads weights and evals; 'ft' loads weights and
    trains) and optionally wraps it in DataParallel."""

    def __init__(self, L, E):
        self.Loader = L
        self.Eval = E
        self.Model = L.Model
        # BUG FIX: the original tested `L.MODE in 'test'`, which is a
        # substring check (true for '', 'te', 'est', ...); equality was
        # clearly intended.
        # NOTE(review): this condition reads both L.MODE and L.mode — two
        # different attributes; confirm both exist on the loader.
        if (L.MODE == 'test') or (L.mode == 'ft'):
            self.Model.load_state_dict(torch.load(L.mpath, map_location='cpu'))
        self.Model = self.Model.eval() if (L.MODE == 'test') else self.Model.train()
        if not L.cpu:
            self.Model = torch.nn.DataParallel(self.Model.cuda(L.ids[0]), device_ids=L.ids)

    def optims(self, optim, params):
        """Return an optimizer by name ('SGD' or 'Adam'); None for any other
        name (original behaviour preserved)."""
        if optim == 'SGD':
            print('using SGD')
            return torch.optim.SGD(params=params, momentum=0.9, weight_decay=0.0005)
        elif optim == 'Adam':
            print('using Adam')
            return torch.optim.Adam(params=params, weight_decay=0.0005)

    def schedulers(self, scheduler, optimizer):
        """Return an LR scheduler by name ('StepLR' or 'ReduceLROnPlateau');
        None for any other name (original behaviour preserved)."""
        if scheduler == 'StepLR':
            return torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
        elif scheduler == 'ReduceLROnPlateau':
            return torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
def generate_predicted_files():
    """Run the detector over the CCPD test list and write one
    evaluation-format txt per image ('LP <score> <x> <y> <w> <h>').

    BUG FIX: the input list file (`fin`) was opened and never closed, and the
    per-image output files relied on manual close; both are now managed with
    `with` so handles are released even on errors.
    """
    symbol_file_path = '../symbol_farm/symbol_64_512_16L_3scales_v1_deploy.json'
    model_file_path = '../saved_model/configuration_64_512_16L_3scales_v1_2019-09-29-13-41-44/train_64_512_16L_3scales_v1_iter_600000.params'
    my_predictor = Predict(mxnet=mxnet, symbol_file_path=symbol_file_path, model_file_path=model_file_path, ctx=mxnet.gpu(0), receptive_field_list=cfg.param_receptive_field_list, receptive_field_stride=cfg.param_receptive_field_stride, bbox_small_list=cfg.param_bbox_small_list, bbox_large_list=cfg.param_bbox_large_list, receptive_field_center_start=cfg.param_receptive_field_center_start, num_output_scales=cfg.param_num_output_scales)
    txt_file_path = '../data_provider_farm/data_folder/data_list_CCPD_test.txt'
    predicted_file_root = ('./CCPD_testset_predicted_files_for_evaluation_' + os.path.basename(model_file_path).split('.')[0])
    if not os.path.exists(predicted_file_root):
        os.makedirs(predicted_file_root)
    resize_scale = 1
    score_threshold = 0.2
    NMS_threshold = 0.6
    counter = 0
    with open(txt_file_path, 'r') as fin:
        for line in fin:
            # Each line: "<image_path>,..." — only the path is used here.
            parts = line.strip('\n').split(',')
            im = cv2.imread(parts[0], cv2.IMREAD_COLOR)
            bboxes = my_predictor.predict(im, resize_scale=resize_scale, score_threshold=score_threshold, top_k=10000, NMS_threshold=NMS_threshold)
            predicted_file_name = os.path.basename(parts[0]).replace('jpg', 'txt')
            with open(os.path.join(predicted_file_root, predicted_file_name), 'w') as fout:
                for bbox in bboxes:
                    # Clamp the score to 1 and convert to x/y/width/height.
                    fout.write((('LP %.03f %d %d %d %d' % ((bbox[4] if (bbox[4] <= 1) else 1), math.floor(bbox[0]), math.floor(bbox[1]), math.ceil((bbox[2] - bbox[0])), math.ceil((bbox[3] - bbox[1])))) + '\n'))
            counter += 1
            print(('[%d] is processed.' % counter))
def stop_gradient_if_not(cond, *args):
    """For each tensor in `args`, keep gradients only where `cond` holds,
    stopping them elsewhere; shape is preserved via `get_shape`.

    Returns None for no args, the single tensor for one arg, otherwise a
    tuple (same contract as the original).
    """
    processed = [
        tf.reshape(tf.where(cond, v, tf.stop_gradient(v)), get_shape(v))
        for v in args
    ]
    if not processed:
        return None
    if len(processed) == 1:
        return processed[0]
    return tuple(processed)
class SingleEdgeGraphFormatter(BaseGraphFormatter):
    """Formats a raw item JSON into a datapoint carrying a DGL graph plus a
    tokenized target sequence."""

    def __init__(self, config, name='SingleEdgeGraphFormatter'):
        self.name = name
        self.config = config
        BaseGraphFormatter.__init__(self, config, name)

    def format(self, item_json, vocab_dicts):
        """Build a datapoint from `item_json`.

        `vocab_dicts` is (token_vd, node_vd, target_vd, word_vd); target_vd
        and word_vd are unpacked but unused here.
        """
        (token_vd, node_vd, target_vd, word_vd) = vocab_dicts
        datapoint = self.datapoint_class()
        dgl_graph = self._convert_to_dglgraph(item_json['jsgraph'], token_vd, node_vd)
        datapoint.function_graph = dgl_graph
        datapoint.function = item_json['function']
        datapoint.graph_size = item_json['graph_size']
        self._set_target(datapoint, item_json['target'], token_vd)
        return datapoint

    def _set_target(self, dp, target, vd):
        """Attach the target: ints pass through as-is, strings are tokenized
        (dp.tgt) and vectorized (dp.tgt_vec)."""
        if type(target) == int:
            dp.tgt = target
        else:
            tok_tgt = self.t3_parser.tokenize(target)
            dp.tgt = tok_tgt
            (tok_tgt, blen) = self.tokenize_sentence(tok_tgt, vd)
            dp.tgt_vec = tok_tgt

    def _convert_to_dglgraph(self, jsgraph, token_vd, node_vd):
        """Build a dgl.graph from the edge list and per-node features; moves
        the graph to CUDA when config.use_cuda is set."""
        edges = jsgraph['graph']
        node_features = jsgraph['node_features']
        src_edges = [x[0] for x in edges]
        dst_edges = [x[1] for x in edges]
        g = dgl.graph((src_edges, dst_edges))
        assert (g.num_nodes() == len(node_features))
        node_feat_vecs = ([0] * g.num_nodes())
        node_feat_lens = ([0] * g.num_nodes())
        # node_features keys are stringified node ids; fill by position.
        for (key, node_feat) in node_features.items():
            (node_feat, node_feat_len) = self.get_feats(node_feat, token_vd, node_vd)
            node_feat_vecs[int(key)] = node_feat
            node_feat_lens[int(key)] = node_feat_len
        g.ndata['node_feat'] = stack_seq_to_tensor(node_feat_vecs)
        g.ndata['node_len'] = torch.tensor(node_feat_lens, dtype=torch.long)
        if self.config.use_cuda:
            g = g.to('cuda')
        return g

    def get_feats(self, node_feat, token_vd, node_vd):
        """Return (feature vector, length) per the configured feature mode
        ('structure', 'textual' or 'both')."""
        use_feats = self.config.node_emb_layer['use_nfeature']
        if use_feats == 'structure':
            return self.get_structure_feat(node_feat, node_vd)
        elif use_feats == 'textual':
            return self.get_textual_feat(node_feat, token_vd)
        elif use_feats == 'both':
            (struct_vec, struct_len) = self.get_structure_feat(node_feat, node_vd)
            (text_vec, text_len) = self.get_textual_feat(node_feat, token_vd)
            assert (len(struct_vec) == 1)
            # Prepend the single structure token to the textual features.
            text_vec.insert(0, struct_vec[0])
            return (text_vec, (text_len + 1))
        # BUG FIX: the original silently returned None for an unrecognised
        # mode, which then failed far away in _convert_to_dglgraph; fail fast.
        raise ValueError('Unknown use_nfeature mode: %r' % (use_feats,))

    def get_structure_feat(self, node_feat, node_vd):
        """Vectorize the node-type token (node_feat[0]) without EOS."""
        node_type = node_feat[0]
        (type_index, nlen) = self.tokenize_sentence(node_type, node_vd, eos=False)
        return (type_index, nlen)

    def get_textual_feat(self, node_feat, token_vd):
        """Tokenize/vectorize the node's code string (node_feat[1]); empty
        code maps to <EMPTY_CODE>, and the result is padded/truncated to
        config.max_code_token_len."""
        code_str = node_feat[1]
        tk_code_str = self.t3_parser.tokenize(code_str)
        (nidxes, nlen) = self.tokenize_sentence(tk_code_str, token_vd, eos=False)
        if not nidxes:
            nidxes.append(token_vd.get_w2i('<EMPTY_CODE>'))
            nlen = 1
        nidxes = pad_to_max(nidxes, self.config.max_code_token_len, pad_token=None)
        if nlen > self.config.max_code_token_len:
            nlen = self.config.max_code_token_len
        return (nidxes, nlen)
class AnyDataset(Dataset):
    """Dataset over raw lines produced by `make_lines`; each item is a
    single-key dict {name: line}."""

    def __init__(self, root=None, file=None, split=None, processing=None, name=None, **kwargs):
        assert (split is not None), 'Argument split cannot be None'
        self.root = root
        self.file = file
        self.split = split
        self.name = name or 'any'
        # NOTE(review): eval() on a config-supplied string executes arbitrary
        # code — only safe for fully trusted configuration.
        self.processing = eval(processing or 'lambda x: x')
        self.lines = make_lines(root, split, file, self.processing)

    def __getitem__(self, index):
        return {self.name: self.lines[index]}

    def __len__(self):
        return len(self.lines)

    def get_collate_fn(self):
        """Return a collate_fn that gathers each sample's value into a list
        under this dataset's key."""
        def collate_fn(batch):
            return {self.name: [sample[self.name] for sample in batch]}
        return collate_fn

    def inference(self, sentences):
        raise NotImplementedError()

    def __repr__(self):
        meta = {'root': self.root, 'file': self.file, 'processing': self.processing, 'name': self.name}
        return 'AnyDataset\n' + json.dumps(meta, indent=4, sort_keys=False, default=str)
class ImageFolder(DatasetFolder):
    """DatasetFolder specialised to image extensions, exposing `samples`
    under the legacy `imgs` attribute as well."""

    def __init__(self, root, transform=None, target_transform=None, loader=default_loader):
        super().__init__(
            root,
            loader,
            IMG_EXTENSIONS,
            transform=transform,
            target_transform=target_transform,
        )
        # Alias kept for callers that expect `.imgs`.
        self.imgs = self.samples
class IN22KDATASET(data.Dataset):
    """ImageNet-22K-style dataset backed by a JSON annotation list.

    Each database entry appears to be [relative_path, class_index] — confirm
    against the annotation file.
    """

    def __init__(self, root, ann_file='', transform=None, target_transform=None):
        super(IN22KDATASET, self).__init__()
        self.data_path = root
        self.ann_path = os.path.join(self.data_path, ann_file)
        self.transform = transform
        self.target_transform = target_transform
        # BUG FIX: json.load(open(...)) leaked the file handle; use a
        # context manager so it is closed deterministically.
        with open(self.ann_path) as f:
            self.database = json.load(f)

    def _load_image(self, path):
        """Best-effort image load; substitutes a random 224x224 RGB image on
        any failure (so training can continue past corrupt files)."""
        try:
            im = Image.open(path)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort fallback but
        # only for ordinary exceptions.
        except Exception:
            print('ERROR IMG LOADED: ', path)
            random_img = (np.random.rand(224, 224, 3) * 255)
            im = Image.fromarray(np.uint8(random_img))
        return im

    def __getitem__(self, index):
        """Return (transformed RGB image, int target) for sample `index`."""
        idb = self.database[index]
        images = self._load_image(((self.data_path + '/') + idb[0])).convert('RGB')
        if self.transform is not None:
            images = self.transform(images)
        target = int(idb[1])
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (images, target)

    def __len__(self):
        return len(self.database)
def mixconv1x1_block(in_channels, out_channels, kernel_count, stride=1, groups=1, bias=False, use_bn=True, bn_eps=1e-05, activation=(lambda : nn.ReLU(inplace=True))):
    """Build a MixConvBlock of `kernel_count` parallel pointwise (1x1,
    zero-padding) kernels; all other options are forwarded unchanged."""
    pointwise_kernels = [1] * kernel_count
    no_padding = [0] * kernel_count
    return MixConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=pointwise_kernels,
        stride=stride,
        padding=no_padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
    )
def _pad_num_var_param(rstart=1, max=None):
    """Return the even sizes 2*r for r in [rstart, __MAX_RANK__], truncated
    at the first value exceeding `max` (when given).

    NOTE: the parameter name `max` shadows the builtin; kept for caller
    compatibility.
    """
    sizes = []
    for rank in range(rstart, __MAX_RANK__ + 1):
        candidate = rank * 2
        if max is not None and candidate > max:
            break
        sizes.append(candidate)
    return sizes
def get_last_checkpoint_if_any(checkpoint_folder):
    """Return the naturally-sorted last *.h5 checkpoint in the folder, or
    None when there are no checkpoints (the folder is created if missing)."""
    os.makedirs(checkpoint_folder, exist_ok=True)
    checkpoints = glob('{}/*.h5'.format(checkpoint_folder), recursive=True)
    if not checkpoints:
        return None
    return natural_sort(checkpoints)[-1]
# NOTE(review): the two decorator lines above this test were garbled in the
# source ('.ml_torch_only' and ".parametrize('seed', [123, 456])") and were
# syntax errors as written.  They were most likely
# "@mltest.ml_torch_only" and "@pytest.mark.parametrize('seed', [123, 456])"
# — restore them before running under pytest.
def test_ragged_to_dense_random(dtype, ml, seed):
    """ragged_to_dense should scatter each ragged row into a fixed-width
    dense row, truncating long rows and filling short ones with the default
    value."""
    rng = np.random.RandomState(seed)
    values = rng.random(size=(10000,)).astype(dtype)
    # Build monotone row_splits with random row lengths in [0, 10).
    row_splits = [0]
    while row_splits[-1] < values.shape[0]:
        row_splits.append(row_splits[-1] + rng.randint(0, 10))
    row_splits[-1] = values.shape[0]
    row_splits = np.array(row_splits, dtype=np.int64)
    out_col_size = rng.randint(1, 37)
    default_value = np.array(-1, dtype=dtype)
    ans = mltest.run_op(ml, ml.device, True, ml.ops.ragged_to_dense, values, row_splits, out_col_size, default_value)
    # Reference: dense matrix filled with the default, then row-wise copy.
    expected = np.full((row_splits.shape[0] - 1, out_col_size), default_value)
    for i in range(row_splits.shape[0] - 1):
        for (j, value_idx) in enumerate(range(row_splits[i], row_splits[i + 1])):
            if j < expected.shape[1]:
                expected[(i, j)] = values[value_idx]
    np.testing.assert_equal(ans, expected)
def get_down_block(down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, downsample_padding: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=True, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', temporal_num_attention_heads: int=8, temporal_max_seq_length: int=32, transformer_layers_per_block: int=1) -> Union[('DownBlock3D', 'CrossAttnDownBlock3D', 'DownBlockMotion', 'CrossAttnDownBlockMotion', 'DownBlockSpatioTemporal', 'CrossAttnDownBlockSpatioTemporal')]:
    """Factory for 3D/motion/spatio-temporal down blocks.

    Dispatches on `down_block_type` and forwards the relevant subset of
    arguments to the block constructor.  Cross-attention variants require
    `cross_attention_dim`.  Raises ValueError for unknown types.
    """
    if down_block_type == 'DownBlock3D':
        return DownBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift)
    elif down_block_type == 'CrossAttnDownBlock3D':
        if cross_attention_dim is None:
            raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlock3D')
        return CrossAttnDownBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift)
    # Consistency fix: this branch was a stray `if` in an otherwise uniform
    # elif chain (behaviour was unchanged because every earlier branch
    # returns, but the chain is now uniform).
    elif down_block_type == 'DownBlockMotion':
        return DownBlockMotion(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length)
    elif down_block_type == 'CrossAttnDownBlockMotion':
        if cross_attention_dim is None:
            raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlockMotion')
        return CrossAttnDownBlockMotion(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length)
    elif down_block_type == 'DownBlockSpatioTemporal':
        return DownBlockSpatioTemporal(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample)
    elif down_block_type == 'CrossAttnDownBlockSpatioTemporal':
        if cross_attention_dim is None:
            raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlockSpatioTemporal')
        return CrossAttnDownBlockSpatioTemporal(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_downsample=add_downsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads)
    raise ValueError(f'{down_block_type} does not exist.')
def get_inception_score():
    """Sample 10 x 100 generated images, map [-1, 1] -> [0, 255] int HWC,
    and return the library's inception score for them."""
    batches = [session.run(samples_100) for _ in range(10)]
    samples = np.concatenate(batches, axis=0)
    samples = ((samples + 1.0) * (255.0 / 2)).astype('int32')
    # NCHW -> NHWC for the scorer.
    samples = samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)
    return lib.inception_score.get_inception_score(list(samples))
class SmallScore(nn.Module):
    """Small conv encoder/decoder score network followed by a per-image MLP
    head; the head hardcodes a 10x10 spatial output."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        nef = config.model.nef * 4
        channels = config.data.channels
        layers = [
            nn.Conv2d(channels, nef, 4, stride=2, padding=1),
            nn.GroupNorm(4, nef),
            nn.ELU(),
            nn.Conv2d(nef, nef * 2, 3, stride=1, padding=1),
            nn.GroupNorm(4, nef * 2),
            nn.ELU(),
            nn.ConvTranspose2d(nef * 2, nef, 3, stride=1, padding=1),
            nn.GroupNorm(4, nef),
            nn.ELU(),
            nn.ConvTranspose2d(nef, channels, 4, stride=2, padding=1),
            nn.ELU(),
        ]
        self.u_net = nn.Sequential(*layers)
        flat_dim = channels * (10 ** 2)
        self.fc = nn.Sequential(
            nn.Linear(flat_dim, 256),
            nn.LayerNorm(256),
            nn.ELU(),
            nn.Linear(256, flat_dim),
        )

    def forward(self, x):
        # assumes x is (batch, channels, 10, 10) — the fc head fixes 10x10.
        if x.is_cuda and self.config.training.ngpu > 1:
            score = nn.parallel.data_parallel(self.u_net, x, list(range(self.config.training.ngpu)))
        else:
            score = self.u_net(x)
        flat = score.view(x.shape[0], -1)
        return self.fc(flat).view(x.shape[0], self.config.data.channels, 10, 10)
def get_all_results():
    """Aggregate 'mean std' cube counts per robot config and experiment,
    evaluated at each environment's cutoff step."""
    results = {}
    for cfg in tqdm(cfgs):
        robot_key = cfg.experiment_name.split('-')[0]
        per_robot = results.setdefault(robot_key, {})
        cutoff = all_cutoffs[robot_key][cfg.env_name]
        curves = extend_curves(all_curves[cfg.experiment_name], min_len=cutoff + 1)
        cubes = [curve[cutoff] for curve in curves]
        per_robot[cfg.experiment_name] = '{:.2f} {:.2f}'.format(np.mean(cubes), np.std(cubes))
    return results
def actor_rollout(obs, action, last, model, actor, critic, config):
    """Roll out imagined trajectories with the world model and score them.

    Returns a list of per-agent batched tensors:
    [actions, available-action mask (or None), old policy, imagined features,
    critic returns] — all detached.

    NOTE(review): shapes inferred from the reshape calls — obs appears to be
    (time, batch, n_agents, obs_dim); confirm against the caller.
    """
    n_agents = obs.shape[2]
    # Freeze the world model so gradients flow only into actor/critic.
    with FreezeParameters([model]):
        embed = model.observation_encoder(obs.reshape((- 1), n_agents, obs.shape[(- 1)]))
        embed = embed.reshape(obs.shape[0], obs.shape[1], n_agents, (- 1))
        prev_state = model.representation.initial_state(obs.shape[1], obs.shape[2], device=obs.device)
        (prior, post, _) = rollout_representation(model.representation, obs.shape[0], embed, action, prev_state, last)
        # Flatten (time-1, batch) into a single rollout dimension.
        post = post.map((lambda x: x.reshape(((obs.shape[0] - 1) * obs.shape[1]), n_agents, (- 1))))
        items = rollout_policy(model.transition, model.av_action, config.HORIZON, actor, post)
    imag_feat = items['imag_states'].get_features()
    # Reward features pair each step's stochastic state with the NEXT deter state.
    imag_rew_feat = torch.cat([items['imag_states'].stoch[:(- 1)], items['imag_states'].deter[1:]], (- 1))
    returns = critic_rollout(model, critic, imag_feat, imag_rew_feat, items['actions'], items['imag_states'].map((lambda x: x.reshape((- 1), n_agents, x.shape[(- 1)]))), config)
    # Drop the final step (no bootstrapped return for it) and detach everything.
    output = [items['actions'][:(- 1)].detach(), (items['av_actions'][:(- 1)].detach() if (items['av_actions'] is not None) else None), items['old_policy'][:(- 1)].detach(), imag_feat[:(- 1)].detach(), returns.detach()]
    return [batch_multi_agent(v, n_agents) for v in output]
class ResUNet2SP(ME.MinkowskiNetwork):
    """Sparse residual U-Net (Minkowski Engine) with sum-pooling downsampling
    and transposed-conv upsampling; skip connections concatenate encoder and
    decoder features at each scale.

    NOTE(review): conv1, conv4, block4, and conv4_tr hard-code
    ME.RegionType.HYPER_CUBE instead of using REGION_TYPE like the other
    layers.  With the default REGION_TYPE this is identical, but a subclass
    overriding REGION_TYPE would get mixed region types — confirm whether
    that is intentional.
    """
    # Class-level knobs intended to be overridden by subclasses.
    NORM_TYPE = None
    BLOCK_NORM_TYPE = 'BN'
    CHANNELS = [None, 32, 64, 128, 256]       # encoder widths per scale (index 0 unused)
    TR_CHANNELS = [None, 32, 64, 64, 128]     # decoder widths per scale (index 0 unused)
    REGION_TYPE = ME.RegionType.HYPER_CUBE

    def __init__(self, in_channels=3, out_channels=32, bn_momentum=0.1, conv1_kernel_size=3, normalize_feature=False, D=3):
        """Build the encoder/decoder stacks; D is the spatial dimension."""
        ME.MinkowskiNetwork.__init__(self, D)
        NORM_TYPE = self.NORM_TYPE
        BLOCK_NORM_TYPE = self.BLOCK_NORM_TYPE
        CHANNELS = self.CHANNELS
        TR_CHANNELS = self.TR_CHANNELS
        REGION_TYPE = self.REGION_TYPE
        # When set, forward() L2-normalizes the output features.
        self.normalize_feature = normalize_feature
        self.conv1 = conv(in_channels=in_channels, out_channels=CHANNELS[1], kernel_size=conv1_kernel_size, stride=1, dilation=1, bias=False, region_type=ME.RegionType.HYPER_CUBE, dimension=D)
        self.norm1 = get_norm(NORM_TYPE, CHANNELS[1], bn_momentum=bn_momentum, dimension=D)
        self.block1 = get_block(BLOCK_NORM_TYPE, CHANNELS[1], CHANNELS[1], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        # Downsampling is done by sum-pooling (stride 2) before each conv.
        self.pool2 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.conv2 = conv(in_channels=CHANNELS[1], out_channels=CHANNELS[2], kernel_size=3, stride=1, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm2 = get_norm(NORM_TYPE, CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2 = get_block(BLOCK_NORM_TYPE, CHANNELS[2], CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.pool3 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.conv3 = conv(in_channels=CHANNELS[2], out_channels=CHANNELS[3], kernel_size=3, stride=1, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm3 = get_norm(NORM_TYPE, CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3 = get_block(BLOCK_NORM_TYPE, CHANNELS[3], CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.pool4 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.conv4 = conv(in_channels=CHANNELS[3], out_channels=CHANNELS[4], kernel_size=3, stride=1, dilation=1, bias=False, region_type=ME.RegionType.HYPER_CUBE, dimension=D)
        self.norm4 = get_norm(NORM_TYPE, CHANNELS[4], bn_momentum=bn_momentum, dimension=D)
        self.block4 = get_block(BLOCK_NORM_TYPE, CHANNELS[4], CHANNELS[4], bn_momentum=bn_momentum, region_type=ME.RegionType.HYPER_CUBE, dimension=D)
        # Decoder: transposed convs (stride 2) with skip concatenation.
        self.conv4_tr = conv_tr(in_channels=CHANNELS[4], out_channels=TR_CHANNELS[4], kernel_size=3, stride=2, dilation=1, bias=False, region_type=ME.RegionType.HYPER_CUBE, dimension=D)
        self.norm4_tr = get_norm(NORM_TYPE, TR_CHANNELS[4], bn_momentum=bn_momentum, dimension=D)
        self.block4_tr = get_block(BLOCK_NORM_TYPE, TR_CHANNELS[4], TR_CHANNELS[4], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv3_tr = conv_tr(in_channels=(CHANNELS[3] + TR_CHANNELS[4]), out_channels=TR_CHANNELS[3], kernel_size=3, stride=2, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm3_tr = get_norm(NORM_TYPE, TR_CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3_tr = get_block(BLOCK_NORM_TYPE, TR_CHANNELS[3], TR_CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        self.conv2_tr = conv_tr(in_channels=(CHANNELS[2] + TR_CHANNELS[3]), out_channels=TR_CHANNELS[2], kernel_size=3, stride=2, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm2_tr = get_norm(NORM_TYPE, TR_CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2_tr = get_block(BLOCK_NORM_TYPE, TR_CHANNELS[2], TR_CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D)
        # Final 1x1 conv at full resolution (no norm/block).
        self.conv1_tr = conv(in_channels=(CHANNELS[1] + TR_CHANNELS[2]), out_channels=TR_CHANNELS[1], kernel_size=1, stride=1, dilation=1, bias=False, dimension=D)
        self.final = ME.MinkowskiConvolution(in_channels=TR_CHANNELS[1], out_channels=out_channels, kernel_size=1, stride=1, dilation=1, bias=True, dimension=D)

    def forward(self, x):
        """Encode x through 4 scales, decode with skip connections, and
        return a SparseTensor of features (optionally L2-normalized)."""
        # --- encoder ---
        out_s1 = self.conv1(x)
        out_s1 = self.norm1(out_s1)
        out_s1 = self.block1(out_s1)
        out = MEF.relu(out_s1)
        out_s2 = self.pool2(out)
        out_s2 = self.conv2(out_s2)
        out_s2 = self.norm2(out_s2)
        out_s2 = self.block2(out_s2)
        out = MEF.relu(out_s2)
        out_s4 = self.pool3(out)
        out_s4 = self.conv3(out_s4)
        out_s4 = self.norm3(out_s4)
        out_s4 = self.block3(out_s4)
        out = MEF.relu(out_s4)
        out_s8 = self.pool4(out)
        out_s8 = self.conv4(out_s8)
        out_s8 = self.norm4(out_s8)
        out_s8 = self.block4(out_s8)
        out = MEF.relu(out_s8)
        # --- decoder with skip concatenations ---
        out = self.conv4_tr(out)
        out = self.norm4_tr(out)
        out = self.block4_tr(out)
        out_s4_tr = MEF.relu(out)
        out = ME.cat(out_s4_tr, out_s4)
        out = self.conv3_tr(out)
        out = self.norm3_tr(out)
        out = self.block3_tr(out)
        out_s2_tr = MEF.relu(out)
        out = ME.cat(out_s2_tr, out_s2)
        out = self.conv2_tr(out)
        out = self.norm2_tr(out)
        out = self.block2_tr(out)
        out_s1_tr = MEF.relu(out)
        out = ME.cat(out_s1_tr, out_s1)
        out = self.conv1_tr(out)
        out = MEF.relu(out)
        out = self.final(out)
        if self.normalize_feature:
            # Per-point L2 normalization with a small epsilon for stability.
            return ME.SparseTensor((out.F / (torch.norm(out.F, p=2, dim=1, keepdim=True) + 1e-08)), coordinate_map_key=out.coordinate_map_key, coordinate_manager=out.coordinate_manager)
        else:
            return out
# BUG FIX: the original decorator line was garbled as "_grad()" (a syntax
# error); the function name ("without_backprop") and the torch.distributed
# usage strongly indicate it was "@torch.no_grad()".
@torch.no_grad()
def concat_all_gather_without_backprop(x: Tensor, dim: int=0) -> Tensor:
    """All-gather `x` across distributed workers and concatenate along `dim`,
    without gradient tracking.  Returns `x` unchanged when torch.distributed
    is unavailable or not initialised."""
    if dist.is_available() and dist.is_initialized():
        tensors_gather = [torch.ones_like(x) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(tensors_gather, x, async_op=False)
        output = torch.cat(tensors_gather, dim=dim)
    else:
        output = x
    return output
class GeneratorTest(unittest.TestCase):
    """Manual integration tests for the Kitti batch generators.

    NOTE(review): dataset_path/save_to are machine-specific absolute paths —
    these tests only run on the original author's machines as-is.
    """
    depth_base = 25.0
    depth_step = 0.25
    batch_size = 2
    dataset_path = '/Volumes/Bahia/kitti-dataset'

    def test_generate_batch(self):
        """Time a single validation batch and dump a reprojection plot."""
        generator = KittiGenerator(self.dataset_path, self.depth_base, self.depth_step)
        start_time = time.time()
        num_set_same_img = 1
        batch = generator.validation_batch(multipatch=False, num_set_same_img=num_set_same_img)
        duration = time.time() - start_time
        print(('PSV Time for batch with %s patches per image: (%.3f sec)' % (num_set_same_img, duration)))
        print(('Batch shape is -> %s' % str(batch['target'].shape)))
        size = sum([batch['planes'][key].nbytes for key in batch['planes']])
        print(('Batch size: %s bytes' % size))
        save_to = '/Users/boyander/MASTER_CVC/DepthEstimation/DepthEstimation/batch_test'
        print(('saving batch: %s' % save_to))
        reprojected_images_plot(batch)
        print('Done saving batch!')

    def test_meanzero(self):
        """Compare target means with and without mean-zero normalisation."""
        input_organizer_zero = InputOrganizer(batch_size=self.batch_size, meanzero=True)
        input_organizer_normal = InputOrganizer(batch_size=self.batch_size, meanzero=False)
        generator = KittiGenerator(self.dataset_path, self.depth_base, self.depth_step)
        batch = generator.next_batch(multipatch=True)
        b_zero = input_organizer_zero.get_feed_dict([batch])
        b_normal = input_organizer_normal.get_feed_dict([batch])
        mean_zero = np.mean(b_zero[input_organizer_zero.get_target_placeholder().name])
        mean_normal = np.mean(b_normal[input_organizer_normal.get_target_placeholder().name])
        print(('Mean on target is -> (%s for organizer meanzero) (%s for organizer normal)' % (mean_zero, mean_normal)))
        # NOTE(review): this subtracts |mean_zero| from itself, so it is
        # always 0 — probably meant mean_normal on one side; confirm intent.
        sub_zero = np.sum(np.subtract(np.abs(mean_zero), np.abs(mean_zero)))
        print(('Zero substraction is %s' % sub_zero))

    def test_multiworker(self):
        """Pull 100 batches through the queued generator and time each."""
        input_organizer = InputOrganizer(batch_size=self.batch_size, meanzero=True)
        kitti_params = KittiParams(self.dataset_path, self.depth_base, self.depth_step, patches_per_set=2)
        generator = GeneratorQueued(kitti_params, input_organizer)
        # BUG FIX: `xrange` is Python 2 only and raises NameError on
        # Python 3; use range.
        for i in range(100):
            start_time = time.time()
            batch = generator.get_batch()
            duration = time.time() - start_time
            print(('%s - Time for batch of %s PSV: (%.3f sec)' % (i, 5, duration)))

    def test_dot_p_merge(self):
        """Smoke-test dotpMerge on a tiny hand-built tensor.

        NOTE(review): tf.concat(3, [...]) uses the pre-TF-1.0 argument order
        and tf.initialize_all_variables()/tf.Session are TF1-era APIs; this
        test requires an old TensorFlow.
        """
        a = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
        a = tf.expand_dims(tf.expand_dims(a, 0), 3)
        a = tf.concat(3, [a, a, a])
        b = tf.Variable([[0.5, 0.5], [0.5, 0.5]])
        b = tf.expand_dims(tf.expand_dims(b, 0), 3)
        dotp = dotpMerge([a], b, numplanes=1)
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        result = sess.run(dotp)
        print(result)
def test_isotropic_eddington_selfconsist_dehnencore_dens_directint():
    """The Eddington DF of a cored Dehnen sphere should reproduce the
    potential's own density via direct integration."""
    pot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    dfp = eddingtondf(pot=pot)
    tol = 0.01
    check_dens_directint(
        dfp,
        pot,
        tol,
        lambda r: pot.dens(r, 0),
        rmin=pot._scale / 10.0,
        rmax=pot._scale * 10.0,
        bins=31,
    )
    return None
class Classifier_Module(nn.Module):
    """ASPP-style head: parallel dilated 3x3 convs over 2048-channel
    features, summed into one prediction map."""

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList(
            nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                      padding=padding, dilation=dilation, bias=True)
            for dilation, padding in zip(dilation_series, padding_series)
        )
        # Small gaussian init on all branch weights.
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out += branch(x)
        return out
class NormalizeImageDict(object):
    """Normalize the listed dict entries with ImageNet mean/std, optionally
    rescaling [0, 255] inputs to [0, 1] first.

    Note: the sample dict is modified in place (and also returned).
    """

    def __init__(self, image_keys, normalizeRange=True):
        self.image_keys = image_keys
        self.normalizeRange = normalizeRange
        # ImageNet channel statistics.
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )

    def __call__(self, sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
class MomentumUpdaterHook(Hook):
def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.9):
if (warmup is not None):
if (warmup not in ['constant', 'linear', 'exp']):
raise ValueError(f'"{warmup}" is not a supported type for warming up, valid types are "constant" and "linear"')
if (warmup is not None):
assert (warmup_iters > 0), '"warmup_iters" must be a positive integer'
assert (0 < warmup_ratio <= 1.0), '"warmup_momentum" must be in range (0,1]'
self.by_epoch = by_epoch
self.warmup = warmup
self.warmup_iters = warmup_iters
self.warmup_ratio = warmup_ratio
self.base_momentum = []
self.regular_momentum = []
def _set_momentum(self, runner, momentum_groups):
if isinstance(runner.optimizer, dict):
for (k, optim) in runner.optimizer.items():
for (param_group, mom) in zip(optim.param_groups, momentum_groups[k]):
if ('momentum' in param_group.keys()):
param_group['momentum'] = mom
elif ('betas' in param_group.keys()):
param_group['betas'] = (mom, param_group['betas'][1])
else:
for (param_group, mom) in zip(runner.optimizer.param_groups, momentum_groups):
if ('momentum' in param_group.keys()):
param_group['momentum'] = mom
elif ('betas' in param_group.keys()):
param_group['betas'] = (mom, param_group['betas'][1])
def get_momentum(self, runner, base_momentum):
raise NotImplementedError
def get_regular_momentum(self, runner):
if isinstance(runner.optimizer, dict):
momentum_groups = {}
for k in runner.optimizer.keys():
_momentum_group = [self.get_momentum(runner, _base_momentum) for _base_momentum in self.base_momentum[k]]
momentum_groups.update({k: _momentum_group})
return momentum_groups
else:
return [self.get_momentum(runner, _base_momentum) for _base_momentum in self.base_momentum]
def get_warmup_momentum(self, cur_iters):
def _get_warmup_momentum(cur_iters, regular_momentum):
if (self.warmup == 'constant'):
warmup_momentum = [(_momentum / self.warmup_ratio) for _momentum in regular_momentum]
elif (self.warmup == 'linear'):
k = ((1 - (cur_iters / self.warmup_iters)) * (1 - self.warmup_ratio))
warmup_momentum = [(_momentum / (1 - k)) for _momentum in regular_momentum]
elif (self.warmup == 'exp'):
k = (self.warmup_ratio ** (1 - (cur_iters / self.warmup_iters)))
warmup_momentum = [(_momentum / k) for _momentum in regular_momentum]
return warmup_momentum
if isinstance(self.regular_momentum, dict):
momentum_groups = {}
for (key, regular_momentum) in self.regular_momentum.items():
momentum_groups[key] = _get_warmup_momentum(cur_iters, regular_momentum)
return momentum_groups
else:
return _get_warmup_momentum(cur_iters, self.regular_momentum)
def before_run(self, runner):
if isinstance(runner.optimizer, dict):
self.base_momentum = {}
for (k, optim) in runner.optimizer.items():
for group in optim.param_groups:
if ('momentum' in group.keys()):
group.setdefault('initial_momentum', group['momentum'])
else:
group.setdefault('initial_momentum', group['betas'][0])
_base_momentum = [group['initial_momentum'] for group in optim.param_groups]
self.base_momentum.update({k: _base_momentum})
else:
for group in runner.optimizer.param_groups:
if ('momentum' in group.keys()):
group.setdefault('initial_momentum', group['momentum'])
else:
group.setdefault('initial_momentum', group['betas'][0])
self.base_momentum = [group['initial_momentum'] for group in runner.optimizer.param_groups]
def before_train_epoch(self, runner):
    """At each epoch start, refresh and apply the regular momentum
    (only when the schedule is epoch-based)."""
    if self.by_epoch:
        self.regular_momentum = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_momentum)
def before_train_iter(self, runner):
    """Apply regular or warmup momentum depending on the current iteration."""
    cur_iter = runner.iter
    if not self.by_epoch:
        # Iteration-based schedule: recompute the regular momentum each step.
        self.regular_momentum = self.get_regular_momentum(runner)
        if self.warmup is None or cur_iter >= self.warmup_iters:
            self._set_momentum(runner, self.regular_momentum)
        else:
            self._set_momentum(runner, self.get_warmup_momentum(cur_iter))
    else:
        # Epoch-based schedule: only the warmup phase is handled per-iter.
        if self.warmup is None or cur_iter > self.warmup_iters:
            return
        if cur_iter == self.warmup_iters:
            self._set_momentum(runner, self.regular_momentum)
        else:
            self._set_momentum(runner, self.get_warmup_momentum(cur_iter))
def create_gaussian_diffusion(*, steps=1000, learn_sigma=False, sigma_small=False, noise_schedule='linear', use_kl=False, predict_xstart=False, rescale_timesteps=False, rescale_learned_sigmas=False, timestep_respacing='', use_entropy_scale=False):
    """Build a SpacedDiffusion configured from the given flags.

    The flag combinations select the loss type (KL vs MSE variants), what
    the model predicts (epsilon vs x_0), and how the variance is modelled
    (fixed large/small vs learned range).
    """
    betas = gd.get_named_beta_schedule(noise_schedule, steps)
    # loss type: KL dominates, then rescaled MSE, plain MSE by default
    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE
    mean_type = gd.ModelMeanType.START_X if predict_xstart else gd.ModelMeanType.EPSILON
    if learn_sigma:
        var_type = gd.ModelVarType.LEARNED_RANGE
    elif sigma_small:
        var_type = gd.ModelVarType.FIXED_SMALL
    else:
        var_type = gd.ModelVarType.FIXED_LARGE
    # empty respacing means "use every step"
    if not timestep_respacing:
        timestep_respacing = [steps]
    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, timestep_respacing),
        betas=betas,
        model_mean_type=mean_type,
        model_var_type=var_type,
        loss_type=loss_type,
        rescale_timesteps=rescale_timesteps,
        use_entropy_scale=use_entropy_scale,
    )
def evalfxn(val_method, model, dataloader, criterion, args, num_classes, **kwargs):
    """Evaluate ``model`` on ``dataloader``, optionally under adversarial attack.

    Args:
        val_method: 'baseline' for clean evaluation only; 'pgd' or 'auto'
            to additionally report adversarial accuracy.
        model: network to evaluate (switched to eval mode here).
        dataloader: iterable of (images, targets) batches.
        criterion: loss used on the clean ('baseline') path.
        args: namespace providing ``gpu``, ``print_freq`` and ``rank``.
        num_classes: dataset class count; top-5 accuracy degrades to top-2
            when fewer than 5 classes exist.

    Returns:
        dict with 'top1'/'topK' averages (plus '_adv' variants for
        adversarial methods).

    Raises:
        ValueError: if ``val_method`` is not supported.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4f')
    # With fewer than 5 classes top-5 is meaningless; fall back to top-2.
    # (The original duplicated identical if/else branches here.)
    k = 5 if num_classes >= 5 else 2
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    if val_method in ['pgd', 'auto']:
        top1_adv = AverageMeter('', ':6.2f')
        top5_adv = AverageMeter('', ':6.2f')
        progress = ProgressMeter(len(dataloader), [batch_time, data_time, losses, top1, top5, top1_adv, top5_adv], prefix='Test: ')
    elif val_method == 'baseline':
        progress = ProgressMeter(len(dataloader), [batch_time, data_time, losses, top1, top5], prefix='Test: ')
    else:
        raise ValueError(f'Trainer {val_method} not supported')
    model.eval()
    end = time.time()
    for i, (images, targets) in enumerate(dataloader):
        data_time.update(time.time() - end)
        images, targets = images.cuda(args.gpu, non_blocking=True), targets.cuda(args.gpu, non_blocking=True)
        logits = model(images)
        acc1, acc5 = accuracy(logits, targets, topk=(1, k))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        if val_method in ['pgd', 'auto']:
            logits_adv, loss = get_adversarial_loss(val_method, model, images, targets, logits, criterion, None, args)
            acc1_adv, acc5_adv = accuracy(logits_adv, targets, topk=(1, k))
            top1_adv.update(acc1_adv[0], images.size(0))
            top5_adv.update(acc5_adv[0], images.size(0))
        else:
            # val_method was validated above, so this branch is 'baseline'
            loss = criterion(logits, targets)
        losses.update(loss.item(), images.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if (i % args.print_freq) == 0 and args.rank == 0:
            progress.display(i)
    # BUG FIX: the original built a ValueError for unknown methods without
    # raising it, then fell through to an unbound `result`. val_method is
    # already validated above, so a plain if/else suffices here.
    if val_method in ['pgd', 'auto']:
        result = {'top1': top1.avg, f'top{k}': top5.avg, 'top1_adv': top1_adv.avg, f'top{k}_adv': top5_adv.avg}
    else:
        result = {'top1': top1.avg, f'top{k}': top5.avg}
    return result
class ToSpaceBGR(object):
    """Transform that swaps the first and third channels of a CHW tensor
    (RGB <-> BGR). Acts as a no-op when constructed with ``is_bgr=False``."""

    def __init__(self, is_bgr):
        # When False, __call__ returns its input unchanged.
        self.is_bgr = is_bgr

    def __call__(self, tensor):
        if not self.is_bgr:
            return tensor
        swapped = tensor.clone()
        # RHS is read from the untouched input, so both channels swap cleanly.
        swapped[0], swapped[2] = tensor[2], tensor[0]
        return swapped
def main(args):
    """Truncate every tokenized example in a dataset and save it to disk.

    args must provide: dataset (name/path), truncate (max token length),
    num_proc (map parallelism) and output (destination directory).
    """
    dataset = load_dataset(args.dataset, split='train')
    limit = args.truncate

    def truncate(sample):
        # Clip all token-level fields to the same maximum length.
        for key in ('input_ids', 'labels', 'attention_mask'):
            sample[key] = sample[key][:limit]
        return sample

    dataset = dataset.map(truncate, desc='Truncating', num_proc=args.num_proc)
    dataset.save_to_disk(args.output)
def server():
    """Parse server/data CLI options and launch the simuleval online server."""
    arg_parser = argparse.ArgumentParser()
    options.add_server_args(arg_parser)
    options.add_data_args(arg_parser)
    parsed = arg_parser.parse_args()
    simuleval.online.start_server(parsed)
def get_feature_names(df):
    """Return the columns of ``df`` usable as model features.

    Drops target-like columns (prefixes 'y', 'Y', 'Z', 'pixel') and a
    fixed denylist of metadata/derived columns.
    """
    denylist = {'catIdx', 'cell_num', 'pid', 'valid', 'X', 'X_pvals', 'x_pos', 'X_starts', 'X_ends', 'X_extended', 'short', 'long', 'hotspots', 'sig_idxs', 'X_max_around_Y_peak', 'X_max_after_Y_peak', 'X_max_diff', 'X_peak_idx', 't', 'x_pos_seq', 'y_pos_seq', 'X_smooth_spl', 'X_smooth_spl_dx', 'X_smooth_spl_d2x', 'X_quantiles'}

    def _keep(name):
        return not name.startswith(('y', 'Y', 'Z', 'pixel')) and name not in denylist

    return [name for name in list(df.keys()) if _keep(name)]
def setup_training(mode, generator, discriminator, generator_batcher, discriminator_batcher):
    """Prepare the training directory and run pre-training or adversarial
    training under a TF Supervisor session.

    mode must be 'pretrain' (generator only) or 'train' (discriminator
    pre-training followed by adversarial training).
    """
    train_dir = os.path.join(FLAGS.log_root, 'train')
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    # Restoring the best checkpoint replaces the whole training run.
    if FLAGS.restore_best_model:
        restore_best_model()
        return
    saver = tf.train.Saver(max_to_keep=3)
    supervisor = tf.train.Supervisor(logdir=train_dir, is_chief=True, saver=saver, summary_op=None, save_summaries_secs=60, save_model_secs=60, global_step=generator.global_step)
    summary_writer = supervisor.summary_writer
    sess = supervisor.prepare_or_wait_for_session(config=util.get_config())
    try:
        if mode == 'pretrain':
            trainer.pretrain_generator(generator, generator_batcher, summary_writer, sess)
        elif mode == 'train':
            trainer.pretrain_discriminator(discriminator, sess)
            trainer.adversarial_train(generator, discriminator, generator_batcher, discriminator_batcher, summary_writer, sess)
        else:
            raise ValueError('Caught invalid value of mode!')
    except KeyboardInterrupt:
        # Ctrl-C: shut the supervisor down cleanly instead of crashing.
        tf.logging.info('Caught keyboard interrupt on worker. Stopping supervisor...')
        supervisor.stop()
def load_tweet_users_posted_rumours():
    """Load the pickled collection of users who posted rumours.

    The pickle file 'tweet_users_posted_rumours' is expected one directory
    above this module's package directory.

    Returns:
        the unpickled object (presumably a set/list of user ids — confirm
        against the code that wrote the file).
    """
    path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tweet_users_posted_rumours')
    # The context manager closes the file; the original's explicit close()
    # inside the `with` block was redundant, and the handle was misleadingly
    # named `outfile` although it is opened for reading.
    with open(path, 'rb') as infile:
        return pickle.load(infile)
def convert_to_trainID(maskpath, out_mask_dir, is_train, clsID_to_trID=full_clsID_to_trID, suffix=''):
    """Remap class IDs in a segmentation mask to train IDs and save it.

    Pixels whose class has no mapping stay at 255 (ignore). Masks that end
    up entirely ignored are skipped. Output goes under
    out_mask_dir/{train,val}<suffix>/ keeping the original file name.
    """
    mask = np.array(Image.open(maskpath))
    # 255 marks "ignore" pixels that no class maps onto
    remapped = np.ones_like(mask, dtype=np.uint8) * 255
    for cls_id, train_id in clsID_to_trID.items():
        remapped[mask == cls_id] = train_id
    split_dir = ('train' + suffix) if is_train else ('val' + suffix)
    seg_filename = osp.join(out_mask_dir, split_dir, osp.basename(maskpath))
    # Skip masks that contain only ignore pixels.
    uniques = np.unique(remapped)
    if len(uniques) == 1 and uniques[0] == 255:
        return
    Image.fromarray(remapped).save(seg_filename, 'PNG')
def init_model(args, train_iter, flownmt):
    """Run data-dependent initialization of the FlowNMT model on one batch.

    Rank <= 0 writes messages through the project logger; other ranks just
    print to stdout.
    """
    flownmt.eval()
    init_batch_size = args.init_batch_size

    def _report(message):
        # only the (single) non-positive rank owns the log file
        if args.rank <= 0:
            logging(message, args.log)
        else:
            print(message)

    _report('Rank {}, init model: {} instances'.format(args.rank, init_batch_size))
    src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
    _report('maximum sentence length (src, tgt): {}, {}'.format(src_sents.size(1), tgt_sents.size(1)))
    flownmt.init(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def pretend_to_be_nnUNetTrainer(folder, checkpoints=('model_best.model.pkl', 'model_final_checkpoint.model.pkl')):
    """Rewrite the given checkpoint pickles in `folder` so they report the
    plain 'nnUNetTrainer' as their trainer class.

    Thin convenience wrapper around pretend_to_be_other_trainer with the
    target trainer name fixed.
    """
    pretend_to_be_other_trainer(folder, 'nnUNetTrainer', checkpoints)
def get_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for launching entry-point scripts as jobs.

    --entry-point / --arguments may be repeated (action='append');
    --seeds-per-script and --scripts-per-job default to 1.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--entry-point', type=str, action='append', default=None)
    p.add_argument(
        '--arguments',
        metavar='KEY=VALUE',
        nargs='+',
        action='append',
        help='Set kv pairs used as args for the entry point script.',
    )
    p.add_argument('--seeds-per-script', type=int, default=1)
    p.add_argument('--scripts-per-job', type=int, default=1, help='configs')
    return p
def conv1x1(in_planes: int, out_planes: int, stride: int=1) -> HalutConv2d:
    """1x1 convolution with no bias (Halut variant of the ResNet helper)."""
    return HalutConv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def create_arg_parser():
    """Build the argument parser for preprocessing the europarl die/dat corpus."""
    arg_parser = argparse.ArgumentParser(description='Preprocess the europarl corpus for the die-dat task.')
    arg_parser.add_argument(
        '--path',
        help='Path to the corpus file.',
        metavar='path',
        default='data/raw/europarl/europarl-v7.nl-en.nl',
    )
    arg_parser.add_argument(
        '--number',
        help='Number of examples in the output dataset',
        type=int,
        default=10000,
    )
    return arg_parser
# NOTE(review): the two leading lines look like pytest decorators whose
# '@pytest.mark' prefix was lost in transit ('.xfail', '.parametrize') —
# restore from version control before running this test.
.xfail
.parametrize('mass', [30.0])
def test_horizon_with_network_against_single_detector(mass):
    """Horizon of a one-detector network should match the bare detector,
    and adding CE1 to ET should increase both distance and redshift by >20%."""
    params = {'mass_1': mass, 'mass_2': mass, 'theta_jn': 0.0, 'psi': 0.0, 'phase': 0.0, 'geocent_time': 0.0, 'ra': 1.0, 'dec': 1.0}
    et_ce_network = Network(['ET', 'CE1'], fisher_parameters=[], parameters=[])
    et_network = Network(['ET'], fisher_parameters=[], parameters=[])
    et_detector = Detector('ET', fisher_parameters=[], parameters=[])
    (d1, z1) = horizon(params, et_detector)
    (d2, z2) = horizon(params, et_network)
    (d3, z3) = horizon(params, et_ce_network)
    # a single-detector network must agree with the bare detector
    assert np.isclose(d1, d2)
    assert np.isclose(z1, z2)
    # the two-detector network should see noticeably farther
    assert ((d1 * 1.2) < d3)
    assert ((z1 * 1.2) < z3)
class AFNB(nn.Module):
    """Asymmetric Fusion Non-local Block: attends high-level features over
    low-level features at several query scales, then fuses the summed
    context back into the high-level path through a 1x1 bottleneck."""

    def __init__(self, low_in_channels, high_in_channels, channels, out_channels, query_scales, key_pool_scales, conv_cfg, norm_cfg, act_cfg):
        super(AFNB, self).__init__()
        # one self-attention stage per query scale
        self.stages = nn.ModuleList([
            SelfAttentionBlock(
                low_in_channels=low_in_channels,
                high_in_channels=high_in_channels,
                channels=channels,
                out_channels=out_channels,
                share_key_query=False,
                query_scale=scale,
                key_pool_scales=key_pool_scales,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
            )
            for scale in query_scales
        ])
        # projects the concatenated [context, high_feats] back to out_channels
        self.bottleneck = ConvModule(out_channels + high_in_channels, out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)

    def forward(self, low_feats, high_feats):
        """Fuse attention context from every stage into high_feats."""
        stage_outputs = [stage(high_feats, low_feats) for stage in self.stages]
        context = torch.stack(stage_outputs, dim=0).sum(dim=0)
        return self.bottleneck(torch.cat([context, high_feats], 1))
def build_criterion(cfg, device):
    """Instantiate the loss registered under cfg.MODEL.CRITERION.NAME and
    move it to `device`."""
    return registry.CRITERION[cfg.MODEL.CRITERION.NAME](cfg).to(device=device)
def write_EHRinstance_to_example_files(seqs, max_seq_length, max_predictions_per_seq, masked_lm_prob, vocab, output_files, rng):
    """Serialize EHR sequences as BERT-style masked-LM TFRecord examples.

    Examples are written round-robin across ``output_files``. Sequences
    longer than ``max_seq_length`` are skipped; shorter ones are zero-padded.

    Args:
        seqs: iterable of sequence tuples; seq[1] is the next-sentence label,
            seq[2] the token ids, seq[3] the segment ids (assumed layout —
            confirm against the caller).
        max_seq_length: fixed length every example is padded/limited to.
        max_predictions_per_seq: fixed number of masked-LM slots per example.
        masked_lm_prob: fraction of tokens to mask.
        vocab: vocabulary used when choosing replacement tokens.
        output_files: destination TFRecord paths.
        rng: random generator driving the masking.
    """
    writers = []
    for output_file in output_files:
        writers.append(tf.python_io.TFRecordWriter(output_file))
    writer_index = 0
    total_written = 0
    # Track the observed min/max sequence lengths (printed for debugging).
    min_seq_l = max_seq_length
    max_seq_l = 0
    for (seq_index, seq) in enumerate(seqs):
        if len(seq[2]) > max_seq_length:
            continue
        if len(seq[2]) < min_seq_l:
            min_seq_l = len(seq[2])
            print(min_seq_l)
        if len(seq[2]) > max_seq_l:
            max_seq_l = len(seq[2])
            print(max_seq_l)
        input_seq = seq[2]
        input_mask = [1] * len(input_seq)
        # NOTE(review): the padding loop below appends to seq[3] in place,
        # mutating the caller's sequence — confirm this is intended.
        segment_ids = seq[3]
        (input_ids, masked_lm_positions, masked_lm_ids) = create_masked_EHR_predictions(input_seq, masked_lm_prob, max_predictions_per_seq, vocab, rng)
        assert len(input_ids) <= max_seq_length
        # Zero-pad up to the fixed sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        masked_lm_weights = [1.0] * len(masked_lm_ids)
        # Pad the masked-LM slots; weight 0.0 marks padding positions.
        while len(masked_lm_positions) < max_predictions_per_seq:
            masked_lm_positions.append(0)
            masked_lm_ids.append(0)
            masked_lm_weights.append(0.0)
        next_sentence_label = seq[1]
        features = collections.OrderedDict()
        features['input_ids'] = create_int_feature(input_ids)
        features['input_mask'] = create_int_feature(input_mask)
        features['segment_ids'] = create_int_feature(segment_ids)
        features['masked_lm_positions'] = create_int_feature(masked_lm_positions)
        features['masked_lm_ids'] = create_int_feature(masked_lm_ids)
        features['masked_lm_weights'] = create_float_feature(masked_lm_weights)
        features['next_sentence_labels'] = create_int_feature([next_sentence_label])
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writers[writer_index].write(tf_example.SerializeToString())
        writer_index = (writer_index + 1) % len(writers)
        total_written += 1
        if seq_index < 20:
            tf.logging.info('*** Example ***')
            # BUG FIX: tf.logging.info uses lazy %-style formatting; the
            # original 'tokens: ' had no placeholder for `seq`, so the
            # argument was never rendered.
            tf.logging.info('tokens: %s', seq)
            for feature_name in features.keys():
                feature = features[feature_name]
                values = []
                if feature.int64_list.value:
                    values = feature.int64_list.value
                elif feature.float_list.value:
                    values = feature.float_list.value
                tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
    for writer in writers:
        writer.close()
    tf.logging.info('Wrote %d total instances', total_written)
def setup_output_folder(save_dir: str, folder_only: bool=False):
    """Ensure ``save_dir``/logs exists and return a path inside it.

    Args:
        save_dir: base output directory.
        folder_only: when True, return the logs folder itself instead of a
            log-file path.

    Returns:
        The logs folder path, or a timestamped 'train_<ts>.log' file path
        inside it.
    """
    log_filename = 'train_' + time.strftime('%Y_%m_%dT%H_%M_%S') + '.log'
    log_folder = os.path.join(save_dir, 'logs')
    if not os.path.exists(log_folder):
        # BUG FIX: the original called os.path.mkdirs, which does not exist
        # (AttributeError); directory creation is os.makedirs.
        os.makedirs(log_folder)
    if folder_only:
        return log_folder
    return os.path.join(log_folder, log_filename)
# NOTE(review): '.dataclass' looks like a decorator whose prefix (e.g.
# '@flax.struct.dataclass') was lost in transit — confirm against version
# control before running.
.dataclass
class FlaxBaseModelOutputWithPast(ModelOutput):
    """Model output container holding the final hidden state plus optional
    cached key/values, per-layer hidden states and attention maps."""
    # final layer hidden states
    last_hidden_state: jnp.ndarray = None
    # cached key/value states for fast autoregressive decoding
    past_key_values: Optional[Dict[(str, jnp.ndarray)]] = None
    # hidden states of every layer (when requested)
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    # attention weights of every layer (when requested)
    attentions: Optional[Tuple[jnp.ndarray]] = None
def test_sparse_prior():
    """sparse_priors must return exactly the rows of grid_priors selected by
    index, for point, anchor, SSD and YOLO generators, on CPU and — when
    available — CUDA."""
    # --- point generator, CPU ---
    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator
    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_points.grid_priors(featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
    sparse_prior = mlvl_points.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cpu')
    assert (not sparse_prior.is_cuda)
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    sparse_prior = mlvl_points.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    # --- anchor generator, CPU ---
    from mmdet.models.task_modules.prior_generators import AnchorGenerator
    mlvl_anchors = AnchorGenerator(strides=[16, 32], ratios=[1.0], scales=[1.0], base_sizes=[4, 8])
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_anchors.grid_priors(featmap_sizes=featmap_sizes, device='cpu')
    sparse_prior = mlvl_anchors.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cpu')
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    sparse_prior = mlvl_anchors.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    # --- SSD anchor generator, CPU ---
    from mmdet.models.task_modules.prior_generators import SSDAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = SSDAnchorGenerator(scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32], ratios=[[2], [2, 3], [2, 3]])
    ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    for i in range(len(featmap_sizes)):
        sparse_ssd_anchors = anchor_generator.sparse_priors(prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cpu')
        assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
    # --- YOLO anchor generator, CPU ---
    from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = YOLOAnchorGenerator(strides=[32, 16, 8], base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]])
    yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    for i in range(len(featmap_sizes)):
        sparse_yolo_anchors = anchor_generator.sparse_priors(prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cpu')
        assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
    # --- repeat all four generators on CUDA when a GPU is present ---
    if torch.cuda.is_available():
        mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
        prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6, 9]).long().cuda()
        featmap_sizes = [(6, 8), (6, 4)]
        grid_anchors = mlvl_points.grid_priors(featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
        sparse_prior = mlvl_points.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_points.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        assert sparse_prior.is_cuda
        mlvl_anchors = AnchorGenerator(strides=[16, 32], ratios=[1.0, 2.5], scales=[1.0, 5.0], base_sizes=[4, 8])
        prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6, 9]).long().cuda()
        featmap_sizes = [(13, 5), (16, 4)]
        grid_anchors = mlvl_anchors.grid_priors(featmap_sizes=featmap_sizes, device='cuda')
        sparse_prior = mlvl_anchors.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[0], level_idx=0, device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_anchors.sparse_priors(prior_idxs=prior_indexs, featmap_size=featmap_sizes[1], level_idx=1, device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        from mmdet.models.task_modules.prior_generators import SSDAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = SSDAnchorGenerator(scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32], ratios=[[2], [2, 3], [2, 3]])
        ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_ssd_anchors = anchor_generator.sparse_priors(prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cuda')
            assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
        from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = YOLOAnchorGenerator(strides=[32, 16, 8], base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]])
        yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_yolo_anchors = anchor_generator.sparse_priors(prior_idxs=prior_indexs, level_idx=i, featmap_size=featmap_sizes[i], device='cuda')
            assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def main():
    """Parse CLI arguments and load the model plus its evaluation assets."""
    cli_args = parser.parse_args()
    (model, val_loader, dictionary, w_matrix) = load_model(cli_args)
    return (model, val_loader, dictionary, w_matrix)
def get_header(path):
    """Return the first (header) row of the CSV file at ``path``.

    Raises:
        StopIteration: if the file is empty.
    """
    # newline='' is required by the csv module so that embedded newlines
    # inside quoted fields are parsed correctly.
    with open(path, newline='') as f:
        return next(csv.reader(f))
class LIRCMOP7(LIRCMOP5):
    """Problem LIR-CMOP7: shares LIRCMOP5's objectives but replaces its
    constraints with three rotated elliptic feasibility regions."""

    def __init__(self, number_of_variables: int=30):
        super(LIRCMOP7, self).__init__(number_of_variables)

    def evaluate_constraints(self, solution: FloatSolution) -> FloatSolution:
        """Fill solution.constraints with the three ellipse constraints."""
        r = 0.1
        theta = (- 0.25) * pi
        a_array = [2.0, 2.5, 2.5]
        b_array = [6.0, 12.0, 10.0]
        x_offset = [1.2, 2.25, 3.5]
        y_offset = [1.2, 2.25, 3.5]
        f1 = solution.objectives[0]
        f2 = solution.objectives[1]
        cos_t = cos(theta)
        sin_t = sin(theta)
        constraints = [0.0 for _ in range(self.number_of_constraints())]
        for i in range(len(x_offset)):
            # rotate the objective point into the i-th ellipse frame
            dx = f1 - x_offset[i]
            dy = f2 - y_offset[i]
            u = ((dx * cos_t) - (dy * sin_t)) / a_array[i]
            v = ((dx * sin_t) + (dy * cos_t)) / b_array[i]
            constraints[i] = pow(u, 2) + pow(v, 2) - r
        solution.constraints = constraints
        return solution

    def name(self):
        return 'LIR-CMOP7'
class TestUpdateFunctions(object):
    """Check that lasagne update rules drive a simple quadratic toward the
    reference values and keep two identically-initialized params in sync."""
    # Reference parameter values after 10 updates for each method
    # (names suggest they were produced with torch — confirm provenance).
    torch_values = {'sgd': [0., 0., 0.], 'momentum': [0., 0., 0.], 'nesterov_momentum': [0., 0., 0.], 'adagrad': [0., 0., 0.], 'rmsprop': [0., 0., 0.], 'adadelta': [0., 0., 0.], 'adam': [0., 0., 0.], 'adamax': [0., 0., 0.]}
    def f(self, X):
        """Weighted quadratic objective with its minimum at the origin."""
        return ([0.1, 0.2, 0.3] * (X ** 2)).sum()
    # NOTE(review): '.parametrize' looks like a decorator whose
    # '@pytest.mark' prefix was lost in transit — restore before running.
    .parametrize('method, kwargs', [['sgd', {'learning_rate': 0.1}], ['momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['nesterov_momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['adagrad', {'learning_rate': 0.1}], ['rmsprop', {'learning_rate': 0.01}], ['adadelta', {}], ['adam', {'learning_rate': 0.01}], ['adamax', {'learning_rate': 0.01}]])
    def test_updates(self, method, kwargs):
        """Apply `method` for 10 steps and compare against reference values."""
        A = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        B = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        update_func = getattr(lasagne.updates, method)
        updates = update_func((self.f(A) + self.f(B)), [A, B], **kwargs)
        do_update = theano.function([], [], updates=updates)
        for _ in range(10):
            do_update()
        # identical init + identical gradients => identical trajectories
        assert np.allclose(A.get_value(), B.get_value())
        assert np.allclose(A.get_value(), self.torch_values[method])
class ModelWithLoss(torch.nn.Module):
    """Wrap a network and its criterion so a single forward pass yields the
    final output head, the loss and the per-term loss statistics."""

    def __init__(self, model, loss):
        super(ModelWithLoss, self).__init__()
        self.model = model
        self.loss = loss

    def forward(self, batch):
        """Run the model on batch['input'] and score it against `batch`.

        Returns:
            (last output head, loss, loss statistics dict).
        """
        heads = self.model(batch['input'])
        loss_value, stats = self.loss(heads, batch)
        return heads[-1], loss_value, stats
class NASNetTest(tf.test.TestCase):
def testBuildLogitsCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(logits, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(logits, end_points) = nasnet.build_nasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])
def testBuildLogitsLargeModel(self):
batch_size = 5
(height, width) = (331, 331)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
(logits, end_points) = nasnet.build_nasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])
def testBuildPreLogitsCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(net, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes)
self.assertFalse(('AuxLogits' in end_points))
self.assertFalse(('Predictions' in end_points))
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 768])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(net, end_points) = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertFalse(('AuxLogits' in end_points))
self.assertFalse(('Predictions' in end_points))
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1056])
def testBuildPreLogitsLargeModel(self):
batch_size = 5
(height, width) = (331, 331)
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
(net, end_points) = nasnet.build_nasnet_large(inputs, num_classes)
self.assertFalse(('AuxLogits' in end_points))
self.assertFalse(('Predictions' in end_points))
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4032])
def testAllEndPointsShapesCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(_, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 32, 32, 96], 'Cell_0': [batch_size, 32, 32, 192], 'Cell_1': [batch_size, 32, 32, 192], 'Cell_2': [batch_size, 32, 32, 192], 'Cell_3': [batch_size, 32, 32, 192], 'Cell_4': [batch_size, 32, 32, 192], 'Cell_5': [batch_size, 32, 32, 192], 'Cell_6': [batch_size, 16, 16, 384], 'Cell_7': [batch_size, 16, 16, 384], 'Cell_8': [batch_size, 16, 16, 384], 'Cell_9': [batch_size, 16, 16, 384], 'Cell_10': [batch_size, 16, 16, 384], 'Cell_11': [batch_size, 16, 16, 384], 'Cell_12': [batch_size, 8, 8, 768], 'Cell_13': [batch_size, 8, 8, 768], 'Cell_14': [batch_size, 8, 8, 768], 'Cell_15': [batch_size, 8, 8, 768], 'Cell_16': [batch_size, 8, 8, 768], 'Cell_17': [batch_size, 8, 8, 768], 'Reduction_Cell_0': [batch_size, 16, 16, 256], 'Reduction_Cell_1': [batch_size, 8, 8, 512], 'global_pool': [batch_size, 768], 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue((endpoint_name in end_points))
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)
def testNoAuxHeadCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = 10
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(_, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes, config=config)
self.assertEqual(('AuxLogits' in end_points), use_aux_head)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(_, end_points) = nasnet.build_nasnet_mobile(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 28, 28, 88], 'Cell_0': [batch_size, 28, 28, 264], 'Cell_1': [batch_size, 28, 28, 264], 'Cell_2': [batch_size, 28, 28, 264], 'Cell_3': [batch_size, 28, 28, 264], 'Cell_4': [batch_size, 14, 14, 528], 'Cell_5': [batch_size, 14, 14, 528], 'Cell_6': [batch_size, 14, 14, 528], 'Cell_7': [batch_size, 14, 14, 528], 'Cell_8': [batch_size, 7, 7, 1056], 'Cell_9': [batch_size, 7, 7, 1056], 'Cell_10': [batch_size, 7, 7, 1056], 'Cell_11': [batch_size, 7, 7, 1056], 'Reduction_Cell_0': [batch_size, 14, 14, 352], 'Reduction_Cell_1': [batch_size, 7, 7, 704], 'global_pool': [batch_size, 1056], 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue((endpoint_name in end_points))
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)
def testNoAuxHeadMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(_, end_points) = nasnet.build_nasnet_mobile(inputs, num_classes, config=config)
self.assertEqual(('AuxLogits' in end_points), use_aux_head)
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
(height, width) = (331, 331)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
(_, end_points) = nasnet.build_nasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 336], 'Cell_0': [batch_size, 42, 42, 1008], 'Cell_1': [batch_size, 42, 42, 1008], 'Cell_2': [batch_size, 42, 42, 1008], 'Cell_3': [batch_size, 42, 42, 1008], 'Cell_4': [batch_size, 42, 42, 1008], 'Cell_5': [batch_size, 42, 42, 1008], 'Cell_6': [batch_size, 21, 21, 2016], 'Cell_7': [batch_size, 21, 21, 2016], 'Cell_8': [batch_size, 21, 21, 2016], 'Cell_9': [batch_size, 21, 21, 2016], 'Cell_10': [batch_size, 21, 21, 2016], 'Cell_11': [batch_size, 21, 21, 2016], 'Cell_12': [batch_size, 11, 11, 4032], 'Cell_13': [batch_size, 11, 11, 4032], 'Cell_14': [batch_size, 11, 11, 4032], 'Cell_15': [batch_size, 11, 11, 4032], 'Cell_16': [batch_size, 11, 11, 4032], 'Cell_17': [batch_size, 11, 11, 4032], 'Reduction_Cell_0': [batch_size, 21, 21, 1344], 'Reduction_Cell_1': [batch_size, 11, 11, 2688], 'global_pool': [batch_size, 4032], 'AuxLogits': [batch_size, num_classes], 'Logits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue((endpoint_name in end_points))
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
(height, width) = (331, 331)
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
(_, end_points) = nasnet.build_nasnet_large(inputs, num_classes, config=config)
self.assertEqual(('AuxLogits' in end_points), use_aux_head)
def testVariablesSetDeviceMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
nasnet.build_nasnet_mobile(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testUnknownBatchSizeMobileModel(self):
batch_size = 1
(height, width) = (224, 224)
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(logits, _) = nasnet.build_nasnet_mobile(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluationMobileModel(self):
batch_size = 2
(height, width) = (224, 224)
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(logits, _) = nasnet.build_nasnet_mobile(eval_inputs, num_classes, is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testOverrideHParamsCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.cifar_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(_, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(), [batch_size, 96, 32, 32])
def testOverrideHParamsMobileModel(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
(_, end_points) = nasnet.build_nasnet_mobile(inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(), [batch_size, 88, 28, 28])
def testOverrideHParamsLargeModel(self):
batch_size = 5
(height, width) = (331, 331)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = nasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
(_, end_points) = nasnet.build_nasnet_large(inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(), [batch_size, 336, 42, 42])
def testCurrentStepCifarModel(self):
batch_size = 5
(height, width) = (32, 32)
num_classes = 10
inputs = tf.random_uniform((batch_size, height, width, 3))
global_step = tf.train.create_global_step()
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(logits, end_points) = nasnet.build_nasnet_cifar(inputs, num_classes, current_step=global_step)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])
def testUseBoundedAcitvationCifarModel(self):
batch_size = 1
(height, width) = (32, 32)
num_classes = 10
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
config = nasnet.cifar_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
(_, _) = nasnet.build_nasnet_cifar(inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual((node.op == 'Relu6'), use_bounded_activation) |
class Session(object):
    """Base class for an experiment session: owns the config, the session
    name, and the logger.

    Subclasses must implement make_output_folders,
    compile_session_params_from_cfg and run_session.
    """

    def __init__(self, cfg):
        self._cfg = cfg
        self._sess_name = self._make_sess_name()
        self._main_out_folder_abs = None
        self._log_folder_abs = None
        self._log = None

    def _make_sess_name(self):
        # Fall back to a generic name when the config does not provide one.
        configured = self._cfg[self._cfg.SESSION_NAME]
        return configured if configured is not None else 'Session'

    def make_output_folders(self):
        raise NotImplementedError('Not implemented virtual function.')

    def setup_logger(self):
        # The log file lives in the log folder and is named after the session.
        log_filepath = self._log_folder_abs + '/' + self._sess_name + '.txt'
        self._log = loggers.Logger(log_filepath)

    def get_logger(self):
        return self._log

    def override_file_cfg_with_cmd_line_cfg(self, args):
        self._cfg.override_file_cfg_with_cmd_line_cfg(self._log, args)

    def compile_session_params_from_cfg(self, *args):
        raise NotImplementedError('Not implemented virtual function.')

    def get_abs_path_to_cfg(self):
        return self._cfg.get_abs_path_to_cfg()

    def run_session(self, *args):
        raise NotImplementedError('Not implemented virtual function.')
class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer, applying BPE over word-segmented Vietnamese text.

    The vocab file maps tokens to ids (the four special tokens are pinned to
    ids 0-3, matching the fairseq layout) and the merges file holds the BPE
    merge ranks.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # Special tokens occupy the first four ids (fairseq convention).
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: <s> X </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """PhoBERT does not use token type ids, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # BUG FIX: restored the @property decorator — the PreTrainedTokenizer
        # base class treats vocab_size as a property (e.g. in __len__).
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to one word token, with memoization."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so end-of-word merges are distinct.
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Greedily apply the lowest-ranked (most frequent) merge.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if (word[i] == first) and (i < (len(word) - 1)) and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Strip the trailing '</w>' end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each word."""
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        # NOTE(review): this removes ALL spaces after joining — confirm this
        # matches the BPE separator convention used by this variant.
        out_string = ' '.join(tokens).replace(' ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the vocab and merges files into save_directory; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        out_merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        # Only copy when the destination differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return (out_vocab_file, out_merge_file)

    def add_from_file(self, f):
        """Load a fairseq-style '<token> <count>' dictionary file (or handle) into the encoder."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
def run_episode(seed=None, policy=None):
    """Roll out one full episode of `policy` in a freshly generated env.

    Returns the flattened final `info` dict augmented with the cumulative
    episode reward and the policy's name.
    """
    env = generate_env(seed)
    state = env.reset()
    total_reward = 0
    done = False
    while not done:
        state, reward, done, info = env.step(policy.get_action(state))
        total_reward += reward
    summary = flatten_dict(info)
    summary['episode_reward'] = total_reward
    summary['name'] = policy.name
    return summary
class MaxoutDense(KerasLayer):
    """Keras-style maxout dense layer wrapper.

    `input_dim`, when given, takes precedence and is converted into a
    one-element `input_shape` before delegating to the base constructor.
    """

    def __init__(self, output_dim, nb_feature=4, W_regularizer=None, b_regularizer=None, bias=True, input_dim=None, input_shape=None, **kwargs):
        if input_dim:
            input_shape = (input_dim,)
        shape_list = list(input_shape) if input_shape else None
        super(MaxoutDense, self).__init__(None, output_dim, nb_feature, W_regularizer,
                                          b_regularizer, bias, shape_list, **kwargs)
def get_microbiologyevents_extractors(data_dir, extractor_map):
    """Register the extractor set for the 'microbiologyevents' table.

    Builds a single ExtractorInfo describing how ids, timestamps, event types
    and values are pulled from the table, stores it in `extractor_map` under
    the table name, and returns the table name.
    """
    table = 'microbiologyevents'
    outpath = os.path.join(data_dir, table + '.tsv')
    id_extractor = MultiExtractor(names=['subject_id', 'hadm_id'], sep='_')
    # SelectExtractor chooses between charttime and chartdate — presumably the
    # first available value; confirm against SelectExtractor's implementation.
    time_ext = SelectExtractor([
        TimeExtractor(name='charttime', converter=time2str),
        TimeExtractor(name='chartdate', converter=date2str),
    ])
    type_ext = FmtExtractor(names=['spec_itemid', 'org_itemid', 'ab_itemid'], fmt='microbioevents.%s&%s&%s')
    value_ext = MultiExtractor(names=['dilution_text', 'dilution_comparison', 'dilution_value', 'interpretation'])
    test_ext = TestExtractor(name='interpretation', test=exist_test)
    extractor_map[table] = [ExtractorInfo(table, outpath, id_extractor, time_ext, type_ext, value_ext, test_ext)]
    return table
def get(data_path, seed=0, pc_valid=0.1):
    """Build the 10-task incremental CIFAR benchmark.

    Tasks 0-4 split CIFAR-10 into five 2-class tasks; tasks 5-9 split
    CIFAR-100 into five 20-class tasks. Per-task tensors are cached as .bin
    files under `<data_path>/binary_cifar`, then reloaded in a seeded random
    task order and split into train/valid sets.

    Args:
        data_path: root directory for torchvision downloads and the cache.
        seed: RNG seed for the task order and the train/valid split.
        pc_valid: fraction of each task's training data held out as validation.

    Returns:
        (data, taskcla, size): task dict keyed 0..9 (plus data['ncla'], the
        total class count), a list of (task_id, n_classes) pairs, and the
        input size [3, 32, 32].
    """
    data = {}
    taskcla = []
    size = [3, 32, 32]
    path = os.path.join(data_path, 'binary_cifar')
    # NOTE(review): the per-task tensors are re-extracted on every call even
    # when the cache files already exist — confirm this is intentional.
    if (not os.path.isdir(path)):
        os.makedirs(path)
    # Per-channel normalization statistics, given on the 0-255 scale.
    mean = [(x / 255) for x in [125.3, 123.0, 113.9]]
    std = [(x / 255) for x in [63.0, 62.1, 66.7]]
    dat = {}
    dat['train'] = datasets.CIFAR10(data_path, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    dat['test'] = datasets.CIFAR10(data_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    # Tasks 0-4: CIFAR-10 classes (2k, 2k+1) become task k with labels {0, 1}.
    for n in range(5):
        data[n] = {}
        data[n]['name'] = 'cifar10'
        data[n]['ncla'] = 2
        data[n]['train'] = {'x': [], 'y': []}
        data[n]['test'] = {'x': [], 'y': []}
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        for (image, target) in loader:
            n = target.cpu().numpy()[0]
            nn = (n // 2)
            data[nn][s]['x'].append(image)
            data[nn][s]['y'].append((n % 2))
    dat = {}
    dat['train'] = datasets.CIFAR100(data_path, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    dat['test'] = datasets.CIFAR100(data_path, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
    # Tasks 5-9: CIFAR-100 classes in blocks of 20 become tasks 5..9 with
    # labels {0..19}.
    for n in range(5, 10):
        data[n] = {}
        data[n]['name'] = 'cifar100'
        data[n]['ncla'] = 20
        data[n]['train'] = {'x': [], 'y': []}
        data[n]['test'] = {'x': [], 'y': []}
    for s in ['train', 'test']:
        loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
        for (image, target) in loader:
            n = target.cpu().numpy()[0]
            nn = ((n // 20) + 5)
            data[nn][s]['x'].append(image)
            data[nn][s]['y'].append((n % 20))
    # Stack each task's samples into tensors and cache them to disk.
    for t in data.keys():
        for s in ['train', 'test']:
            data[t][s]['x'] = torch.stack(data[t][s]['x']).view((- 1), size[0], size[1], size[2])
            data[t][s]['y'] = torch.LongTensor(np.array(data[t][s]['y'], dtype=int)).view((- 1))
            torch.save(data[t][s]['x'], os.path.join(os.path.expanduser(path), ((('data' + str(t)) + s) + 'x.bin')))
            torch.save(data[t][s]['y'], os.path.join(os.path.expanduser(path), ((('data' + str(t)) + s) + 'y.bin')))
    # Reload the cached tasks in a seeded random order.
    data = {}
    ids = list(shuffle(np.arange(10), random_state=seed))
    print('Task order =', ids)
    for i in range(10):
        data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
        for s in ['train', 'test']:
            data[i][s] = {'x': [], 'y': []}
            data[i][s]['x'] = torch.load(os.path.join(os.path.expanduser(path), ((('data' + str(ids[i])) + s) + 'x.bin')))
            data[i][s]['y'] = torch.load(os.path.join(os.path.expanduser(path), ((('data' + str(ids[i])) + s) + 'y.bin')))
        data[i]['ncla'] = len(np.unique(data[i]['train']['y'].numpy()))
        # Name the task after its source dataset and original position.
        if (data[i]['ncla'] == 2):
            data[i]['name'] = ('cifar10-' + str(ids[i]))
        else:
            data[i]['name'] = ('cifar100-' + str((ids[i] - 5)))
    # Carve a validation split out of each task's (shuffled) training data.
    for t in data.keys():
        r = np.arange(data[t]['train']['x'].size(0))
        r = np.array(shuffle(r, random_state=seed), dtype=int)
        nvalid = int((pc_valid * len(r)))
        ivalid = torch.LongTensor(r[:nvalid])
        itrain = torch.LongTensor(r[nvalid:])
        data[t]['valid'] = {}
        data[t]['valid']['x'] = data[t]['train']['x'][ivalid].clone()
        data[t]['valid']['y'] = data[t]['train']['y'][ivalid].clone()
        data[t]['train']['x'] = data[t]['train']['x'][itrain].clone()
        data[t]['train']['y'] = data[t]['train']['y'][itrain].clone()
    # Total number of classes across all tasks.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
class ScalableModule(BaseModule):
    """Module whose width is scaled by `width_scale`, with optional
    activation rescaling (a Scaler) and init-time weight rescaling.
    """

    def __init__(self, width_scale=1.0, rescale_init=False, rescale_layer=False):
        super(ScalableModule, self).__init__()
        # Activations pass through a Scaler when rescaling is enabled,
        # otherwise through an identity op.
        if rescale_layer:
            self.scaler = Scaler(width_scale)
        else:
            self.scaler = nn.Identity()
        self.rescale_init = rescale_init
        self.width_scale = width_scale

    def reset_parameters(self, inp_nonscale_layers):
        """Rescale the init of all submodules except those named in inp_nonscale_layers."""
        if (self.rescale_init and (self.width_scale != 1.0)):
            for (name, m) in self._modules.items():
                if (name not in inp_nonscale_layers):
                    m.apply((lambda _m: scale_init_param(_m, scale_in=(1.0 / self.width_scale))))

    @property
    def rescale_layer(self):
        # BUG FIX: the original defined this getter/setter pair as a plain
        # method followed by a bare `_layer.setter` expression, which raises
        # NameError at class-creation time. Restore the intended property.
        return not isinstance(self.scaler, nn.Identity)

    @rescale_layer.setter
    def rescale_layer(self, enable=True):
        if enable:
            self.scaler = Scaler(self.width_scale)
        else:
            self.scaler = nn.Identity()
class Value(nn.Module):
    """State-value network: two 64-unit tanh hidden layers and a scalar head."""

    def __init__(self, num_inputs):
        super(Value, self).__init__()
        self.affine1 = nn.Linear(num_inputs, 64)
        self.affine2 = nn.Linear(64, 64)
        self.value_head = nn.Linear(64, 1)
        # Shrink the head's initial weights and zero its bias so the initial
        # value estimates start small.
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0)

    def forward(self, x):
        hidden = torch.tanh(self.affine1(x))
        hidden = torch.tanh(self.affine2(hidden))
        return self.value_head(hidden)
def test_bwar_bat_return_all() -> None:
    """bwar_bat(return_all=True) returns the full 49-column table with 1567 rows for 2019."""
    result = league_batting_stats.bwar_bat(return_all=True)
    assert result is not None
    assert not result.empty
    season_2019 = result.query('year_ID == 2019')
    assert len(season_2019.columns) == 49
    assert len(season_2019) == 1567
class Cow(Object):
    """Passive animal with 3 health points that wanders randomly and is
    removed from the world when its health reaches zero."""

    def __init__(self, world, pos):
        super().__init__(world, pos)
        self.health = 3

    def texture(self):
        return 'cow'

    def update(self):
        # Die when out of health.
        if self.health <= 0:
            self.world.remove(self)
        # Wander: step in a random direction about half of the time.
        if self.random.uniform() < 0.5:
            self.move(self.random_dir())
def get_parser():
    """Build the CLI parser for converting a json into transcription files."""
    parser = argparse.ArgumentParser(
        description='convert a json to a transcription file with a token dictionary',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('json', type=str,
                        help='json files')
    parser.add_argument('--num-spkrs', type=int, default=1,
                        help='number of speakers')
    parser.add_argument('--refs', type=str, nargs='+',
                        help='ref for all speakers')
    parser.add_argument('--hyps', type=str, nargs='+',
                        help='hyp for all outputs')
    return parser
def load_pickle_file(pkl_path):
    """Deserialize and return the object stored at `pkl_path`.

    latin1 encoding lets pickles written under Python 2 load cleanly.
    """
    with open(pkl_path, 'rb') as handle:
        return pickle.load(handle, encoding='latin1')
class RequestOutput():
    """The output of one generation request: the prompt plus the completed
    (or in-progress) candidate sequences."""

    def __init__(self, request_id: str, prompt: str, prompt_token_ids: List[int], outputs: List[CompletionOutput], finished: bool) -> None:
        self.request_id = request_id
        self.prompt = prompt
        self.prompt_token_ids = prompt_token_ids
        self.outputs = outputs
        self.finished = finished

    @classmethod
    def from_seq_group(cls, seq_group: SequenceGroup) -> 'RequestOutput':
        """Build a RequestOutput from the top-n sequences of a SequenceGroup.

        BUG FIX: this alternate constructor takes `cls` and returns
        `cls(...)` but was missing its @classmethod decorator, so calling it
        on the class would pass a SequenceGroup as `cls`.
        """
        n = seq_group.sampling_params.n
        seqs = seq_group.get_seqs()
        # Rank candidates by beam-search score or cumulative logprob.
        if seq_group.sampling_params.use_beam_search:
            sorting_key = (lambda seq: seq.get_beam_search_score(seq_group.sampling_params.length_penalty))
        else:
            sorting_key = (lambda seq: seq.get_cumulative_logprob())
        sorted_seqs = sorted(seqs, key=sorting_key, reverse=True)
        top_n_seqs = sorted_seqs[:n]
        outputs: List[CompletionOutput] = []
        for seq in top_n_seqs:
            logprobs = seq.output_logprobs
            # Drop per-token logprobs when the caller did not request them.
            if (seq_group.sampling_params.logprobs is None):
                logprobs = {}
            finshed_reason = SequenceStatus.get_finished_reason(seq.status)
            output = CompletionOutput(seqs.index(seq), seq.output_text, seq.get_output_token_ids(), seq.get_cumulative_logprob(), logprobs, finshed_reason, seq.get_output_token_latency())
            outputs.append(output)
        # Every sequence in the group shares one prompt.
        prompt = top_n_seqs[0].prompt
        prompt_token_ids = top_n_seqs[0].data.prompt_token_ids
        finished = seq_group.is_finished()
        return cls(seq_group.request_id, prompt, prompt_token_ids, outputs, finished)

    def __repr__(self) -> str:
        return f'RequestOutput(request_id={self.request_id}, prompt={self.prompt!r}, prompt_token_ids={self.prompt_token_ids}, outputs={self.outputs}, finished={self.finished})'
def main():
    """Smoke-test the VesselGraph dataset wrapper and print basic statistics."""
    # Start from a clean slate; the dataset is re-downloaded into ./data.
    os.system(f'rm -r $PWD/data')
    print('')
    print('Testing Classes')
    dataset = VesselGraph(root='data', name=selected_dataset,
                          pre_transform=T.LineGraph(force_directed=True))
    print()
    print(f'Dataset: {dataset}:')
    print('')
    print(f'Number of graphs: {len(dataset)}')
    print(f'Number of features: {dataset.num_features}')
    print(f'Number of classes: {dataset.num_classes}')
    data = dataset[0]
    avg_degree = data.num_edges / data.num_nodes
    print(f'Number of nodes: {data.num_nodes}')
    print(f'Number of edges: {data.num_edges}')
    print(f'Average node degree: {avg_degree:.2f}')
    print(f'Contains isolated nodes: {data.contains_isolated_nodes()}')
    print(f'Contains self-loops: {data.contains_self_loops()}')
    print(f'Is directed: {data.is_directed()}')
    # Clean up the downloaded data again.
    os.system(f'rm -r $PWD/data')
def _get_test_ids():
    """Return the set of image ids listed in the TEST_5K json manifest."""
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original left an open handle to the GC).
    with open(TEST_5K) as fp:
        data = json.load(fp)
    return {_get_img_id(d['filename']) for d in data}
class AxB(nn.Module):
    """Batched product: for each batch row, multiplies a (100, nhid) block of
    B with an (nhid,)-vector of A, yielding 100 scores per row."""

    def __init__(self, nhid):
        super(AxB, self).__init__()
        self.nhid = nhid

    def forward(self, nhA, nhB):
        lhs = nhB.view(-1, 100, self.nhid)
        rhs = nhA.view(-1, self.nhid, 1)
        return torch.bmm(lhs, rhs).view(-1, 100)
class ResShortCut_D_Dec(ResNet_D_Dec):
    """ResNet decoder that fuses encoder shortcut features at each stage and
    outputs an alpha matte in [0, 1]."""

    def __init__(self, block, layers, norm_layer=None, large_kernel=False, late_downsample=False):
        super(ResShortCut_D_Dec, self).__init__(block, layers, norm_layer, large_kernel,
                                                late_downsample=late_downsample)

    def forward(self, x, mid_fea):
        fea1, fea2, fea3, fea4, fea5 = mid_fea['shortcut']
        # Decode stage by stage, adding the matching encoder shortcut.
        out = self.layer1(x) + fea5
        out = self.layer2(out) + fea4
        out = self.layer3(out) + fea3
        out = self.layer4(out) + fea2
        out = self.bn1(self.conv1(out))
        out = self.leaky_relu(out) + fea1
        out = self.conv2(out)
        # Map the tanh output from [-1, 1] to [0, 1].
        alpha = (self.tanh(out) + 1.0) / 2.0
        return alpha, None
class LEDForQuestionAnswering(metaclass=DummyObject):
    """Import placeholder: raises at construction unless torch is installed."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def accuracy(scores, labels):
    """Compute per-class accuracies plus the overall accuracy.

    `scores` has its class axis at dim -2 (argmax is taken over it); `labels`
    holds integer class ids with the same shape as that argmax. Returns a
    list of floats: one accuracy per class, then the overall accuracy last
    (NaN for a class absent from `labels`).
    """
    num_classes = scores.size(-2)
    predictions = torch.max(scores, dim=-2).indices
    correct = predictions == labels
    result = []
    for cls in range(num_classes):
        in_class = labels == cls
        cls_acc = (correct & in_class).float().sum() / in_class.float().sum()
        result.append(cls_acc.cpu().item())
    result.append(correct.float().mean().cpu().item())
    return result
def FloatArrayToRgbImage(float_array, scale_factor=DEFAULT_RGB_SCALE_FACTOR, drop_blue=False):
    """Encode a float array as a 24-bit RGB PIL image.

    Values are scaled, rounded to the nearest integer, clipped to
    [0, 2**24 - 1], and split into R (high byte), G (middle byte) and
    B (low byte). When drop_blue is True the low byte is left at zero,
    keeping 16 bits of precision.
    """
    float_array = np.squeeze(float_array)
    # Round-half-up after scaling.
    scaled_array = np.floor((float_array * scale_factor) + 0.5)
    min_inttype = 0
    max_inttype = ((2 ** 24) - 1)
    scaled_array = ClipFloatValues(scaled_array, min_inttype, max_inttype)
    int_array = scaled_array.astype(np.uint32)
    # Integer byte extraction. The original used np.divide (true division),
    # producing float intermediates that were only correct because the later
    # uint8 assignment truncated them; // keeps everything integral.
    rg = int_array // 256
    r = rg // 256
    g = rg % 256
    image_shape = int_array.shape
    rgb_array = np.zeros((image_shape[0], image_shape[1], 3), dtype=np.uint8)
    rgb_array[..., 0] = r
    rgb_array[..., 1] = g
    if not drop_blue:
        b = int_array % 256
        rgb_array[..., 2] = b
    image_mode = 'RGB'
    image = Image.fromarray(rgb_array, mode=image_mode)
    return image
class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject):
    """Import placeholder: raises at construction unless TensorFlow is installed."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def read_model(path, ext):
    """Load a reconstruction model (cameras, images, 3D points) from `path`.

    `ext` selects the on-disk format: '.txt' uses the text readers, anything
    else the binary readers. Returns the (cameras, images, points3D) triple.
    """
    if ext == '.txt':
        cameras = read_cameras_text(os.path.join(path, 'cameras' + ext))
        images = read_images_text(os.path.join(path, 'images' + ext))
        points3D = read_points3D_text(os.path.join(path, 'points3D' + ext))
    else:
        cameras = read_cameras_binary(os.path.join(path, 'cameras' + ext))
        images = read_images_binary(os.path.join(path, 'images' + ext))
        points3D = read_points3d_binary(os.path.join(path, 'points3D' + ext))
    return cameras, images, points3D
def plot_counter_dayline_from_node_id(node_id):
    """Plot the day-line for `node_id` on the globally selected `date`."""
    mask = (daily_counts['node_id'] == str(node_id)) & (daily_counts['day'] == date)
    # Take the single matching row's volume series.
    volume = list(daily_counts[mask]['volume'])[0]
    plot_counter_dayline(volume)
class DataCollector():
    """Accumulates per-step model outputs for one batch and dumps them to disk.

    Collects prediction distributions, logits and (when full_data is set)
    hidden states and attentions for every decoding step;
    write_to_disk_numpy() pickles one file per batch element and resets the
    collector.
    """

    def __init__(self, full_data: bool, cur_dir):
        self.pred_distributions = []
        self.attentions = []
        self.all_hidden_states = []
        self.logits = []
        self.input_doc = None
        self.input_doc_mask = None
        self.meta = None
        self.full_data = full_data
        self.cur_dir = cur_dir

    def add_meta(self, meta):
        """Attach per-example metadata (indexed by batch position)."""
        self.meta = meta

    def add_input_doc(self, input_doc, input_doc_msk):
        """Store the detached, CPU copies of the input ids and mask."""
        # BUG FIX: np.int was removed in NumPy 1.24; it was an alias for the
        # builtin int, which is used directly here.
        self.input_doc = auto_detach_to_cpu(input_doc, dtype=int)
        self.input_doc_mask = auto_detach_to_cpu(input_doc_msk)

    def add_step(self, pred_distribution, all_hidden_states=None, attentions=None):
        """Record one decoding step (extras only when full_data is set)."""
        self.pred_distributions.append(auto_detach_to_cpu(pred_distribution, dtype=np.float32))
        if self.full_data:
            self.all_hidden_states.append(auto_detach_to_cpu(all_hidden_states))
            self.attentions.append(auto_detach_to_cpu(attentions))

    def add_logit(self, logit):
        # BUG FIX: np.int -> int (see add_input_doc).
        self.logits.append(auto_detach_to_cpu(logit, dtype=int))

    def write_to_disk_numpy(self):
        """Pickle the collected outputs, one file per batch element, then reset."""
        batchsz = self.input_doc.shape[0]
        for i in range(batchsz):
            _pred_dist = [x[i] for x in self.pred_distributions]
            _pred_dist = np.stack(_pred_dist, axis=0)
            # Per-step entropy; exp() suggests the stored distributions are
            # log-probabilities — confirm against the producing model.
            ent = entropy(np.exp(_pred_dist), axis=(- 1))
            if self.full_data:
                _hidden_states = None
                _attn = [[y[i] for y in x] for x in self.attentions]
            else:
                # Without full_data only logits and entropy are kept.
                _hidden_states = None
                _attn = None
                _pred_dist = None
            _logit = [x[i] for x in self.logits]
            _logit = np.stack(_logit, axis=0)
            if self.meta:
                _meta = self.meta[i]
                if ('name' in _meta):
                    fname = _meta['name']
                else:
                    fname = _meta['id']
            else:
                # No metadata: fall back to a random file name.
                _meta = {'name': '', 'id': ''}
                fname = get_random_string(8)
            f = f'model_output_{fname}.pt'
            with open(os.path.join(self.cur_dir, f), 'wb') as fd:
                pickle.dump({'pred_distributions': _pred_dist, 'attentions': _attn, 'all_hidden_states': _hidden_states, 'logits': _logit, 'input_doc': self.input_doc[i], 'input_doc_mask': self.input_doc_mask[i], 'meta': _meta, 'ent': ent}, fd)
            logging.debug(f'writing {os.path.join(self.cur_dir, f)}')
            print(f'writing {os.path.join(self.cur_dir, f)}')
        # Reset all accumulators for the next batch.
        self.__init__(full_data=self.full_data, cur_dir=self.cur_dir)
class ImageNetValData():
    """ImageNet validation set wrapper: lazily decodes and preprocesses images
    on indexing (`.x[i]`) and exposes ground-truth labels as `.y`.

    Reads files from ~/EvalDNN-data/ILSVRC2012_img_val and supports either
    VGG-style or Inception-style preprocessing.
    """
    class ImageNetValDataX():
        """Lazy image accessor: decodes, resizes and crops on __getitem__."""
        def __init__(self, dir, filenames, width, height, fashion, transform):
            self._dir = dir
            self._filenames = filenames
            self._width = width
            self._height = height
            # Preprocessing flavor: 'vgg_preprocessing' or 'inception_preprocessing'.
            self._fashion = fashion
            self._transform = transform
        def __len__(self):
            return len(self._filenames)
        def __getitem__(self, index):
            """Return a stacked numpy batch for the (sliceable) index."""
            # Decoding uses TF eager ops; eager mode is toggled on per call.
            tf.compat.v1.enable_eager_execution()
            x = None
            for filename in self._filenames[index]:
                path = ((self._dir + '/') + filename)
                image = tf.image.decode_image(tf.io.read_file(path), channels=3)
                if (self._fashion == 'vgg_preprocessing'):
                    # VGG: aspect-preserving resize to 256, then central crop.
                    image = self._aspect_preserving_resize(image, 256)
                    image = self._central_crop([image], self._height, self._width)[0]
                    image.set_shape([self._height, self._width, 3])
                    image = tf.cast(image, dtype=tf.float32)
                elif (self._fashion == 'inception_preprocessing'):
                    # Inception: central 87.5% crop, then bilinear resize.
                    image = tf.cast(image, tf.float32)
                    image.set_shape([tf.compat.v1.Dimension(None), tf.compat.v1.Dimension(None), tf.compat.v1.Dimension(3)])
                    image = tf.image.central_crop(image, central_fraction=0.875)
                    image = tf.expand_dims(image, 0)
                    image = tf.compat.v1.image.resize_bilinear(image, [self._width, self._height], align_corners=False)
                    image = tf.squeeze(image, [0])
                else:
                    raise Exception('Invalid fashion', self._fashion)
                if (self._transform is not None):
                    image = self._transform(image)
                # Accumulate images along a new batch dimension.
                image = tf.expand_dims(image, axis=0)
                if (x is None):
                    x = image
                else:
                    x = tf.concat([x, image], 0)
            x = x.numpy()
            tf.compat.v1.disable_eager_execution()
            return x
        def _smallest_size_at_least(self, height, width, smallest_side):
            """New (height, width) scaling the shorter side up to smallest_side."""
            smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
            height = tf.cast(height, dtype=tf.float32)
            width = tf.cast(width, dtype=tf.float32)
            smallest_side = tf.cast(smallest_side, dtype=tf.float32)
            scale = tf.cond(tf.greater(height, width), (lambda : (smallest_side / width)), (lambda : (smallest_side / height)))
            new_height = tf.cast(tf.math.rint((height * scale)), dtype=tf.int32)
            new_width = tf.cast(tf.math.rint((width * scale)), dtype=tf.int32)
            return (new_height, new_width)
        def _aspect_preserving_resize(self, image, smallest_side):
            """Resize so the shorter side equals smallest_side, keeping aspect ratio."""
            smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
            shape = tf.shape(image)
            height = shape[0]
            width = shape[1]
            (new_height, new_width) = self._smallest_size_at_least(height, width, smallest_side)
            image = tf.expand_dims(image, 0)
            resized_image = tf.compat.v1.image.resize_bilinear(image, [new_height, new_width], align_corners=False)
            resized_image = tf.squeeze(resized_image)
            resized_image.set_shape([None, None, 3])
            return resized_image
        def _central_crop(self, image_list, crop_height, crop_width):
            """Centrally crop each image in image_list to (crop_height, crop_width)."""
            outputs = []
            for image in image_list:
                image_height = tf.shape(image)[0]
                image_width = tf.shape(image)[1]
                offset_height = ((image_height - crop_height) / 2)
                offset_width = ((image_width - crop_width) / 2)
                outputs.append(self._crop(image, offset_height, offset_width, crop_height, crop_width))
            return outputs
        def _crop(self, image, offset_height, offset_width, crop_height, crop_width):
            """Crop a rank-3 image at the given offsets, with runtime shape checks."""
            original_shape = tf.shape(image)
            rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
            with tf.control_dependencies([rank_assertion]):
                cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
            size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.'])
            offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), dtype=tf.int32)
            with tf.control_dependencies([size_assertion]):
                image = tf.slice(image, offsets, cropped_shape)
            return tf.reshape(image, cropped_shape)
        def __init__(self, width, height, fashion, transform=None, label_offset=0, shuffle=False, seed=None, num_max=None):
            """Load the ground-truth file and build the (x, y) accessors.

            label_offset is added to every label; shuffle/seed/num_max control
            optional subsampling of the validation list.
            """
            self._dir = (common.user_home_dir() + '/EvalDNN-data/ILSVRC2012_img_val')
            with open((self._dir + '/ILSVRC2012_validation_ground_truth.txt'), 'r') as f:
                lines = f.readlines()
            if shuffle:
                if (seed is not None):
                    random.seed(seed)
                random.shuffle(lines)
            if (num_max is not None):
                lines = lines[:num_max]
            self._filenames = []
            self.y = []
            for line in lines:
                # NOTE(review): lines are '---'-separated with 5 fields;
                # field 0 is the filename and field 2 the label — confirm
                # against the ground-truth file format.
                splits = line.split('---')
                if (len(splits) != 5):
                    continue
                self._filenames.append(splits[0])
                self.y.append(int(splits[2]))
            self.x = self.ImageNetValDataX(self._dir, self._filenames, width, height, fashion, transform)
            self.y = (np.array(self.y, dtype=int) + label_offset)
        def __len__(self):
            return len(self._filenames)
        def filenames(self):
            return self._filenames
def trial_spinglass(inputs, output, size_dict, icool_fact=0.01, igamma=0.01, **kwargs):
    """Run an igraph spinglass partition trial.

    icool_fact and igamma are complements: the solver receives
    cool_fact = 1 - icool_fact and gamma = 1 - igamma.
    """
    spinglass_opts = dict(method='spinglass', gamma=1 - igamma, cool_fact=1 - icool_fact)
    return trial_igraph_partition(inputs, output, size_dict, **spinglass_opts, **kwargs)
def main(args):
    """Average, over 10 runs, the best accuracy found in each result file.

    Each file `<result_file><i>` (i in 0..9) is scanned for lines containing
    'acc'; the accuracy value is the text between 'acc:' and '('.
    """
    total = 0.0
    for run in range(10):
        filepath = args.result_file + str(run)
        with open(filepath, 'r') as textfile:
            lines = textfile.read().split('\n')
        run_accs = []
        for line in lines:
            if 'acc' in line:
                run_accs.append(float(line.split('acc:')[1].split('(')[0]))
        best = max(run_accs)
        print('v%d, %f' % (run, best))
        total += best
    print('%f' % (total / 10))
def binarize_masks(state_dict, masks):
    """Apply pruning masks to weights and binarize the masks themselves.

    Every '...weight' entry with a matching '...mask' in `masks` is multiplied
    by its mask, with exact zeros normalized to +0; each mask is reduced to
    0/1 values of its original dtype. Returns (new_state_dict, new_masks).
    """
    with torch.no_grad():
        masked_state = {}
        for name, tensor in state_dict.items():
            if 'weight' in name:
                mask_name = name.rsplit('weight', 1)[0] + 'mask'
                if mask_name in masks:
                    tensor = tensor * masks[mask_name].to(tensor)
                    # Normalize -0.0 to +0.0 so zeros are represented uniformly.
                    tensor[abs(tensor) == 0] = 0
            masked_state[name] = tensor
        binary_masks = {name: torch.ne(m, 0).to(m) for name, m in masks.items()}
    return masked_state, binary_masks
class ELMo(object):
    def __init__(self, parameters):
        """Store the hyper-parameter dict and build the model immediately."""
        self._model = None
        self._elmo_model = None
        self.parameters = parameters
        self.compile_elmo()
    def __del__(self):
        # Release the Keras/TF session state when the wrapper is destroyed.
        K.clear_session()
        del self._model
    def char_level_token_encoder(self):
        """Build the character-CNN token encoder sub-model.

        Characters are embedded, passed through parallel Conv2D filters of
        several window sizes, max-pooled per token, run through highway
        layers, and projected to the hidden size. Returns a Keras Model
        mapping (batch, tokens, token_maxlen) char ids to token embeddings.
        """
        charset_size = self.parameters['charset_size']
        char_embedding_size = self.parameters['char_embedding_size']
        token_embedding_size = self.parameters['hidden_units_size']
        n_highway_layers = self.parameters['n_highway_layers']
        filters = self.parameters['cnn_filters']
        token_maxlen = self.parameters['token_maxlen']
        inputs = Input(shape=(None, token_maxlen), dtype='int32')
        embeds = Embedding(input_dim=charset_size, output_dim=char_embedding_size)(inputs)
        token_embeds = []
        # One CNN branch per (window_size, n_filters) pair, max-pooled over chars.
        for (window_size, filters_size) in filters:
            convs = Conv2D(filters=filters_size, kernel_size=[window_size, char_embedding_size], strides=(1, 1), padding='same')(embeds)
            convs = TimeDistributed(GlobalMaxPool1D())(convs)
            convs = Activation('tanh')(convs)
            # Camouflage re-applies the padding mask after the conv stack.
            convs = Camouflage(mask_value=0)(inputs=[convs, inputs])
            token_embeds.append(convs)
        token_embeds = concatenate(token_embeds)
        for i in range(n_highway_layers):
            token_embeds = TimeDistributed(Highway())(token_embeds)
            token_embeds = Camouflage(mask_value=0)(inputs=[token_embeds, inputs])
        # Linear projection down to the model's hidden size.
        token_embeds = TimeDistributed(Dense(units=token_embedding_size, activation='linear'))(token_embeds)
        token_embeds = Camouflage(mask_value=0)(inputs=[token_embeds, inputs])
        token_encoder = Model(inputs=inputs, outputs=token_embeds, name='token_encoding')
        return token_encoder
def compile_elmo(self, print_summary=False):
if (self.parameters['token_encoding'] == 'word'):
word_inputs = Input(shape=(None,), name='word_indices', dtype='int32')
embeddings = Embedding(self.parameters['vocab_size'], self.parameters['hidden_units_size'], trainable=True, name='token_encoding')
inputs = embeddings(word_inputs)
drop_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(inputs)
lstm_inputs = TimestepDropout(self.parameters['word_dropout_rate'])(drop_inputs)
next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')
previous_ids = Input(shape=(None, 1), name='previous_ids', dtype='float32')
elif (self.parameters['token_encoding'] == 'char'):
word_inputs = Input(shape=(None, self.parameters['token_maxlen']), dtype='int32', name='char_indices')
inputs = self.char_level_token_encoder()(word_inputs)
drop_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(inputs)
lstm_inputs = TimestepDropout(self.parameters['word_dropout_rate'])(drop_inputs)
next_ids = Input(shape=(None, 1), name='next_ids', dtype='float32')
previous_ids = Input(shape=(None, 1), name='previous_ids', dtype='float32')
re_lstm_inputs = Lambda(function=ELMo.reverse)(lstm_inputs)
mask = Lambda(function=ELMo.reverse)(drop_inputs)
for i in range(self.parameters['n_lstm_layers']):
lstm = LSTM(units=self.parameters['lstm_units_size'], return_sequences=True, activation='tanh', recurrent_activation='sigmoid', kernel_constraint=MinMaxNorm(((- 1) * self.parameters['cell_clip']), self.parameters['cell_clip']), recurrent_constraint=MinMaxNorm(((- 1) * self.parameters['cell_clip']), self.parameters['cell_clip']))(lstm_inputs)
lstm = Camouflage(mask_value=0)(inputs=[lstm, drop_inputs])
proj = TimeDistributed(Dense(self.parameters['hidden_units_size'], activation='linear', kernel_constraint=MinMaxNorm(((- 1) * self.parameters['proj_clip']), self.parameters['proj_clip'])))(lstm)
lstm_inputs = add([proj, lstm_inputs], name='f_block_{}'.format((i + 1)))
lstm_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(lstm_inputs)
for i in range(self.parameters['n_lstm_layers']):
re_lstm = LSTM(units=self.parameters['lstm_units_size'], return_sequences=True, activation='tanh', recurrent_activation='sigmoid', kernel_constraint=MinMaxNorm(((- 1) * self.parameters['cell_clip']), self.parameters['cell_clip']), recurrent_constraint=MinMaxNorm(((- 1) * self.parameters['cell_clip']), self.parameters['cell_clip']))(re_lstm_inputs)
re_lstm = Camouflage(mask_value=0)(inputs=[re_lstm, mask])
re_proj = TimeDistributed(Dense(self.parameters['hidden_units_size'], activation='linear', kernel_constraint=MinMaxNorm(((- 1) * self.parameters['proj_clip']), self.parameters['proj_clip'])))(re_lstm)
re_lstm_inputs = add([re_proj, re_lstm_inputs], name='b_block_{}'.format((i + 1)))
re_lstm_inputs = SpatialDropout1D(self.parameters['dropout_rate'])(re_lstm_inputs)
re_lstm_inputs = Lambda(function=ELMo.reverse, name='reverse')(re_lstm_inputs)
sampled_softmax = SampledSoftmax(num_classes=self.parameters['vocab_size'], num_sampled=int(self.parameters['num_sampled']), tied_to=(embeddings if (self.parameters['weight_tying'] and (self.parameters['token_encoding'] == 'word')) else None))
outputs = sampled_softmax([lstm_inputs, next_ids])
re_outputs = sampled_softmax([re_lstm_inputs, previous_ids])
self._model = Model(inputs=[word_inputs, next_ids, previous_ids], outputs=[outputs, re_outputs])
self._model.compile(optimizer=Adagrad(lr=self.parameters['lr'], clipvalue=self.parameters['clip_value']), loss=None)
if print_summary:
self._model.summary()
def train(self, train_data, valid_data):
weights_file = os.path.join(MODELS_DIR, 'elmo_best_weights.hdf5')
save_best_model = ModelCheckpoint(filepath=weights_file, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
early_stopping = EarlyStopping(patience=self.parameters['patience'], restore_best_weights=True)
t_start = time.time()
self._model.fit_generator(train_data, validation_data=valid_data, epochs=self.parameters['epochs'], workers=(self.parameters['n_threads'] if self.parameters['n_threads'] else os.cpu_count()), use_multiprocessing=(True if self.parameters['multi_processing'] else False), callbacks=[save_best_model])
print('Training took {0} sec'.format(str((time.time() - t_start))))
def evaluate(self, test_data):
def unpad(x, y_true, y_pred):
y_true_unpad = []
y_pred_unpad = []
for (i, x_i) in enumerate(x):
for (j, x_ij) in enumerate(x_i):
if (x_ij == 0):
y_true_unpad.append(y_true[i][:j])
y_pred_unpad.append(y_pred[i][:j])
break
return (np.asarray(y_true_unpad), np.asarray(y_pred_unpad))
(x, y_true_forward, y_true_backward) = ([], [], [])
for i in range(len(test_data)):
test_batch = test_data[i][0]
x.extend(test_batch[0])
y_true_forward.extend(test_batch[1])
y_true_backward.extend(test_batch[2])
x = np.asarray(x)
y_true_forward = np.asarray(y_true_forward)
y_true_backward = np.asarray(y_true_backward)
(y_pred_forward, y_pred_backward) = self._model.predict([x, y_true_forward, y_true_backward])
(y_true_forward, y_pred_forward) = unpad(x, y_true_forward, y_pred_forward)
(y_true_backward, y_pred_backward) = unpad(x, y_true_backward, y_pred_backward)
print('Forward Langauge Model Perplexity: {}'.format(ELMo.perplexity(y_pred_forward, y_true_forward)))
print('Backward Langauge Model Perplexity: {}'.format(ELMo.perplexity(y_pred_backward, y_true_backward)))
def wrap_multi_elmo_encoder(self, print_summary=False, save=False):
elmo_embeddings = list()
elmo_embeddings.append(concatenate([self._model.get_layer('token_encoding').output, self._model.get_layer('token_encoding').output], name='elmo_embeddings_level_0'))
for i in range(self.parameters['n_lstm_layers']):
elmo_embeddings.append(concatenate([self._model.get_layer('f_block_{}'.format((i + 1))).output, Lambda(function=ELMo.reverse)(self._model.get_layer('b_block_{}'.format((i + 1))).output)], name='elmo_embeddings_level_{}'.format((i + 1))))
camos = list()
for (i, elmo_embedding) in enumerate(elmo_embeddings):
camos.append(Camouflage(mask_value=0.0, name='camo_elmo_embeddings_level_{}'.format((i + 1)))([elmo_embedding, self._model.get_layer('token_encoding').output]))
self._elmo_model = Model(inputs=[self._model.get_layer('word_indices').input], outputs=camos)
if print_summary:
self._elmo_model.summary()
if save:
self._elmo_model.save(os.path.join(MODELS_DIR, 'ELMo_Encoder.hd5'))
print('ELMo Encoder saved successfully')
def save(self, sampled_softmax=True):
if (not sampled_softmax):
self.parameters['num_sampled'] = self.parameters['vocab_size']
self.compile_elmo()
self._model.load_weights(os.path.join(MODELS_DIR, 'elmo_best_weights.hdf5'))
self._model.save(os.path.join(MODELS_DIR, 'ELMo_LM_EVAL.hd5'))
print('ELMo Language Model saved successfully')
def load(self):
self._model = load_model(os.path.join(MODELS_DIR, 'ELMo_LM.h5'), custom_objects={'TimestepDropout': TimestepDropout, 'Camouflage': Camouflage})
def load_elmo_encoder(self):
self._elmo_model = load_model(os.path.join(MODELS_DIR, 'ELMo_Encoder.hd5'), custom_objects={'TimestepDropout': TimestepDropout, 'Camouflage': Camouflage})
def get_outputs(self, test_data, output_type='word', state='last'):
x = []
for i in range(len(test_data)):
test_batch = test_data[i][0]
x.extend(test_batch[0])
preds = np.asarray(self._elmo_model.predict(np.asarray(x)))
if (state == 'last'):
elmo_vectors = preds[(- 1)]
else:
elmo_vectors = np.mean(preds, axis=0)
if (output_type == 'words'):
return elmo_vectors
else:
return np.mean(elmo_vectors, axis=1)
def reverse(inputs, axes=1):
return K.reverse(inputs, axes=axes)
def perplexity(y_pred, y_true):
cross_entropies = []
for (y_pred_seq, y_true_seq) in zip(y_pred, y_true):
y_true_seq = to_categorical(y_true_seq, y_pred_seq.shape[(- 1)])
cross_entropy = K.categorical_crossentropy(K.tf.convert_to_tensor(y_true_seq, dtype=K.tf.float32), K.tf.convert_to_tensor(y_pred_seq, dtype=K.tf.float32))
cross_entropies.extend(cross_entropy.eval(session=K.get_session()))
cross_entropy = np.mean(np.asarray(cross_entropies), axis=(- 1))
return pow(2.0, cross_entropy) |
def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
    """Patch `<patched_module>.keypoint_rcnn_inference` with a Caffe2 implementation for the caller's scope.

    Generator intended to be used as a context manager (presumably wrapped
    by contextlib.contextmanager at a decoration site not visible here --
    confirm). While active, calls to keypoint_rcnn_inference are redirected
    to Caffe2KeypointRCNNInference(use_heatmap_max_keypoint). After the
    caller's block finishes, optionally assert the patched function was
    actually invoked. `tensor_mode` is accepted but unused in this body.
    """
    with mock.patch('{}.keypoint_rcnn_inference'.format(patched_module), side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint)) as mocked_func:
        (yield)
    if check:
        assert (mocked_func.call_count > 0)
def metrics_generator(array, tolerance):
    """Summarize an array of (error/difference) values.

    Returns a tuple (max, mean, median, success_rate) where success_rate
    is the fraction of entries strictly below `tolerance`.
    """
    success_rate = np.sum(array < tolerance) / array.size
    return (np.max(array), np.mean(array), np.median(array), success_rate)
def wms_loss(distances, embeddings, d_alpha, d_beta, alpha=2.0, beta=50.0, lamb=1.0, eps=0.1, ms_mining=True, wfunction='exp', sumfunction='ms'):
    """Weighted multi-similarity loss over a batch of L2-normalized embeddings.

    Soft positive/negative pair weights are derived from `distances`
    (presumably a (batch, batch) ground-truth distance matrix aligned
    with `embeddings` -- confirm with callers): pairs closer than the
    threshold `d_beta` lean positive, farther pairs lean negative, with
    the weighting curve chosen by `wfunction` ('lin', 'tanh', or the
    default sigmoid with slope `d_alpha`). Pairwise cosine similarities
    are then aggregated either as a plain weighted sum difference
    ('plain') or with the multi-similarity log-sum-exp form ('ms') using
    scales `alpha` (positives) / `beta` (negatives) and margin `lamb`.
    `ms_mining` keeps only pairs within `eps` of the hardest
    negative/easiest positive per row. Returns a scalar loss tensor.
    """
    embeddings = tf.nn.l2_normalize(embeddings, axis=1)
    batch_size = embeddings.get_shape().as_list()[0]
    if (wfunction == 'lin'):
        # Linear ramp: positive weight decays to 0 at d_beta, negative grows to 1.
        mask_pos = tf.where((distances < d_beta), (1.0 - tf.divide(distances, d_beta)), tf.zeros_like(distances))
        mask_neg = tf.where((distances < d_beta), tf.divide(distances, d_beta), tf.ones_like(distances))
    elif (wfunction == 'tanh'):
        mask_pos = (1.0 - tf.tanh((distances / d_beta)))
        mask_neg = tf.tanh((distances / d_beta))
    else:
        # Sigmoid weighting centred at d_beta with slope d_alpha.
        mask_pos = tf.divide(1.0, (1.0 + tf.exp((d_alpha * (distances - d_beta)))))
        mask_neg = tf.divide(1.0, (1.0 + tf.exp((d_alpha * (d_beta - distances)))))
    # Remove self-pairs from the positive mask (diagonal).
    mask_pos = (tf.cast(mask_pos, dtype=tf.float32) - tf.eye(batch_size, dtype=tf.float32))
    mask_neg = tf.cast(mask_neg, dtype=tf.float32)
    # Pairwise cosine similarity (embeddings are unit-norm), clipped at 0.
    sim_mat = tf.matmul(embeddings, embeddings, transpose_a=False, transpose_b=True)
    sim_mat = tf.maximum(sim_mat, 0.0)
    pos_mat = tf.multiply(sim_mat, mask_pos)
    neg_mat = tf.multiply(sim_mat, mask_neg)
    if ms_mining:
        # Per row: keep positives below (hardest negative + eps) and
        # negatives above (easiest positive - eps); others are zeroed out.
        max_val = tf.reduce_max(neg_mat, axis=1, keepdims=True)
        tmp_max_val = tf.reduce_max(pos_mat, axis=1, keepdims=True)
        min_val = (tf.reduce_min(tf.multiply((sim_mat - tmp_max_val), mask_pos), axis=1, keepdims=True) + tmp_max_val)
        mask_pos = tf.where((pos_mat < (max_val + eps)), mask_pos, tf.zeros_like(mask_pos))
        mask_neg = tf.where((neg_mat > (min_val - eps)), mask_neg, tf.zeros_like(mask_neg))
    if (sumfunction == 'plain'):
        # Plain aggregation: minimize negative similarity, maximize positive.
        pos_exp = tf.where((mask_pos > 0.0), pos_mat, tf.zeros_like(pos_mat))
        neg_exp = tf.where((mask_neg > 0.0), neg_mat, tf.zeros_like(neg_mat))
        pos_term = tf.reduce_sum(pos_exp, axis=1)
        neg_term = tf.reduce_sum(neg_exp, axis=1)
        loss = tf.reduce_mean((neg_term - pos_term))
    elif (sumfunction == 'ms'):
        # Multi-similarity log-sum-exp aggregation with margin lamb.
        pos_exp = tf.exp(((- alpha) * (pos_mat - lamb)))
        pos_exp = tf.where((mask_pos > 0.0), pos_exp, tf.zeros_like(pos_exp))
        neg_exp = tf.exp((beta * (neg_mat - lamb)))
        neg_exp = tf.where((mask_neg > 0.0), neg_exp, tf.zeros_like(neg_exp))
        pos_term = (tf.log((1.0 + tf.reduce_sum(pos_exp, axis=1))) / alpha)
        neg_term = (tf.log((1.0 + tf.reduce_sum(neg_exp, axis=1))) / beta)
        loss = tf.reduce_mean((pos_term + neg_term))
    # NOTE(review): any other sumfunction value leaves `loss` unassigned and
    # raises NameError here -- confirm intended.
    return loss
def build_grid(resolution):
    """Build a (1, H, W, 4) normalized coordinate grid over [0, 1]^2.

    For (H, W) = (resolution[0], resolution[1]), the last dimension holds
    the two meshgrid coordinates followed by their complements (1 - coord).
    """
    axes = [torch.linspace(0.0, 1.0, steps=n) for n in resolution]
    coords = torch.stack(torch.meshgrid(*axes), dim=-1)
    coords = coords.reshape(resolution[0], resolution[1], -1).unsqueeze(0)
    # Append the complementary coordinates along the channel axis.
    return torch.cat([coords, 1.0 - coords], dim=-1)
class WeightDecay(L2Regularization):
    """Alias of L2Regularization under the conventional "weight decay" name."""

    def __init__(self, *args, **kwargs):
        # Pure pass-through: behavior is identical to the base class.
        super().__init__(*args, **kwargs)
def set_dico_parameters(params, data, dico):
    """Attach dictionary `dico` to `data` and sync special-token indices on `params`.

    If `data` already carries a dictionary or `params` already has the
    index attributes, assert they agree with `dico` instead of overwriting.
    """
    if 'dico' in data:
        assert data['dico'] == dico
    else:
        data['dico'] = dico
    expected = {
        'n_words': len(dico),
        'bos_index': dico.index(BOS_WORD),
        'eos_index': dico.index(EOS_WORD),
        'pad_index': dico.index(PAD_WORD),
        'unk_index': dico.index(UNK_WORD),
        'mask_index': dico.index(MASK_WORD),
    }
    if hasattr(params, 'bos_index'):
        # Params were populated earlier: verify consistency with this dico.
        for attr, value in expected.items():
            assert getattr(params, attr) == value
    else:
        for attr, value in expected.items():
            setattr(params, attr, value)
def densenet121(pretrained: bool=False, progress: bool=True, num_classes: int=1000, layer_config=None) -> DenseNet:
    """Construct a torchvision Densenet-121 converted to the module-level MODE.

    Thin wrapper over create_torchvision_biomodel; `pretrained`, `progress`
    and `num_classes` are forwarded to the torchvision factory.
    """
    print('Converting Densenet-121 to {} mode'.format(MODE_STRING))
    factory = models.densenet121
    return create_torchvision_biomodel(factory, MODE, layer_config, pretrained, progress, num_classes)
class BaseActor:
    """Base class tying a network to a training objective.

    Subclasses implement __call__ to compute the loss from a TensorDict.
    train() re-freezes the backbone except the 'layeronline' sub-layer.
    """

    def __init__(self, net, objective):
        self.net = net
        self.objective = objective

    def __call__(self, data: TensorDict):
        # Subclasses must implement the forward/loss computation.
        raise NotImplementedError

    def to(self, device):
        """Move the underlying network to `device`."""
        self.net.to(device)

    def train(self, mode=True):
        """Set training mode while keeping the backbone frozen except 'layeronline'."""
        self.net.train(mode)
        print('======> fix backbone again <=======')
        backbone = self.net.feature_extractor
        # Freeze the whole feature extractor and put its BN layers in eval mode.
        for p in backbone.parameters():
            p.requires_grad = False
        for module in backbone.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()
        # Re-enable gradients (and BN training) for the online layer only.
        for layer_name in ['layeronline']:
            sub_layer = getattr(backbone.features.features, layer_name)
            for p in sub_layer.parameters():
                p.requires_grad = True
            for module in sub_layer.modules():
                if isinstance(module, nn.BatchNorm2d):
                    module.train()
        print('double check trainable')
        self.check_trainable(self.net)

    def eval(self):
        """Shortcut for train(False)."""
        self.train(False)

    def check_trainable(self, model):
        """Print the names of trainable parameters and assert at least one exists."""
        print('trainable params:')
        trainable = []
        for name, param in model.named_parameters():
            if param.requires_grad:
                trainable.append(param)
                print(name)
        assert len(trainable) > 0, 'no trainable parameters'
def gpt_init(meta_vocab_size=None, args=None):
    """Create a GPT model initialized from scratch.

    Hyperparameters (n_layer, n_head, n_embd, block_size, bias, dropout)
    come from `args`; vocab_size falls back to 50304 when
    `meta_vocab_size` is None.
    """
    model_args = dict(
        n_layer=args.n_layer,
        n_head=args.n_head,
        n_embd=args.n_embd,
        block_size=args.block_size,
        bias=args.bias,
        vocab_size=None,
        dropout=args.dropout,
    )
    log_rank0('Initializing a new model from scratch')
    if meta_vocab_size is None:
        print('defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)')
    # Resolve the final vocabulary size after reporting the fallback.
    model_args['vocab_size'] = 50304 if meta_vocab_size is None else meta_vocab_size
    gpt_config = GPTConfig(**model_args)
    return GPT(gpt_config)
def split_into_sentences(text):
    """Split raw text into sentences with rule-based abbreviation protection.

    Dots that do not terminate a sentence (titles/prefixes, websites,
    single-letter initials, acronyms, suffixes, decimal numbers, "Ph.D.")
    are temporarily rewritten as the <prd> placeholder, sentence-ending
    punctuation is tagged with <stop>, and the text is split on <stop>.
    Relies on module-level regex fragments: prefixes, websites, alphabets,
    acronyms, starters, suffixes, digits.

    Returns a list of stripped sentence strings.
    """
    text = ((' ' + text) + ' ')
    text = text.replace('\n', ' ')
    text = re.sub(prefixes, '\\1<prd>', text)
    text = re.sub(websites, '<prd>\\1', text)
    if ('Ph.D' in text):
        text = text.replace('Ph.D.', 'Ph<prd>D<prd>')
    text = re.sub((('\\s' + alphabets) + '[.] '), ' \\1<prd> ', text)
    text = re.sub(((acronyms + ' ') + starters), '\\1<stop> \\2', text)
    text = re.sub((((((alphabets + '[.]') + alphabets) + '[.]') + alphabets) + '[.]'), '\\1<prd>\\2<prd>\\3<prd>', text)
    text = re.sub((((alphabets + '[.]') + alphabets) + '[.]'), '\\1<prd>\\2<prd>', text)
    text = re.sub((((' ' + suffixes) + '[.] ') + starters), ' \\1<stop> \\2', text)
    text = re.sub(((' ' + suffixes) + '[.]'), ' \\1<prd>', text)
    text = re.sub(((' ' + alphabets) + '[.]'), ' \\1<prd>', text)
    text = re.sub(((digits + '[.]') + digits), '\\1<prd>\\2', text)
    # Move terminal punctuation outside closing quotes so the <stop> tag
    # lands after the quote. BUGFIX: the smart-quote branch had been
    # mojibake-stripped into a no-op (`if '' in text: text.replace('.', '.')`
    # -- an always-true test followed by an identity replace); restored to
    # mirror the straight-quote handling below.
    if ('\u201d' in text):
        text = text.replace('.\u201d', '\u201d.')
    if ('"' in text):
        text = text.replace('."', '".')
    if ('!' in text):
        text = text.replace('!"', '"!')
    if ('?' in text):
        text = text.replace('?"', '"?')
    text = text.replace('.', '.<stop>')
    text = text.replace('?', '?<stop>')
    text = text.replace('!', '!<stop>')
    text = text.replace('<prd>', '.')
    sentences = text.split('<stop>')
    # Drop the trailing fragment after the final sentence terminator.
    sentences = sentences[:(- 1)]
    sentences = [s.strip() for s in sentences]
    return sentences
class SEBottleneck(nn.Module):
    """Bottleneck residual block with a Squeeze-and-Excitation stage.

    1x1 reduce -> 3x3 (stride) -> 1x1 expand (x2), each followed by batch
    norm; the SE layer rescales the expanded features before the residual
    addition. `downsample`, if given, adapts the identity branch.
    """
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):
        super(SEBottleneck, self).__init__()
        out_channels = planes * self.expansion
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.se = SELayer(out_channels, reduction)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the SE-bottleneck transform and add the (possibly downsampled) identity."""
        identity = self.downsample(x) if self.downsample is not None else x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.se(self.bn3(self.conv3(y)))
        y += identity
        return self.relu(y)
def _iwae(model, x, K):
    """Compute per-sample IWAE log-weights log p(z) + log p(x|z) - log q(z|x).

    `model(x, K)` is expected to return the posterior qz_x, the decoder
    distribution px_z and K latent samples zs. Returns the unnormalized
    importance log-weights (presumably shape (K, batch) -- confirm with
    the model's batch_shape convention).
    """
    (qz_x, px_z, zs) = model(x, K)
    # Log prior density of the posterior samples, summed over latent dims.
    lpz = model.pz(*model.pz_params).log_prob(zs).sum((- 1))
    # Decoder log-likelihood, flattened over event dims and rescaled by the
    # model's likelihood scaling factor.
    lpx_z = (px_z.log_prob(x).view(*px_z.batch_shape[:2], (- 1)) * model.llik_scaling)
    # Log posterior density of the samples.
    lqz_x = qz_x.log_prob(zs).sum((- 1))
    return ((lpz + lpx_z.sum((- 1))) - lqz_x)
def gendata(records, index, type):
    """Yield Elasticsearch bulk-index actions for each (id, document) in `records`.

    Each action maps the record key to '_id' and the record value to
    '_source' under index `index`. `type` is kept for interface
    compatibility but unused (it also shadows the builtin -- renaming
    would change the keyword interface, so it is left as-is).
    """
    # Iterate items() directly instead of zip(keys(), values()).
    for doc_id, source in records.items():
        yield {'_index': index, '_id': doc_id, '_source': source}
def get_num_min_class(labels):
    """Return the sample count of the least-populated class.

    `labels` is a (num_samples, num_classes) one-hot / score array; class
    membership is taken as the argmax over the last axis. Classes with no
    members count as 0, matching the original per-class loop.

    Replaces an O(samples * classes) Python loop of np.sum comparisons
    with a single vectorized np.bincount.
    """
    argmax_labels = np.argmax(labels, axis=(- 1))
    counts = np.bincount(argmax_labels, minlength=labels.shape[(- 1)])
    return counts.min()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.