code stringlengths 101 5.91M |
|---|
class TestStochasticNPHawkesProcessClass(unittest.TestCase):
    """Smoke tests for the stochastic-approximation nonparametric Hawkes process."""

    def setUp(self):
        # Small fixed event sequence observed on the window [0, 4].
        self.history = EventSeq([0.0, 1.0, 2.0, 3.0], [0.0, 4.0])

    def test_neg_ll(self):
        # The negative log-likelihood should evaluate without error.
        model = NonparametricHawkesProcessWithStochasticApproximation(bg_intensity=1.0, n_inducing_points=5)
        model.randomize_params(3.0, 0.01, except_for=['ind_points'])
        print(model.neg_ll(self.history, debug=True))

    def test_fit(self):
        # Simulate from a reference process, then fit a fresh model to it.
        base_rate = 0.2
        reference = NonparametricHawkesProcess(bg_intensity=base_rate, n_inducing_points=5)
        reference.params['kernel_weight'].data = torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0])
        simulated = reference.simulate(50, [0, 20])
        learner = NonparametricHawkesProcessWithStochasticApproximation(bg_intensity=base_rate, n_inducing_points=5)
        learner.randomize_params(3.0, 0.01, except_for=['ind_points'])
        print(' * initial hawkes\n', learner)
        learner.fit(simulated, n_epochs=50, batch_size=1, print_freq=10, optimizer_kwargs={'lr': 0.05})
        print(' * true hawkes\n', reference)
        print(' * learned hawkes\n', learner)
def cheetah():
    """Return the default config overridden with HalfCheetah-specific settings.

    The original implementation used ``locals().update(default())`` followed by
    ``return locals()`` — a CPython-specific frame hack whose behavior is not
    guaranteed by the language. Building the dict explicitly is equivalent
    (same returned mapping) and portable.
    """
    config = dict(default())
    config['env'] = 'HalfCheetah-v1'
    config['max_length'] = 1000
    # NOTE(review): 0.0 steps looks suspicious for a step budget — confirm the
    # intended value (kept as in the original).
    config['steps'] = .0
    return config
()
def make_predictions(args: PredictArgs, smiles: List[str]=None) -> List[List[Optional[float]]]:
    """Load an ensemble of checkpoints and predict on a test set or SMILES list.

    Args:
        args: prediction arguments (checkpoint paths, test/preds paths, ...).
        smiles: optional list of SMILES strings; when given, used instead of
            ``args.test_path``.

    Returns:
        Ensemble-averaged predictions per molecule, or ``[None] * len(input)``
        when no input molecule is valid. Also writes a CSV to
        ``args.preds_path`` with 'Invalid SMILES' rows for invalid inputs.
    """
    print('Loading training args')
    (scaler, features_scaler) = load_scalers(args.checkpoint_paths[0])
    train_args = load_args(args.checkpoint_paths[0])
    (num_tasks, task_names) = (train_args.num_tasks, train_args.task_names)
    # Features used at training time must be supplied again at prediction time.
    if (((train_args.features_path is not None) or (train_args.features_generator is not None)) and (args.features_path is None) and (args.features_generator is None)):
        raise ValueError('Features were used during training so they must be specified again during prediction using the same type of features as before (with either --features_generator or --features_path and using --no_features_scaling if applicable).')
    # Backfill any training-time options missing from the prediction args.
    for (key, value) in vars(train_args).items():
        if (not hasattr(args, key)):
            setattr(args, key, value)
    args: Union[(PredictArgs, TrainArgs)]
    print('Loading data')
    if (smiles is not None):
        full_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=False, features_generator=args.features_generator)
    else:
        full_data = get_data(path=args.test_path, args=args, target_columns=[], ignore_columns=[], skip_invalid_smiles=False, store_row=True)
    print('Validating SMILES')
    # Map each index in the full input to its index among valid molecules only.
    full_to_valid_indices = {}
    valid_index = 0
    for full_index in range(len(full_data)):
        if (full_data[full_index].mol is not None):
            full_to_valid_indices[full_index] = valid_index
            valid_index += 1
    test_data = MoleculeDataset([full_data[i] for i in sorted(full_to_valid_indices.keys())])
    if (len(test_data) == 0):
        return ([None] * len(full_data))
    print(f'Test size = {len(test_data):,}')
    if args.features_scaling:
        test_data.normalize_features(features_scaler)
    # Accumulator for summing predictions across the ensemble.
    if (args.dataset_type == 'multiclass'):
        sum_preds = np.zeros((len(test_data), num_tasks, args.multiclass_num_classes))
    else:
        sum_preds = np.zeros((len(test_data), num_tasks))
    test_data_loader = MoleculeDataLoader(dataset=test_data, batch_size=args.batch_size, num_workers=args.num_workers)
    print(f'Predicting with an ensemble of {len(args.checkpoint_paths)} models')
    for checkpoint_path in tqdm(args.checkpoint_paths, total=len(args.checkpoint_paths)):
        model = load_checkpoint(checkpoint_path, device=args.device)
        model_preds = predict(model=model, data_loader=test_data_loader, scaler=scaler)
        sum_preds += np.array(model_preds)
    avg_preds = (sum_preds / len(args.checkpoint_paths))
    avg_preds = avg_preds.tolist()
    print(f'Saving predictions to {args.preds_path}')
    assert (len(test_data) == len(avg_preds))
    makedirs(args.preds_path, isfile=True)
    # One output column per class for multiclass tasks. (A redundant
    # `else: task_names = task_names` branch was removed here.)
    if (args.dataset_type == 'multiclass'):
        task_names = [f'{name}_class_{i}' for name in task_names for i in range(args.multiclass_num_classes)]
    for (full_index, datapoint) in enumerate(full_data):
        valid_index = full_to_valid_indices.get(full_index, None)
        preds = (avg_preds[valid_index] if (valid_index is not None) else (['Invalid SMILES'] * len(task_names)))
        for (pred_name, pred) in zip(task_names, preds):
            datapoint.row[pred_name] = pred
    with open(args.preds_path, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=full_data[0].row.keys())
        writer.writeheader()
        for datapoint in full_data:
            writer.writerow(datapoint.row)
    return avg_preds
def pyaudio_featurize(file, basedir):
    """Featurize an audio file via the pyaudio helper script.

    Copies ``file`` into ``basedir/helpers/``, runs ``pyaudio_help.py`` on it
    there, and parses the JSON it produces.

    Args:
        file: audio filename relative to the current working directory.
        basedir: project base directory containing ``helpers/``.

    Returns:
        (features, labels) as read from the helper's JSON output.
    """
    curdir = os.getcwd()
    shutil.copy(os.path.join(curdir, file), os.path.join(basedir, 'helpers', file))
    os.chdir(os.path.join(basedir, 'helpers'))
    try:
        # NOTE(review): shell command built from paths — fine for trusted local
        # names, but subprocess.run([...]) would be more robust.
        os.system(('python3 %s/helpers/pyaudio_help.py %s' % (basedir, file)))
        jsonfile = (file[0:(- 4)] + '.json')
        # Close the JSON file handle explicitly (was json.load(open(...))).
        with open(jsonfile) as fh:
            g = json.load(fh)
        features = g['features']
        labels = g['labels']
        os.remove(jsonfile)
    finally:
        # Restore the working directory even if the helper or JSON load fails.
        os.chdir(curdir)
    return (features, labels)
def split_shard_dim_with_reshuffle_check(input_, shard_dim, group=None, ranks=None):
    """Split ``input_`` along ``shard_dim`` via the autograd function
    ``_SplitShardDimReshuffleCheck``.

    Thin functional wrapper; ``group`` and ``ranks`` are forwarded as-is
    (presumably a process group and explicit rank list — confirm against the
    Function's implementation).
    """
    return _SplitShardDimReshuffleCheck.apply(input_, shard_dim, group, ranks)
def main():
    """Entry point: parse CLI args, assemble the config, then train or validate."""
    args = parse_args()
    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join((str(gpu) for gpu in args.gpus))
    cfg = Config.fromfile(args.config)
    # Mirror CLI options into the config object.
    cfg.gpus = len(args.gpus)
    cfg.load_from = args.load_from
    cfg.finetune_from = args.finetune_from
    cfg.view = args.view
    # Work dir is grouped by the training dataset type.
    cfg.work_dirs = ((args.work_dirs + '/') + cfg.dataset.train.type)
    # Let cuDNN auto-tune kernels (best for fixed input sizes).
    cudnn.benchmark = True
    cudnn.fastest = True
    runner = Runner(cfg)
    if args.validate:
        val_loader = build_dataloader(cfg.dataset.val, cfg, is_train=False)
        runner.validate(val_loader)
    else:
        runner.train()
def test_cvrp__equivalence_dense_sparse_reward(cvrp_dense_reward: CVRP, cvrp_sparse_reward: CVRP) -> None:
    """Dense and sparse reward settings must yield identical episode returns."""

    def episode_return(env, step_fn, key):
        # Roll out one episode, always stepping to the first unvisited node,
        # accumulating rewards from reset through termination.
        state, timestep = env.reset(key)
        total = timestep.reward
        while not timestep.last():
            state, timestep = step_fn(state, jnp.argmin(state.visited_mask))
            total += timestep.reward
        return total

    key = jax.random.PRNGKey(0)
    return_dense = episode_return(cvrp_dense_reward, jax.jit(cvrp_dense_reward.step), key)
    return_sparse = episode_return(cvrp_sparse_reward, jax.jit(cvrp_sparse_reward.step), key)
    # Returns agree and exceed the -2 * num_nodes * sqrt(2) tour-length bound.
    assert return_sparse == return_dense > -2 * cvrp_dense_reward.num_nodes * jnp.sqrt(2)
def process_one_sentence(sent_parse: TreeNode, abs_str: str) -> List:
    """Score candidate deletable spans of one parsed sentence against an abstract.

    For every rule-based deletable span, computes the 2-gram ROUGE of the
    sentence with that span removed and its ratio to the full-sentence
    baseline ROUGE.

    Returns:
        [sent_tree, list of span records, baseline_rouge].
    """
    abs_list = abs_str.split(' ')  # NOTE(review): unused — kept for parity
    sent_tree = read_single_parse_tree(sent_parse)
    tree_len = len(sent_tree.text)
    abs_len = len(abs_str.split(' '))  # NOTE(review): unused — kept for parity
    sent_str = ' '.join(sent_tree.text)
    rt_del_spans = []
    del_spans = find_deletable_span_rule_based_updated(sent_tree, root_len=tree_len, parent=None, grand_parent=None)
    baseline_rouge = get_rouge_est_str_2gram_smart_kick_stop_words(gold=abs_str, pred=sent_str)
    # A near-zero baseline makes the ratio meaningless; flag it and emit 1.0.
    if (baseline_rouge < 0.03):
        useless_baseline = True
    else:
        useless_baseline = False
    for del_sp in del_spans:
        if (len(del_sp['selected_idx']) < 1):
            continue
        # Tokens remaining after deleting this span, in original order.
        full_set = set(range(len(sent_tree.text)))
        remain_idx = list((full_set - del_sp['selected_idx']))
        remain_idx.sort()
        _txt = ' '.join([sent_tree.text[idx] for idx in remain_idx])
        _rouge = get_rouge_est_str_2gram_smart_kick_stop_words(gold=abs_str, pred=_txt)
        rt_del_spans.append({'node': del_sp['node'], 'rouge': _rouge, 'selected_idx': list(del_sp['selected_idx']), 'ratio': ((_rouge / baseline_rouge) if (not useless_baseline) else 1.0), 'label': (- 1)})
    # Sentinel record representing "delete nothing" (the baseline itself).
    rt_del_spans.append({'node': 'BASELINE', 'rouge': get_rouge_est_str_2gram_smart_kick_stop_words(gold=abs_str, pred=sent_str), 'selected_idx': [(tree_len - 1)], 'ratio': 0.0, 'label': (- 1)})
    return [sent_tree, rt_del_spans, baseline_rouge]
def train_single_epoch(epoch, model, train_loader, transform, optimizer, eval_loader, plotfilename=None):
    """Run one training epoch; return (mean loss, mean error) over batches.

    Every 100 batches, prints progress and — when ``plotfilename`` is given —
    saves a de-normalized diagnostic plot of the current batch.

    NOTE(review): ``eval_loader`` is unused here — confirm whether evaluation
    was intended inside this function.
    """
    model.train()
    (errs, losses) = ([], [])
    start = datetime.now()
    for (idx, (x, y, clas)) in enumerate(train_loader):
        # Add a channel dimension for the model.
        x = torch.unsqueeze(x, dim=1)
        optimizer.zero_grad()
        (x, y, clas) = (x.to(device), y.to(device), clas.to(device))
        (yhat, reghat, alphas, clashat) = model(x)
        loss_out = F.mse_loss(yhat, y)
        # Optional auxiliary classification head adds a BCE term.
        if model.classification_enabled:
            loss_clas = F.binary_cross_entropy(clashat, clas)
            loss = (loss_out + loss_clas)
        else:
            loss = loss_out
        loss.backward()
        optimizer.step()
        err = error(y, yhat)
        (loss_, err_) = (loss.item(), err.item())
        losses.append(loss_)
        errs.append(err_)
        if ((idx % 100) == 0):
            print(f'train epoch={epoch} batch={(idx + 1)} loss={loss:.2f} err={err:.2f}')
            if plotfilename:
                filename = (plotfilename + f'.{idx}.png')
                x = x.cpu()
                y = y.cpu()
                yhat = yhat.cpu()
                reghat = reghat.cpu()
                # Undo normalization so the plot is in original units.
                if transform:
                    x = ((x * transform['sample_std']) + transform['sample_mean'])
                    y = ((y * transform['target_std']) + transform['target_mean'])
                    yhat = ((yhat * transform['target_std']) + transform['target_mean'])
                    reghat = ((reghat * transform['sample_std']) + transform['sample_mean'])
                # NOTE(review): fixed 10x downscale for display — confirm intent.
                reghat = (reghat / 10.0)
                plot_window(x, y, yhat, reghat, clashat.cpu(), alphas.cpu(), loss_, err_, model.classification_enabled, filename)
    end = datetime.now()
    total_seconds = (end - start).seconds
    print('')
    print(f'Epoch seconds: {total_seconds}')
    print('')
    return (np.mean(losses), np.mean(errs))
def _count_tokens(files, file_byte_limit=1000000.0, correct_strip=True):
    """Count token frequencies over a byte-budgeted sample of each file.

    To stay roughly within ``file_byte_limit`` bytes per file, lines are
    sampled: a fixed stride of ``lines_to_skip`` lines is skipped between
    counted lines, and counting stops once the byte budget goes negative.

    Args:
        files: paths readable via ``tf.io.gfile``.
        file_byte_limit: approximate maximum bytes of text counted per file.
        correct_strip: normalize the line to unicode before length accounting.

    Returns:
        defaultdict mapping token -> occurrence count.
    """
    token_counts = collections.defaultdict(int)
    for filepath in files:
        with tf.io.gfile.GFile(filepath, mode='r') as reader:
            file_byte_budget = file_byte_limit
            counter = 0
            # Stride chosen so the counted lines spread across the whole file.
            lines_to_skip = int((reader.size() / (file_byte_budget * 2)))
            for line in reader:
                if (counter < lines_to_skip):
                    counter += 1
                else:
                    if (file_byte_budget < 0):
                        break
                    if correct_strip:
                        line = native_to_unicode(line)
                    line = line.strip()
                    file_byte_budget -= len(line)
                    counter = 0
                    for token in _split_string_to_tokens(native_to_unicode(line)):
                        token_counts[token] += 1
    return token_counts
class FewShotSeg(nn.Module):
    """Few-shot segmentation network with multi-prototype support features.

    A ResNet-101 encoder produces support/query features; foreground
    prototypes are extracted from the support masks, refined against coarse
    query prototypes through attention (BATE), and compared to query features
    by thresholded cosine similarity to produce the segmentation.
    """

    def __init__(self, pretrained_weights='deeplabv3'):
        super().__init__()
        self.encoder = Res101Encoder(replace_stride_with_dilation=[True, True, False], pretrained_weights=pretrained_weights)
        self.device = torch.device('cuda')
        # Scaling factor applied to cosine similarities before sigmoid.
        self.scaler = 20.0
        self.criterion = nn.NLLLoss()
        self.criterion_MSE = nn.MSELoss()
        self.alpha = torch.Tensor([1.0, 0.0])
        # Fixed-seed sampler so prototype seeding is reproducible.
        self.fg_sampler = np.random.RandomState(1289)
        # Number of foreground prototypes per support image.
        self.fg_num = 10
        self.MHA = MultiHeadAttention(n_head=3, d_model=512, d_k=512, d_v=512)
        self.MLP = MultiLayerPerceptron(dim=512, mlp_dim=1024)
        self.layer_norm = nn.LayerNorm(512)

    def forward(self, supp_imgs, supp_mask, qry_imgs, train=False, t_loss_scaler=1, n_iters=20):
        """Segment the query images given masked support images.

        Args (assumed layout — confirm against callers):
            supp_imgs: ways x shots nested lists of (B, C, H, W) tensors.
            supp_mask: same nesting of (B, H, W) binary masks.
            qry_imgs: list of (B, C, H, W) query tensors.

        Returns:
            (N, 2, H, W) background/foreground prediction maps.
        """
        self.n_ways = len(supp_imgs)
        self.n_shots = len(supp_imgs[0])
        self.n_queries = len(qry_imgs)
        # Number of prototype-refinement iterations.
        self.iter = 3
        # Only the 1-way, 1-query setting is supported.
        assert (self.n_ways == 1)
        assert (self.n_queries == 1)
        qry_bs = qry_imgs[0].shape[0]
        supp_bs = supp_imgs[0][0].shape[0]
        img_size = supp_imgs[0][0].shape[(- 2):]
        supp_mask = torch.stack([torch.stack(way, dim=0) for way in supp_mask], dim=0).view(supp_bs, self.n_ways, self.n_shots, *img_size)
        # Encode support and query images in a single batch.
        imgs_concat = torch.cat(([torch.cat(way, dim=0) for way in supp_imgs] + [torch.cat(qry_imgs, dim=0)]), dim=0)
        (img_fts, tao) = self.encoder(imgs_concat)
        supp_fts = img_fts[:((self.n_ways * self.n_shots) * supp_bs)].view(supp_bs, self.n_ways, self.n_shots, (- 1), *img_fts.shape[(- 2):])
        qry_fts = img_fts[((self.n_ways * self.n_shots) * supp_bs):].view(qry_bs, self.n_queries, (- 1), *img_fts.shape[(- 2):])
        # Per-query learned similarity thresholds from the encoder.
        self.t = tao[((self.n_ways * self.n_shots) * supp_bs):]
        self.thresh_pred = [self.t for _ in range(self.n_ways)]
        outputs = []
        for epi in range(supp_bs):
            # Multiple foreground prototypes per support shot.
            fg_partition_prototypes = [[self.compute_multiple_prototypes(self.fg_num, supp_fts[([epi], way, shot)], supp_mask[([epi], way, shot)], self.fg_sampler) for shot in range(self.n_shots)] for way in range(self.n_ways)]
            # Single masked-average prototype per shot, for the coarse pass.
            supp_fts_ = [[self.getFeatures(supp_fts[([epi], way, shot)], supp_mask[([epi], way, shot)]) for shot in range(self.n_shots)] for way in range(self.n_ways)]
            fg_prototypes = self.getPrototype(supp_fts_)
            qry_pred = torch.stack([self.getPred(qry_fts[epi], fg_prototypes[way], self.thresh_pred[way]) for way in range(self.n_ways)], dim=1)
            qry_prototype_coarse = self.getFeatures(qry_fts[epi], qry_pred[epi])
            # Iteratively refine the support prototypes against the coarse
            # query prototype via the attention module.
            for i in range(self.iter):
                fg_partition_prototypes = [[self.BATE(fg_partition_prototypes[way][shot][epi], qry_prototype_coarse) for shot in range(self.n_shots)] for way in range(self.n_ways)]
                supp_proto = [[torch.mean(fg_partition_prototypes[way][shot], dim=1) for shot in range(self.n_shots)] for way in range(self.n_ways)]
                qry_pred_coarse = torch.stack([self.getPred(qry_fts[epi], supp_proto[way][epi], self.thresh_pred[way]) for way in range(self.n_ways)], dim=1)
                qry_prototype_coarse = self.getFeatures(qry_fts[epi], qry_pred_coarse[epi])
            qry_pred = torch.stack([self.getPred(qry_fts[epi], supp_proto[way][epi], self.thresh_pred[way]) for way in range(self.n_ways)], dim=1)
            # Upsample to image resolution and emit bg/fg channels.
            qry_pred_up = F.interpolate(qry_pred, size=img_size, mode='bilinear', align_corners=True)
            preds = torch.cat(((1.0 - qry_pred_up), qry_pred_up), dim=1)
            outputs.append(preds)
        output = torch.stack(outputs, dim=1)
        output = output.view((- 1), *output.shape[2:])
        return output

    def getPred(self, fts, prototype, thresh):
        """Soft foreground map from negative scaled cosine similarity vs. thresh."""
        sim = ((- F.cosine_similarity(fts, prototype[(..., None, None)], dim=1)) * self.scaler)
        pred = (1.0 - torch.sigmoid((0.5 * (sim - thresh))))
        return pred

    def getFeatures(self, fts, mask):
        """Masked average pooling of feature maps (prototype extraction)."""
        fts = F.interpolate(fts, size=mask.shape[(- 2):], mode='bilinear')
        masked_fts = (torch.sum((fts * mask[(None, ...)]), dim=((- 2), (- 1))) / (mask[(None, ...)].sum(dim=((- 2), (- 1))) + 1e-05))
        return masked_fts

    def getFeatures_fg(self, fts, mask):
        """Gather the per-pixel features of the (soft-)foreground region."""
        fts_ = fts.squeeze(0).permute(1, 2, 0)
        fts_ = fts_.view((fts_.size()[0] * fts_.size()[1]), fts_.size()[2])
        mask_ = F.interpolate(mask.unsqueeze(0), size=fts.shape[(- 2):], mode='bilinear')
        mask_ = mask_.view((- 1))
        # Take the ceil(sum) highest-mask pixels as foreground.
        l = math.ceil(mask_.sum())
        c = torch.argsort(mask_, descending=True, dim=0)
        fg = c[:l]
        fts_fg = fts_[fg]
        return fts_fg

    def getPrototype(self, fg_fts):
        """Average per-shot prototypes into one prototype per way."""
        (n_ways, n_shots) = (len(fg_fts), len(fg_fts[0]))
        fg_prototypes = [(torch.sum(torch.cat([tr for tr in way], dim=0), dim=0, keepdim=True) / n_shots) for way in fg_fts]
        return fg_prototypes

    def compute_multiple_prototypes(self, fg_num, sup_fts, sup_fg, sampler):
        """Farthest-point-seeded clustering of foreground pixels into fg_num prototypes.

        Picks fg_num centers (first random, then farthest-from-chosen), assigns
        each foreground pixel to its nearest center, and averages features per
        cluster. Returns a (B, C, fg_num)-shaped prototype tensor (transposed).
        """
        (B, C, h, w) = sup_fts.shape
        fg_mask = F.interpolate(sup_fg.unsqueeze(0), size=sup_fts.shape[(- 2):], mode='bilinear')
        fg_mask = fg_mask.squeeze(0).bool()
        batch_fg_protos = []
        for b in range(B):
            fg_protos = []
            fg_mask_i = fg_mask[b]
            with torch.no_grad():
                # Guarantee at least fg_num foreground pixels to cluster.
                if (fg_mask_i.sum() < fg_num):
                    fg_mask_i = fg_mask[b].clone()
                    fg_mask_i.view((- 1))[:fg_num] = True
                all_centers = []
                first = True
                pts = torch.stack(torch.where(fg_mask_i), dim=1)
                for _ in range(fg_num):
                    if first:
                        # Seed with a random foreground pixel.
                        i = sampler.choice(pts.shape[0])
                        first = False
                    else:
                        # Farthest-point sampling: maximize min distance to centers.
                        dist = (pts.reshape((- 1), 1, 2) - torch.stack(all_centers, dim=0).reshape(1, (- 1), 2))
                        i = torch.argmax((dist ** 2).sum((- 1)).min(1)[0])
                    pt = pts[i]
                    all_centers.append(pt)
                # Assign every foreground pixel to its nearest center.
                dist = (pts.reshape((- 1), 1, 2) - torch.stack(all_centers, dim=0).reshape(1, (- 1), 2))
                fg_labels = torch.argmin((dist ** 2).sum((- 1)), dim=1)
            fg_feats = sup_fts[b].permute(1, 2, 0)[fg_mask_i]
            for i in range(fg_num):
                proto = fg_feats[(fg_labels == i)].mean(0)
                fg_protos.append(proto)
            fg_protos = torch.stack(fg_protos, dim=1)
            batch_fg_protos.append(fg_protos)
        fg_proto = torch.stack(batch_fg_protos, dim=0).transpose(1, 2)
        return fg_proto

    def BATE(self, fg_prototypes, qry_prototype_coarse):
        """Refine support prototypes by attending to the coarse query prototype.

        Low-affinity entries (below the floor of (min+mean)/2) are suppressed
        before the softmax; the result passes through MHA + MLP.
        """
        A = torch.mm(fg_prototypes, qry_prototype_coarse.t())
        kc = ((A.min() + A.mean()) / 2).floor()
        # NOTE(review): A is always a tensor here, so this guard is vacuous.
        if (A is not None):
            S = torch.zeros(A.size(), dtype=torch.float).cuda()
            S[(A < kc)] = (- 10000.0)
        A = torch.softmax((A + S), dim=0)
        A = torch.mm(A, qry_prototype_coarse)
        A = self.layer_norm((A + fg_prototypes))
        T = self.MHA(A.unsqueeze(0), A.unsqueeze(0), A.unsqueeze(0))
        T = self.MLP(T)
        return T
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """Output container for sequence-to-sequence question answering models.

    All fields are optional; which ones are populated depends on the model's
    forward-pass flags.
    """
    # Span-extraction loss (present when labels are provided).
    loss: Optional[torch.FloatTensor] = None
    # Logits for answer-span start positions.
    start_logits: torch.FloatTensor = None
    # Logits for answer-span end positions.
    end_logits: torch.FloatTensor = None
    # Cached key/value states for fast decoding.
    past_key_values: Optional[List[torch.FloatTensor]] = None
    # Decoder hidden states, one per layer (plus embeddings).
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Decoder self-attention weights, one per layer.
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Decoder-over-encoder cross-attention weights, one per layer.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Final encoder hidden state.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # Encoder hidden states, one per layer (plus embeddings).
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder self-attention weights, one per layer.
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def zero_padding(text_tensor, tar_dim, device=None):
    """Right-pad a 2-D tensor with zeros up to ``tar_dim`` columns.

    Args:
        text_tensor: tensor of shape (batch, width).
        tar_dim: target width; must be >= text_tensor.shape[1].
        device: device for the padding (should match ``text_tensor``).

    Returns:
        Tensor of shape (batch, tar_dim) with zeros appended on the right.

    Raises:
        ValueError: if ``tar_dim`` is smaller than the current width (the
            original silently passed a negative size to torch.zeros).
    """
    padding_size = (tar_dim - text_tensor.shape[1])
    if padding_size < 0:
        raise ValueError(f'tar_dim ({tar_dim}) is smaller than tensor width ({text_tensor.shape[1]})')
    # Match the input dtype so torch.cat also works for integer tensors
    # (the original always created float32 zeros).
    zero_tensor = torch.zeros((text_tensor.shape[0], padding_size), dtype=text_tensor.dtype, device=device)
    padded_tensor = torch.cat([text_tensor, zero_tensor], dim=1)
    return padded_tensor
def test_inv_link_monoclassification0():
    """An empty score row maps to probability 1.0 for the single class."""
    empty_scores = np.array([[]])
    probs = inv_link(empty_scores, 'monoclassification')
    assert np.all(probs == np.array([[1.0]]))
def get_existing_item(var_full_name, collection_name):
    """Return the variable named ``var_full_name`` from the given TF collection,
    or None when no such variable exists."""
    matches = (v for v in tf.get_collection(collection_name) if v.name == var_full_name)
    return next(matches, None)
class SparseGraphConvolution(nn.Module):
    """Graph convolution over sparse tensors.

    Computes ``adj @ (input @ weight) (+ bias)`` where ``input`` and ``adj``
    are sparse matrices and ``weight`` is a dense learned projection.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool=False):
        super(SparseGraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Registered as None so named_parameters() stays consistent.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize the weight and zero the bias (when present)."""
        nn.init.xavier_uniform_(self.weight.data)
        if self.bias is not None:
            nn.init.constant_(self.bias.data, 0)

    def forward(self, input: torch.sparse.FloatTensor, adj: torch.sparse.FloatTensor) -> torch.Tensor:
        """Project node features, then aggregate them over the adjacency."""
        projected = torch.spmm(input, self.weight)
        aggregated = torch.spmm(adj, projected)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__, self.in_features, self.out_features)
class LOSComputationGraph():
    """Adjacency-list computation graph built from a genome of resolutions.

    Consecutive genes form a chain; when a resolution reappears, the earlier
    node at that resolution is marked to save a residual that the later node
    consumes.
    """

    class Node():
        """One computation node: a resolution, its index, and a residual flag."""

        def __init__(self, resolution, idx, residual=False):
            (self.resolution, self.idx, self.residual) = (resolution, idx, residual)
            # The earlier same-resolution node whose output this node reuses.
            self.residual_node = None

        def __repr__(self):
            residual_str = (', saves residual' if self.residual else '')
            return (('<Node index: {} resolution: {}'.format(self.idx, self.resolution) + residual_str) + '>')

        def __str__(self):
            return self.__repr__()

        def __lt__(self, other):
            # Nodes order by their position in the genome.
            assert isinstance(other, LOSComputationGraph.Node)
            return (self.idx < other.idx)

    def __init__(self, genome, under_connect=True):
        self.graph = LOSComputationGraph.make_graph(genome, under_connect)

    # --- Mapping-style pass-throughs to the underlying OrderedDict ---
    def __len__(self):
        return len(self.graph)

    def __iter__(self):
        return self.graph.__iter__()

    def items(self):
        return self.graph.items()

    def keys(self):
        return self.graph.keys()

    def values(self):
        return self.graph.values()

    def get_residual(self, node):
        """Return the same-resolution residual-saving dependency of node, if any."""
        if (node in self.graph):
            for dep in self.graph[node]:
                if ((dep.resolution == node.resolution) and dep.residual):
                    return dep
        return None

    # NOTE(review): behaves as a static factory (no self); called as
    # LOSComputationGraph.make_graph(...), which works unbound in Python 3 —
    # an @staticmethod decorator would make the intent explicit.
    def make_graph(genome, under_connect=True):
        """Build the ordered adjacency dict and wire residual connections.

        Each gene g becomes a node at resolution 2^-(g-1); nodes chain
        linearly, and repeated resolutions get residual links depending on
        the direction of the resolution change and ``under_connect``.
        """
        adj = OrderedDict()
        nodes = [LOSComputationGraph.Node(pow(2, (- (gene - 1))), i) for (i, gene) in enumerate(genome)]
        # Linear chain: each node points to its successor; last has no edges.
        for (i, (gene_i, gene_ipo)) in enumerate(zip(nodes, nodes[1:])):
            adj[gene_i] = [gene_ipo]
        adj[nodes[(- 1)]] = []
        # Track the most recent node seen at each resolution.
        previous_resolutions = {}
        previous_node = nodes[0]
        for (node, adj_list) in adj.items():
            if (node.resolution in previous_resolutions):
                # Connect when resolution increased, or decreased with
                # under_connect enabled.
                if ((previous_node.resolution < node.resolution) or ((previous_node.resolution > node.resolution) and under_connect)):
                    previous_resolutions[node.resolution].residual = True
                    node.residual_node = previous_resolutions[node.resolution]
                    previous_resolutions[node.resolution] = node
                else:
                    previous_resolutions[node.resolution] = node
            else:
                previous_resolutions[node.resolution] = node
            previous_node = node
        return adj
def reset_sim(sim):
    """Re-initialize the arcsim physics from the abqr rigid-cloth config.

    NOTE(review): the ``sim`` argument is unused — ``arcsim.init_physics``
    presumably resets global simulator state; confirm against the binding.
    """
    arcsim.init_physics('conf/rigidcloth/absparse/abqr_make.json', 'qr_out/out', False)
class Mosei_Dataset(Dataset):
    """CMU-MOSEI dataset yielding (key, tokens, audio, video, label) tuples.

    Loads pickled per-split sentence/audio/video features plus either
    sentiment or emotion labels; the 'private' split carries no labels.
    """

    def __init__(self, name, args, token_to_ix=None, dataroot='data'):
        super(Mosei_Dataset, self).__init__()
        assert (name in ['train', 'valid', 'test', 'private'])
        self.name = name
        self.args = args
        self.private_set = (name == 'private')
        self.dataroot = os.path.join(dataroot, 'MOSEI')
        word_file = os.path.join(self.dataroot, (name + '_sentences.p'))
        audio_file = os.path.join(self.dataroot, (name + '_mels.p'))
        # NOTE(review): the video file points at '<name>_mels.p' — the same
        # file as the audio features. This looks like a copy/paste slip;
        # confirm the intended filename before changing it.
        video_file = os.path.join(self.dataroot, (name + '_mels.p'))
        y_s_file = os.path.join(self.dataroot, (name + '_sentiment.p'))
        y_e_file = os.path.join(self.dataroot, (name + '_emotion.p'))
        # Resolve e.g. TRAIN_SET / VALID_SET from module globals.
        self.set = eval((name.upper() + '_SET'))
        self.key_to_word = pickle.load(open(word_file, 'rb'))
        self.key_to_audio = pickle.load(open(audio_file, 'rb'))
        self.key_to_video = pickle.load(open(video_file, 'rb'))
        if (not self.private_set):
            if (args.task == 'emotion'):
                self.key_to_label = pickle.load(open(y_e_file, 'rb'))
            if (args.task == 'sentiment'):
                self.key_to_label = pickle.load(open(y_s_file, 'rb'))
            # Keep only keys present in every modality. (The original removed
            # items from self.set while iterating it, which silently skips the
            # element following each removal; filtering into a new list fixes
            # that.)
            self.set = [key for key in self.set
                        if ((key in self.key_to_word) and (key in self.key_to_audio)
                            and (key in self.key_to_video) and (key in self.key_to_label))]
        self.key_to_sentence = tokenize(self.key_to_word)
        if (token_to_ix is not None):
            self.token_to_ix = token_to_ix
        else:
            (self.token_to_ix, self.pretrained_emb) = create_dict(self.key_to_sentence, self.dataroot)
        self.vocab_size = len(self.token_to_ix)
        # Max lengths for language, audio and video sequences.
        self.l_max_len = 30
        self.a_max_len = 50
        self.v_max_len = args.video_seq_len

    def __getitem__(self, idx):
        """Return one clip as (key, token ids, audio, video, label) tensors."""
        key = self.set[idx]
        L = sent_to_ix(self.key_to_sentence[key], self.token_to_ix, max_token=self.l_max_len)
        A = pad_feature(self.key_to_audio[key], self.a_max_len)
        V = pad_feature(self.key_to_video[key], self.v_max_len)
        # Empty label for the unlabeled private split.
        y = np.array([])
        if (not self.private_set):
            Y = self.key_to_label[key]
            if ((self.args.task == 'sentiment') and self.args.task_binary):
                y = np.array(cmumosei_2(Y))
            if ((self.args.task == 'sentiment') and (not self.args.task_binary)):
                y = np.array(cmumosei_7(Y))
            if (self.args.task == 'emotion'):
                # Binarize emotion intensities to presence/absence.
                Y[(Y > 0)] = 1
                y = Y
        return (key, torch.from_numpy(L), torch.from_numpy(A), torch.from_numpy(V).float(), torch.from_numpy(y))

    def __len__(self):
        return len(self.set)
def optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1):
    """Approximate optimal-transport distance between text and image embeddings.

    Builds a cosine cost matrix, masks padded positions, solves for the
    transport plan with IPOT, and returns the transport cost per batch element.
    The plan itself is computed without gradients (both ``cost`` and ``T`` are
    detached in the solver/trace).
    """
    cost = cost_matrix_cosine(txt_emb, img_emb)
    # Zero the cost wherever either side is padding.
    joint_pad = (txt_pad.unsqueeze((- 1)) | img_pad.unsqueeze((- 2)))
    cost.masked_fill_(joint_pad, 0)
    # Effective (unpadded) sequence lengths for the marginals.
    txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
    img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype)
    T = ipot(cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k)
    distance = trace(cost.matmul(T.detach()))
    return distance
def _block_shuffle(lst, block_size):
blocks = [lst[i:(i + block_size)] for i in range(0, len(lst), block_size)]
random.shuffle(blocks)
return [ele for block in blocks for ele in block] |
def test_iterator_cycle():
    """Cycling episode iterators replay their episode order indefinitely."""
    dataset = _construct_dataset(100)
    # Without shuffling, cycling repeats the dataset order every 100 episodes.
    cycling_iter = dataset.get_episode_iterator(cycle=True, shuffle=False, group_by_scene=False)
    for step in range(200):
        assert next(cycling_iter).episode_id == dataset.episodes[step % 100].episode_id
    # With a sampled subset, the same 20 episodes repeat in the same order.
    sampled_iter = dataset.get_episode_iterator(cycle=True, num_episode_sample=20)
    first_pass = list(islice(sampled_iter, 20))
    for step in range(200):
        assert next(sampled_iter).episode_id == first_pass[step % 20].episode_id
class stochastic_energy_latency_50(nn.Module):
    """NPN regressor over a flattened 50x52 input predicting a scalar in [0, 1].

    Outside of training invocations (detected from argv), forward returns a
    UCB-style acquisition value mean + UCB_K * variance instead of the
    (mean, variance) pair.
    """

    def __init__(self):
        super(stochastic_energy_latency_50, self).__init__()
        self.name = 'stochastic_energy_latency_50'
        # Natural-parameter network: each NPNLinear propagates mean and variance.
        self.find = nn.Sequential(NPNLinear((50 * 52), 128, False), NPNRelu(), NPNLinear(128, 128), NPNRelu(), NPNLinear(128, 64), NPNRelu(), NPNLinear(64, 1), NPNSigmoid())

    def forward(self, x):
        # Flatten the whole input into a single row.
        x = x.reshape(1, (- 1))
        (x, s) = self.find(x)
        # NOTE(review): switching behavior on sys.argv contents is fragile —
        # an explicit mode flag would be safer; kept as-is for compatibility.
        if (not (('train' in argv[0]) and ('train' in argv[2]))):
            return (x + (UCB_K * s))
        return (x, s)
def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor:
    """Differentiable optimal transport in log-space with a dustbin row/column.

    Augments the (b, m, n) score matrix with a learned dustbin score ``alpha``
    so unmatched items can be discarded, then runs ``iters`` Sinkhorn
    iterations against uniform marginals.

    Returns:
        (b, m+1, n+1) log transport plan (with the normalization added back).
    """
    (b, m, n) = scores.shape
    one = scores.new_tensor(1)
    (ms, ns) = ((m * one).to(scores), (n * one).to(scores))
    # Dustbin scores appended as an extra row and an extra column.
    bins0 = alpha.expand(b, m, 1)
    bins1 = alpha.expand(b, 1, n)
    alpha = alpha.expand(b, 1, 1)
    couplings = torch.cat([torch.cat([scores, bins0], (- 1)), torch.cat([bins1, alpha], (- 1))], 1)
    # Uniform log-marginals; the dustbin entry absorbs the size mismatch.
    norm = (- (ms + ns).log())
    log_mu = torch.cat([norm.expand(m), (ns.log()[None] + norm)])
    log_nu = torch.cat([norm.expand(n), (ms.log()[None] + norm)])
    (log_mu, log_nu) = (log_mu[None].expand(b, (- 1)), log_nu[None].expand(b, (- 1)))
    Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters)
    # Multiply probabilities by M+N (undo the normalization).
    Z = (Z - norm)
    return Z
class TFKubernetesWorker():
    """Runs a TensorFlow estimator task (PS or worker) inside Kubernetes,
    restarting the training thread after PS migration/scaling or PS failure."""

    def __init__(self, args):
        self._args = args
        task_conf = get_conf(py_conf=args.conf)
        self._task_conf = task_conf
        # The estimator's gRPC server is started at most once per process.
        self.estimator_server_started = False
        self.init_executor(task_conf)

    def init_executor(self, task_conf):
        """(Re)create the estimator executor from the task configuration."""
        logger.info('init_executor')
        self.estimator = EstimatorExecutor(task_conf)

    def start_failover_monitor(self):
        """Enable dynamic sharding and start the failover monitor when auto-scaling is on."""
        if self._args.enable_auto_scaling:
            self._task_conf.put(TFConstants.EnableDynamicSharding.name, True)
            self.tensorflow_failover = TensorflowFailover()
            self.tensorflow_failover.start_failover_monitor()

    def run_ps(self):
        """Block serving as a parameter server."""
        logger.info('ps server join')
        self.estimator.server.join()

    def run_worker(self):
        """Run the estimator's train-and-evaluate loop."""
        self.estimator.train_and_evaluate()

    def run(self):
        """Main loop: start the task thread, and on PS migration/failure
        rebuild the executor and retry; otherwise exit."""
        logger.info('KubernetesWorker is running!')
        while True:
            global_dict = common_util.GlobalDict()
            self.start_failover_monitor()
            global_dict['executor'] = self.estimator
            self.estimator.prepare()
            if (not self.estimator_server_started):
                self.estimator.start_server()
                self.estimator_server_started = True
            # PS tasks block in server.join(); workers train.
            if (self.estimator.task_type == TFConstants.PS()):
                run_thread = threading.Thread(target=self.run_ps)
            else:
                run_thread = threading.Thread(target=self.run_worker)
            if hasattr(self, 'tensorflow_failover'):
                self.tensorflow_failover.set_training_thread(run_thread)
            run_thread.start()
            run_thread.join()
            if (not run_thread.is_alive()):
                # Decide whether the thread ended due to a recoverable event.
                if global_dict.get(TFConstants.RelaunchForPs.name, TFConstants.RelaunchForPs()):
                    logger.info('ps is migrating or scaling')
                elif global_dict.get(TFConstants.RelaunchForFailure.name, TFConstants.RelaunchForFailure()):
                    logger.info('worker encounters ps failure and restart thread')
                else:
                    break
            # Recoverable: reset shared state, rebuild the executor, retry.
            global_dict.clear()
            self.init_executor(self._task_conf)
            continue
def get_post_fmean(gp, X, Z, params=None):
    """Posterior mean of the GP latent function at test inputs.

    Args:
        gp: GP model exposing ``decomp_params``, ``stats`` (with alpha at
            index 1) and ``prior`` (with ``get_mean`` / ``get_cov``).
        X: (ndata, ndims) training inputs.
        Z: (ntest, ndims) test inputs.
        params: optional packed hyperparameters, split via gp.decomp_params.

    Returns:
        (ntest,) posterior mean: prior mean + K(Z, X) @ alpha.
    """
    ntest = Z.shape[0]
    # Only the prior part of the parameters is needed here; the likelihood
    # part (and the unused ndata/ndims locals of the original) are dropped.
    (_lik_params, prior_params) = gp.decomp_params(params)
    alpha = gp.stats[1]
    fmu = gp.prior.get_mean(ntest)
    G = gp.prior.get_cov(X=Z, Z=X, params=prior_params)
    return (G.dot(alpha) + fmu)
def test_find_1d_closest_idx_to_origin() -> None:
    """The closest-to-origin search honors the requested sign direction."""
    values = np.array([0., 0., 0., (- 0.)])
    assert synthetic_crosswalk_generator.find_1d_closest_idx_to_origin(values, 'positive') == 2
    assert synthetic_crosswalk_generator.find_1d_closest_idx_to_origin(values, 'negative') == 3
def filter_answers(answers_dset, min_occurence):
    """Group question ids by preprocessed answer and keep frequent answers.

    Returns a dict mapping each answer (that appears in at least
    ``min_occurence`` distinct questions) to its set of question ids.
    """
    question_ids_by_answer = {}
    for entry in answers_dset:
        answer = entry.get('multiple_choice_answer', None)
        if answer is None:
            # Fall back to the first annotator answer.
            answer = entry['answers'][0]['answer']
        answer = preprocess_answer(answer)
        question_ids_by_answer.setdefault(answer, set()).add(entry['question_id'])
    occurence = {ans: qids for ans, qids in question_ids_by_answer.items() if len(qids) >= min_occurence}
    print(('Num of answers that appear >= %d times: %d' % (min_occurence, len(occurence))))
    return occurence
def _collect_textful_entries(d, speaker_id):
    """Build filelist lines for one text-full speaker directory.

    Pairs sorted wav/text files, phonemizes each transcript with mozi2phone,
    and routes every 10th clip (counter % 10 == 0) to the validation split.

    Returns:
        (train_lines, val_lines, correspondence_line).
    """
    wav_file_list = glob.glob((d + '/wav/*.wav'))
    lab_file_list = glob.glob((d + '/text/*.txt'))
    wav_file_list.sort()
    lab_file_list.sort()
    if (len(wav_file_list) == 0):
        print((('Error' + d) + '/wav '))
        exit()
    train_lines = []
    val_lines = []
    for counter, (lab, wav) in enumerate(zip(lab_file_list, wav_file_list)):
        with open(lab, 'r', encoding='utf-8') as f:
            mozi = f.read().split('\n')
        print(str(mozi))
        test = mozi2phone(str(mozi))
        print(test)
        line = ((((wav + '|') + str(speaker_id)) + '|') + test)
        print(line)
        if ((counter % 10) != 0):
            train_lines.append((line + '\n'))
        else:
            val_lines.append((line + '\n'))
    correspondence = (((str(speaker_id) + '|') + os.path.basename(d)) + '\n')
    return (train_lines, val_lines, correspondence)


def create_dataset_zundamon(filename):
    """Write VITS-style filelists for my-voice + Zundamon + textless corpora.

    Produces filelists/<filename>_{textful,textful_val,textless,val_textless,
    Correspondence}.txt. Returns 255 (kept for caller compatibility).

    The per-speaker processing that was duplicated for both text-full
    speakers now lives in ``_collect_textful_entries``; the unused
    ``textful_dir_list`` of the original was dropped.
    """
    textless_dir_list = glob.glob('dataset/textless/*')
    textless_dir_list.sort()
    Correspondence_list = list()
    output_file_list = list()
    output_file_list_val = list()
    output_file_list_textless = list()
    output_file_list_val_textless = list()
    # Speaker 0: the user's own voice; speaker 101: Zundamon.
    for d, speaker_id in (('dataset/textful/00_myvoice', 0), ('dataset/textful/1205_zundamon', 101)):
        train_lines, val_lines, correspondence = _collect_textful_entries(d, speaker_id)
        output_file_list.extend(train_lines)
        output_file_list_val.extend(val_lines)
        Correspondence_list.append(correspondence)
    # NOTE(review): textless numbering starts at 101, so the first textless
    # directory shares speaker id 101 with Zundamon — matches the original
    # behavior; confirm it is intended.
    speaker_id = 101
    for d in textless_dir_list:
        wav_file_list = glob.glob((d + '/*.wav'))
        wav_file_list.sort()
        for counter, wav in enumerate(wav_file_list):
            line = (((wav + '|') + str(speaker_id)) + '|a')
            print(line)
            if ((counter % 10) != 0):
                output_file_list_textless.append((line + '\n'))
            else:
                output_file_list_val_textless.append((line + '\n'))
        Correspondence_list.append((((str(speaker_id) + '|') + os.path.basename(d)) + '\n'))
        speaker_id = (speaker_id + 1)
    # Write all five filelists with unix newlines.
    def _write(suffix, lines):
        with open((('filelists/' + filename) + suffix), 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    _write('_textful.txt', output_file_list)
    _write('_textful_val.txt', output_file_list_val)
    _write('_textless.txt', output_file_list_textless)
    _write('_val_textless.txt', output_file_list_val_textless)
    _write('_Correspondence.txt', Correspondence_list)
    return 255
_model
def mobilenetv3_small_100(pretrained=False, **kwargs):
    """MobileNetV3-Small with width multiplier 1.0.

    Args:
        pretrained: load pretrained weights if available.
        **kwargs: forwarded to the model generator (e.g. num_classes).
    """
    model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
    return model
def test_series_captured(capture):
    """Two consecutive captured outputs are concatenated in call order."""
    with capture:
        m.captured_output('a')
        m.captured_output('b')
    assert (capture == 'ab')
def train(model, train_data_loader, test_data_loader, epochs, criterion, optimizer, filename='test_cm'):
    """Train for ``epochs``, printing per-epoch loss/accuracy, evaluating on
    the test loader every 5 epochs, and running a final confusion-matrix eval."""
    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        seen = 0
        hits = 0
        for batch in train_data_loader:
            inputs, labels = batch
            inputs = inputs.double()
            labels = labels.long()
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Accuracy bookkeeping: argmax over class scores.
            _, predicted = torch.max(outputs.data, 1)
            seen += labels.size(0)
            hits += (predicted == labels).sum().item()
        epoch_train_loss = running_loss / len(train_data_loader)
        epoch_train_acc = hits / seen
        print(f'Epoch {epoch + 1}, train loss = {epoch_train_loss}, train acc = {epoch_train_acc}')
        if (epoch + 1) % 5 == 0:
            _eval(model, test_data_loader, criterion, epoch)
    _final_eval(model, test_data_loader, criterion, filename)
    print('Finished Training and testing')
def convert_model_th_to_tf(torch_module, checkpoint):
    """Convert a PyTorch module to a TensorFlow SavedModel wrapper via ONNX.

    Exports the module to ONNX, converts it to a TF graph, loads it as a
    SavedModel, and wraps it so callers see PyTorch-style NCHW inputs and
    the original output container type (tuple/list/dict/single tensor).

    Args:
        torch_module: module exposing ``to_onnx(path, export_params=...)``.
        checkpoint: path prefix for the intermediate .onnx/.pb artifacts.

    Returns:
        A callable TFStoredModel mirroring the torch module's output layout.
    """
    import onnx
    from onnx_tf.backend import prepare
    import tensorflow as tf

    class TFStoredModel():
        """Callable wrapper translating NHWC<->NCHW around the SavedModel."""

        def __init__(self, model, output_type, output_names):
            self.model = model
            self.output_type = output_type
            self.output_names = output_names

        # The layout converters take a single value and recurse through
        # containers. They were plain functions in the original dump (the
        # decorator was lost), which made `self._nhwc_to_nchw(args)` pass
        # `self` as `x` and raise TypeError — @staticmethod restores the
        # intended behavior.
        @staticmethod
        def _nhwc_to_nchw(x):
            if isinstance(x, list):
                return [TFStoredModel._nhwc_to_nchw(y) for y in x]
            if isinstance(x, tuple):
                return tuple((TFStoredModel._nhwc_to_nchw(y) for y in x))
            if isinstance(x, dict):
                return x.__class__([(k, TFStoredModel._nhwc_to_nchw(y)) for (k, y) in x.items()])
            # Heuristic: a rank-4 tensor with 3 trailing channels is an image.
            if ((len(tf.shape(x)) == 4) and (tf.shape(x)[(- 1)] == 3)):
                return tf.transpose(x, (0, 3, 1, 2))
            return x

        @staticmethod
        def _nchw_to_nhwc(x):
            if isinstance(x, list):
                return [TFStoredModel._nchw_to_nhwc(y) for y in x]
            if isinstance(x, tuple):
                return tuple((TFStoredModel._nchw_to_nhwc(y) for y in x))
            if isinstance(x, dict):
                return {k: TFStoredModel._nchw_to_nhwc(y) for (k, y) in x.items()}
            if ((len(tf.shape(x)) == 4) and (tf.shape(x)[(- 3)] == 3)):
                return tf.transpose(x, (0, 2, 3, 1))
            return x

        def __call__(self, *args, **kwargs):
            args = self._nhwc_to_nchw(args)
            kwargs = self._nhwc_to_nchw(kwargs)
            output = self.model.signatures['serving_default'](*args, **kwargs)
            output = self._nchw_to_nhwc(output)
            # Re-pack the named outputs into the module's original container.
            len_output = len(output.keys())
            output = tuple((output[f'output_{i}'] for i in range(len_output)))
            if (self.output_type == list):
                return list(output)
            elif (self.output_type is None):
                return output[0]
            elif (self.output_type == tuple):
                return output
            else:
                assert (self.output_type in {dict, OrderedDict})
                return self.output_type(zip(self.output_names, output))

    onnx_path = f'{checkpoint}.onnx'
    output_type = torch_module.to_onnx(onnx_path, export_params=True)
    onnx_model = onnx.load(onnx_path)
    output_names = [x.name for x in onnx_model.graph.output]
    # Multiple unnamed outputs default to a tuple container.
    if ((output_type is None) and (len(output_names) > 0)):
        output_type = tuple
    assert (output_type in {OrderedDict, dict, tuple, list, None})
    model = prepare(onnx_model, auto_cast=True)
    model.export_graph(f'{checkpoint}.pb')
    path = f'{checkpoint}.pb'
    model = tf.saved_model.load(path)
    model = TFStoredModel(model, output_type=output_type, output_names=output_names)
    # Preserve the torch module's config on the wrapper, if it has one.
    if hasattr(torch_module, 'config'):
        setattr(model, 'config', torch_module.config)
    return model
class HuggingFaceBgeEmbeddings(langchain_core.pydantic_v1.BaseModel, langchain_core.embeddings.Embeddings):
    """LangChain `Embeddings` implementation backed by a BGE model loaded
    through `OptimizedSentenceTransformer`.

    Documents are embedded as-is; queries are prefixed with an instruction
    string (the Chinese instruction is auto-selected for '-zh' models).
    """
    # Underlying sentence-transformers model; populated in __init__.
    client: Any
    model_name: str = DEFAULT_BGE_MODEL
    # Optional directory for the sentence-transformers download cache.
    cache_folder: Optional[str] = None
    # Keyword args forwarded to the model constructor.
    model_kwargs: Dict[(str, Any)] = langchain_core.pydantic_v1.Field(default_factory=dict)
    # Keyword args forwarded to every `client.encode` call.
    encode_kwargs: Dict[(str, Any)] = langchain_core.pydantic_v1.Field(default_factory=dict)
    # Instruction prepended to every query before encoding.
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    def __init__(self, **kwargs: Any):
        """Validate fields, then load the sentence-transformers model.

        Raises:
            ImportError: if `sentence_transformers` is not installed.
        """
        super().__init__(**kwargs)
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ImportError('Could not import sentence_transformers python package. Please install it with `pip install sentence_transformers`.') from exc
        self.client = OptimizedSentenceTransformer(self.model_name, cache_folder=self.cache_folder, **self.model_kwargs)
        if ('-zh' in self.model_name):
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH
    class Config():
        # Pydantic config: reject unexpected constructor kwargs.
        extra = langchain_core.pydantic_v1.Extra.forbid
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents (newlines collapsed to spaces)."""
        texts = [t.replace('\n', ' ') for t in texts]
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()
    def embed_query(self, text: str) -> List[float]:
        """Embed a single query with the instruction prefix applied."""
        text = text.replace('\n', ' ')
        embedding = self.client.encode((self.query_instruction + text), **self.encode_kwargs)
        return embedding.tolist()
def parse_faiss_specs(specs_str):
    """Parse whitespace-separated FAISS index spec strings.

    Each spec is an underscore-joined list of components: ``PCA<n>``,
    ``NORM``, ``CLUS<n>`` and ``SPHERICAL``.  A positive cluster count is
    mandatory.  Returns a list of ``faiss_spec`` records.
    """
    parsed = []
    for spec_str in specs_str.split():
        pca = 0
        n_clus = 0
        norm = False
        sphere = False
        for comp in spec_str.split('_'):
            if comp.startswith('PCA'):
                pca = int(comp[3:])
            elif comp.startswith('CLUS'):
                n_clus = int(comp[4:])
            elif comp == 'NORM':
                norm = True
            elif comp == 'SPHERICAL':
                sphere = True
        assert (n_clus > 0)
        parsed.append(faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=spec_str))
    return parsed
def test_build():
    """Smoke-test Myeffnet: a 2-image batch should produce an output tensor."""
    net = Myeffnet(clip_pretrain_path='/mnt/lustre/zhangyuanhan/architech/efficientnet_b4_ra2_320-7eb33cd5.pth')
    batch = torch.rand(2, 3, 224, 224)
    result = net(batch)
    print(result.shape)
class AttenResNet5(nn.Module):
    """Attention-augmented residual CNN for 2-D spectrogram-like input.

    An hourglass attention branch (five max-pool downsamplings with
    increasingly dilated convolutions, then bilinear upsampling with skip
    connections) produces a soft attention map; the input is reweighted by
    ``(1 + attention)`` and processed by a dilated residual CNN trunk
    ending in a sigmoid scalar output.

    Fixes vs. previous revision:
      * the attention head used float division (``atten_channel / 4``) for
        Conv2d channel counts, which raises at construction time -- now
        integer division;
      * ``isinstance(m, (nn.Conv2d or nn.Linear))`` evaluates the ``or``
        first and only ever matched the first class, so Linear and
        BatchNorm1d layers were never initialised -- now proper tuples.
    """

    def __init__(self, atten_activation, atten_channel=16, temperature=1, size1=(257, 1091), size2=(249, 1075), size3=(233, 1043), size4=(201, 979), size5=(137, 851)):
        super(AttenResNet5, self).__init__()
        self.temperature = temperature  # divides attention logits before softmax
        # ---- attention branch: encoder (downsampling) ----
        self.pre = nn.Sequential(nn.Conv2d(1, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down1 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att1 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(4, 8)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip1 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down2 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att2 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(8, 16)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip2 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down3 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att3 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(16, 32)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip3 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down4 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att4 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(32, 64)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip4 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down5 = nn.MaxPool2d(kernel_size=3, stride=(1, 2))
        self.att5 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(64, 128)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        # ---- attention branch: decoder (upsampling with skips) ----
        self.up5 = nn.UpsamplingBilinear2d(size=size5)
        self.att6 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up4 = nn.UpsamplingBilinear2d(size=size4)
        self.att7 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up3 = nn.UpsamplingBilinear2d(size=size3)
        self.att8 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up2 = nn.UpsamplingBilinear2d(size=size2)
        self.att9 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up1 = nn.UpsamplingBilinear2d(size=size1)
        # Attention head: collapse channels down to a single attention map.
        if (atten_channel == 1):
            self.conv1 = nn.Sequential(nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=1, stride=1), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, 1, kernel_size=1, stride=1))
        else:
            # FIX: integer division -- Conv2d channel counts must be ints.
            self.conv1 = nn.Sequential(nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, (atten_channel // 4), kernel_size=1, stride=1), nn.BatchNorm2d((atten_channel // 4)), nn.ReLU(inplace=True), nn.Conv2d((atten_channel // 4), 1, kernel_size=1, stride=1))
        # Softmax axis is chosen by name: 'softmax2' over dim 2, 'softmax3' over dim 3.
        if (atten_activation == 'softmax2'):
            self.soft = nn.Softmax(dim=2)
        if (atten_activation == 'softmax3'):
            self.soft = nn.Softmax(dim=3)
        # ---- main trunk: dilated residual CNN ----
        self.cnn1 = nn.Conv2d(1, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(16)
        self.re1 = nn.ReLU(inplace=True)
        self.cnn2 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn2 = nn.BatchNorm2d(16)
        self.re2 = nn.ReLU(inplace=True)
        self.cnn3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.mp1 = nn.MaxPool2d(kernel_size=(1, 2))
        self.cnn4 = nn.Conv2d(16, 32, kernel_size=(3, 3), dilation=(2, 2))
        self.bn3 = nn.BatchNorm2d(32)
        self.re3 = nn.ReLU(inplace=True)
        self.cnn5 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn4 = nn.BatchNorm2d(32)
        self.re4 = nn.ReLU(inplace=True)
        self.cnn6 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp2 = nn.MaxPool2d(kernel_size=(1, 2))
        self.cnn7 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn5 = nn.BatchNorm2d(32)
        self.re5 = nn.ReLU(inplace=True)
        self.cnn8 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn6 = nn.BatchNorm2d(32)
        self.re6 = nn.ReLU(inplace=True)
        self.cnn9 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp3 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn10 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn12 = nn.BatchNorm2d(32)
        self.re12 = nn.ReLU(inplace=True)
        self.cnn11 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn13 = nn.BatchNorm2d(32)
        self.re13 = nn.ReLU(inplace=True)
        self.cnn12 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp4 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn13 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(8, 8))
        self.bn14 = nn.BatchNorm2d(32)
        self.re14 = nn.ReLU(inplace=True)
        self.cnn14 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn15 = nn.BatchNorm2d(32)
        self.re15 = nn.ReLU(inplace=True)
        self.cnn15 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp5 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn16 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(8, 8))
        # Flattened feature size after the trunk (for the default sizes).
        self.flat_feats = ((32 * 4) * 6)
        # ---- classifier MLP with two residual linear blocks ----
        self.ln1 = nn.Linear(self.flat_feats, 32)
        self.bn7 = nn.BatchNorm1d(32)
        self.re7 = nn.ReLU(inplace=True)
        self.ln2 = nn.Linear(32, 32)
        self.bn8 = nn.BatchNorm1d(32)
        self.re8 = nn.ReLU(inplace=True)
        self.ln3 = nn.Linear(32, 32)
        self.bn9 = nn.BatchNorm1d(32)
        self.re9 = nn.ReLU(inplace=True)
        self.ln4 = nn.Linear(32, 32)
        self.bn10 = nn.BatchNorm1d(32)
        self.re10 = nn.ReLU(inplace=True)
        self.ln5 = nn.Linear(32, 32)
        self.bn11 = nn.BatchNorm1d(32)
        self.re11 = nn.ReLU(inplace=True)
        self.ln6 = nn.Linear(32, 1)
        self.sigmoid = nn.Sigmoid()

        def _weights_init(m):
            # Xavier-init conv/linear weights; unit-init batchnorm scales.
            # FIX: proper isinstance tuples (the old `A or B` matched only A).
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.apply(_weights_init)

    def forward(self, x):
        """Return (sigmoid score, attention map) for input x of shape (B, 1, H, W)."""
        residual = x
        # Attention encoder with skip taps after each stage.
        x = self.att1(self.down1(self.pre(x)))
        skip1 = self.skip1(x)
        x = self.att2(self.down2(x))
        skip2 = self.skip2(x)
        x = self.att3(self.down3(x))
        skip3 = self.skip3(x)
        x = self.att4(self.down4(x))
        skip4 = self.skip4(x)
        x = self.att5(self.down5(x))
        # Decoder: upsample and merge with skips.
        x = self.att6((skip4 + self.up5(x)))
        x = self.att7((skip3 + self.up4(x)))
        x = self.att8((skip2 + self.up3(x)))
        x = self.att9((skip1 + self.up2(x)))
        x = self.conv1(self.up1(x))
        weight = self.soft((x / self.temperature))
        # Residual attention: reweight the raw input.
        x = ((1 + weight) * residual)
        # Trunk: pre-activation residual conv blocks separated by pooling.
        x = self.cnn1(x)
        residual = x
        x = self.cnn3(self.re2(self.bn2(self.cnn2(self.re1(self.bn1(x))))))
        x += residual
        x = self.cnn4(self.mp1(x))
        residual = x
        x = self.cnn6(self.re4(self.bn4(self.cnn5(self.re3(self.bn3(x))))))
        x += residual
        x = self.cnn7(self.mp2(x))
        residual = x
        x = self.cnn9(self.re6(self.bn6(self.cnn8(self.re5(self.bn5(x))))))
        x += residual
        x = self.cnn10(self.mp3(x))
        residual = x
        x = self.cnn12(self.re13(self.bn13(self.cnn11(self.re12(self.bn12(x))))))
        x += residual
        x = self.cnn13(self.mp4(x))
        residual = x
        x = self.cnn15(self.re15(self.bn15(self.cnn14(self.re14(self.bn14(x))))))
        x += residual
        x = self.cnn16(self.mp5(x))
        # Classifier head with two residual linear blocks.
        x = x.view((- 1), self.flat_feats)
        x = self.ln1(x)
        residual = x
        x = self.ln3(self.re8(self.bn8(self.ln2(self.re7(self.bn7(x))))))
        x += residual
        residual = x
        x = self.ln5(self.re10(self.bn10(self.ln4(self.re9(self.bn9(x))))))
        x += residual
        out = self.sigmoid(self.ln6(self.re11(self.bn11(x))))
        return (out, weight)
class GCN(nn.Module):
    """Multi-layer GraphConv network with ReLU between layers.

    Note: several constructor arguments (heads, activation, feat_drop,
    attn_drop, negative_slope, residual) are accepted only for signature
    compatibility with a GAT variant and are unused here.
    """

    def __init__(self, g, num_layers, in_dim, num_hidden, num_classes, heads, activation, feat_drop, attn_drop, negative_slope, residual):
        super(GCN, self).__init__()
        self.g = g
        self.num_layers = num_layers
        # Fix: removed the dead `self.gat_layers = []` that was immediately
        # overwritten; layers live in a ModuleList so they are registered.
        self.gat_layers = nn.ModuleList()
        self.gat_layers.append(GraphConv(in_dim, num_hidden))
        for _ in range(1, num_layers):
            self.gat_layers.append(GraphConv(num_hidden, num_hidden))
        self.gat_layers.append(GraphConv(num_hidden, num_classes))

    def forward(self, inputs, middle=False):
        """Return logits; with middle=True also the pre-ReLU hidden feats."""
        h = inputs
        middle_feats = []
        for l in range(self.num_layers):
            h = self.gat_layers[l](self.g, h)
            middle_feats.append(h)
            h = F.relu(h)
        logits = self.gat_layers[(- 1)](self.g, h)
        if middle:
            return (logits, middle_feats)
        return logits
def validate(val_loader, config) -> float:
    """Run up to `config.val_steps` validation batches and log mean losses.

    Fixes: the return annotation said `-> None` although the mean 'loss'
    value is returned; the loop variable shadowed the builtin `input`.

    Returns:
        Mean of the per-step 'loss' entries (numpy float).
    """
    val_losses = defaultdict(list)
    i_val_step = 0
    for batch in val_loader:
        i_val_step += 1
        batch = batch.to(config.device)
        (loss_dict, anomaly_map, anomaly_score, batch_recon) = vae_val_step(batch)
        for (k, v) in loss_dict.items():
            val_losses[k].append(v.item())
        if (i_val_step >= config.val_steps):
            break
    log_msg = 'Validation losses on normal samples: '
    log_msg += ' - '.join([f'{k}: {np.mean(v):.4f}' for (k, v) in val_losses.items()])
    print(log_msg)
    log({f'val/{k}': np.mean(v) for (k, v) in val_losses.items()}, config)
    # Log the last batch's input/reconstruction/residual map for inspection.
    log({'val/input': batch, 'val/recon': batch_recon, 'val/res': anomaly_map}, config)
    return np.mean(val_losses['loss'])
def preprocess_for_reward_modeling(df: pd.DataFrame, prompt_dict: dict, tokenizer: transformers.PreTrainedTokenizer, df_postprocessor: Optional[Callable]=None, end_sequence_with_eos: bool=False, verbose=True) -> dict[(str, torch.Tensor)]:
    """Turn a pairwise-preference dataframe into tensors for reward-model training.

    Each row is expected to carry prompt fields (consumed by `format_prompt`),
    two candidate completions under 'output_1'/'output_2', and a 'preference'
    label in {1, 2}.  Both candidates are tokenized and returned side by side.

    Args:
        df: source dataframe of preference records.
        prompt_dict: prompt templates passed through to `format_prompt`.
        tokenizer: tokenizer used by `_tokenize_fn`.
        df_postprocessor: optional dataframe transform applied first.
        end_sequence_with_eos: append the tokenizer EOS to each target text.
        verbose: log the merged tokenization metadata.

    Returns:
        dict with paired `input_ids`/`labels` lists, constant index tensors,
        the 0/1 `choice` tensor and merged tokenization metadata.
    """
    if (df_postprocessor is not None):
        df = df_postprocessor(df)
    list_dict_data = df.to_dict(orient='records')
    # Constant (N, 1) long tensors tagging candidate 0 vs candidate 1.
    (index_0, index_1) = tuple((torch.full(size=(len(list_dict_data), 1), fill_value=fill_value, dtype=torch.long) for fill_value in (0, 1)))
    def _get_numeric_preference(example: dict):
        # Human labels are 1-based; convert to the 0-based candidate index.
        return {1: 0, 2: 1}[example['preference']]
    choice = torch.tensor([[_get_numeric_preference(dict_data)] for dict_data in list_dict_data])
    def _get_text(example: dict, output_key: str):
        # Training text = formatted prompt followed by one candidate output.
        source = format_prompt(example, prompt_dict=prompt_dict)
        target = format_output(example, eos_token=(tokenizer.eos_token if end_sequence_with_eos else None), output_key=output_key)
        return (source + target)
    (text_list_0, text_list_1) = tuple(([_get_text(dict_data, key) for dict_data in list_dict_data] for key in ('output_1', 'output_2')))
    def _merge_tokenization_metadata(metadata_list: Sequence[dict]) -> dict:
        # Merge the two candidates' stats: sums for counts, example-weighted
        # means for averages, overall min/max for extremes.
        num_examples = sum((metadata['num_examples'] for metadata in metadata_list))
        num_truncated_tokens = sum((metadata['num_truncated_tokens'] for metadata in metadata_list))
        num_truncated_examples = sum((metadata['num_truncated_examples'] for metadata in metadata_list))
        input_ids_avg_lens = (sum([(metadata['input_ids_avg_len'] * metadata['num_examples']) for metadata in metadata_list]) / num_examples)
        input_ids_max_len = max((metadata['input_ids_max_len'] for metadata in metadata_list))
        input_ids_min_len = min((metadata['input_ids_min_len'] for metadata in metadata_list))
        labels_avg_lens = (sum([(metadata['labels_avg_len'] * metadata['num_examples']) for metadata in metadata_list]) / num_examples)
        labels_max_len = max((metadata['labels_max_len'] for metadata in metadata_list))
        labels_min_len = min((metadata['labels_min_len'] for metadata in metadata_list))
        return dict(num_examples=num_examples, num_truncated_tokens=num_truncated_tokens, num_truncated_examples=num_truncated_examples, input_ids_avg_len=input_ids_avg_lens, input_ids_max_len=input_ids_max_len, input_ids_min_len=input_ids_min_len, labels_avg_len=labels_avg_lens, labels_max_len=labels_max_len, labels_min_len=labels_min_len)
    logger.warning(f'Tokenizing {len(list_dict_data)} pairs...')
    (tokenized_0, tokenized_1) = tuple((_tokenize_fn(text_list, tokenizer) for text_list in (text_list_0, text_list_1)))
    # Interleave the two candidates: element i is [cand0_i, cand1_i].
    input_ids = [list(pair) for pair in utils.zip_(tokenized_0['input_ids'], tokenized_1['input_ids'])]
    labels = [list(pair) for pair in utils.zip_(tokenized_0['labels'], tokenized_1['labels'])]
    tokenization_metadata = _merge_tokenization_metadata([tokenized_0['tokenization_metadata'], tokenized_1['tokenization_metadata']])
    packaged_data = dict(input_ids=input_ids, labels=labels, index_0=index_0, index_1=index_1, choice=choice, tokenization_metadata=tokenization_metadata, metadata=dict(mean_choice=choice.float().mean().item()))
    if verbose:
        logger.warning(f'''Tokenization metadata:
{utils.jdumps(packaged_data['tokenization_metadata'])}''')
    return packaged_data
class PGReLU(torch.nn.Module):
    """Module wrapper dispatching to the custom PGReLUFunc autograd function."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # All forward/backward logic lives in PGReLUFunc.
        return PGReLUFunc.apply(x)
class InfiniteDataLoader():
    """Endlessly yields batches sampled with replacement from `dataset`.

    If `weights` is truthy, batches follow a WeightedRandomSampler;
    otherwise a uniform RandomSampler is used.  `__len__` is deliberately
    unsupported since the stream is infinite.

    Fix: removed the dead `if weights == None: weights = torch.ones(...)`
    block -- it ran after the sampler had already been chosen and the
    rebuilt tensor was never read (it also used `== None` instead of
    `is None`).
    """

    def __init__(self, dataset, weights, batch_size, num_workers):
        super().__init__()
        self.dataset = dataset
        if weights:
            sampler = torch.utils.data.WeightedRandomSampler(weights, replacement=True, num_samples=batch_size)
        else:
            sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
        batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=True)
        self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batch_sampler)))

    def __iter__(self):
        while True:
            (yield next(self._infinite_iterator))

    def __len__(self):
        raise ValueError
def dataset_generation():
    """Download the flower-photos dataset and build train/val pipelines.

    Relies on module-level `flower_dataset_url`, `img_height`, `img_width`
    and `batch_size`.  Returns (num_classes, train_ds, val_ds); only the
    training split is cached/shuffled/prefetched.
    """
    archive_dir = tf.keras.utils.get_file('flower_photos', origin=flower_dataset_url, untar=True)
    data_dir = pathlib.Path(archive_dir)
    split_kwargs = dict(validation_split=0.2, seed=123, image_size=(img_height, img_width), batch_size=batch_size)
    train_ds = tf.keras.utils.image_dataset_from_directory(data_dir, subset='training', **split_kwargs)
    val_ds = tf.keras.utils.image_dataset_from_directory(data_dir, subset='validation', **split_kwargs)
    class_names = train_ds.class_names
    train_ds = train_ds.cache().shuffle(100).prefetch(buffer_size=tf.data.AUTOTUNE)
    return (len(class_names), train_ds, val_ds)
def evaluate_3rd_item_task_fastgcnnew(valid_batch_index, model, sess, valid_data, is_training):
    """Evaluate the 3rd-order item-embedding prediction task (TF session API).

    Args:
        valid_batch_index: iterable of batch index slices for `gfn.split_batch_item`.
        model: graph exposing `loss_3rd_item_pos`, `predict_i_3rd_pos`,
            `target_item` and the support/training placeholders fed below.
        sess: active TF session used to run the ops.
        valid_data: 7-tuple of validation arrays (target items, k-shot users,
            2nd-order items, 3rd-order users, oracle item embeddings, and the
            two mask-count arrays).
        is_training: value fed to both training-phase placeholders.

    Returns:
        (mean loss, mean Pearson correlation between predicted and oracle
        embeddings), averaged over the batches.
    """
    (evaluate_loss, evaluate_pearson) = (0.0, 0.0)
    (valid_target_item, valid_k_shot_user, valid_second_order_items, valid_third_order_users, valid_oracle_item_ebd, valid_mask_num_second_order_item, valid_mask_num_third_order_user) = valid_data
    for index in tqdm.tqdm(valid_batch_index):
        (batch_target_item, batch_kshot_user, batch_2nd_item, batch_3rd_user, batch_oracle_item_ebd, batch_mask_num_2nd_item, batch_mask_num_3rd_user) = gfn.split_batch_item(valid_target_item, valid_k_shot_user, valid_second_order_items, valid_third_order_users, valid_oracle_item_ebd, valid_mask_num_second_order_item, valid_mask_num_third_order_user, index)
        # NOTE(review): the oracle embedding is fed as `model.target_item`
        # (the regression target), not `batch_target_item` -- confirm this
        # matches the graph's placeholder semantics.
        feed_dict = {model.target_item: batch_oracle_item_ebd, model.support_user_1st_pos_: batch_kshot_user, model.training_phrase_user_task: is_training, model.support_item_2nd_pos_: batch_2nd_item, model.training_phrase_item_task: is_training, model.support_user_3rd_pos: batch_3rd_user}
        (batch_evaluate_loss, batch_predict_ebd, batch_target_ebd) = sess.run([model.loss_3rd_item_pos, model.predict_i_3rd_pos, model.target_item], feed_dict)
        evaluate_loss += batch_evaluate_loss
        batch_pearson = Pearson_correlation(batch_predict_ebd, batch_target_ebd)
        evaluate_pearson += batch_pearson
    return ((evaluate_loss / len(valid_batch_index)), (evaluate_pearson / len(valid_batch_index)))
def main(args):
    """Load a trained checkpoint and evaluate it on the val or test split.

    Exactly one of `args.beam_size_val` / `args.beam_size_test` must be
    positive; it selects the split.  With `args.load_as_float16` the state
    dict is cast to float16 and saved alongside the original checkpoint.
    """
    config = Config.load_config_json(os.path.join(args.log_dir, 'config.json'))
    # Strip a trailing '_prune' marker from the model name, if present.
    if config.caption_model.endswith('_prune'):
        config.caption_model = replace_from_right(config.caption_model, '_prune', '', 1)
    # CLI args override the stored config (None means "not supplied").
    overrides = {k: v for (k, v) in vars(args).items() if (v is not None)}
    config.update(overrides)
    ckpt_path = os.path.join(args.log_dir, args.model_file)
    state_dict = torch.load(ckpt_path)
    if args.load_as_float16:
        config.eval_dir_suffix = (f'{config.eval_dir_suffix}_float16' if config.eval_dir_suffix else 'float16')
        state_dict = {k: (v.to(torch.float16) if isinstance(v, torch.Tensor) else v) for (k, v) in state_dict.items()}
        torch.save(state_dict, ckpt_path.replace('.pth', '_float16.pth'))
    state_dict = densify_state_dict(state_dict)
    logger.info(f'Model weights loaded from `{ckpt_path}`')
    if args.beam_size_val > 0 and args.beam_size_test > 0:
        raise ValueError('`beam_size_val` and `beam_size_test` cannot both be > 0')
    if args.beam_size_val > 0:
        split = 'val'
    elif args.beam_size_test > 0:
        split = 'test'
    else:
        raise ValueError('One of `beam_size_val` or `beam_size_test` must be > 0')
    return TrainingModule.eval_model(state_dict=state_dict, config=config, split=split)
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
    """Train a masked-image model for one epoch with AMP and grad accumulation.

    Per step: adjusts the LR once per accumulation window, runs the model
    under autocast (bfloat16 when `args.bf16`), scales the loss by
    `args.accum_iter`, and delegates backward/step to `loss_scaler`.
    Exits the process if the loss becomes non-finite.

    Returns:
        dict mapping metric name -> global average over the epoch.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 100
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (batch, _)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Per-iteration LR schedule, updated once per accumulation window.
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        (images, bool_masked_pos) = batch
        samples = images.to(device, non_blocking=True)
        # Flatten the patch mask to (B, num_patches) booleans.
        bool_masked_pos = bool_masked_pos.to(device, non_blocking=True).flatten(1).to(torch.bool)
        if args.bf16:
            with torch.cuda.amp.autocast(dtype=torch.bfloat16):
                (loss, _, _) = model(samples, mask=bool_masked_pos)
        else:
            with torch.cuda.amp.autocast():
                (loss, _, _) = model(samples, mask=bool_masked_pos)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        # Normalize for gradient accumulation; loss_scaler performs
        # backward() and only steps the optimizer on window boundaries.
        loss = (loss / accum_iter)
        loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]['lr']
        metric_logger.update(lr=lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
            # Log on a "per-1000-steps of an epoch" x-axis so curves from
            # different batch sizes align.
            epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def download_at(at_hash, path, archive_name):
    """Fetch an artifact via `at.get`, extract its zip in place, delete the zip."""
    root = at.get(at_hash, datastore=path, showlogs=True)
    archive_path = os.path.join(root, archive_name)
    with zipfile.ZipFile(archive_path, 'r') as zip_ref:
        zip_ref.extractall(os.path.join(root))
    os.remove(archive_path)
def histogram_cli_args(base_cli_dir_args: typing.List[str], temp_dir: pathlib.Path) -> typing.List[str]:
    """Append the histogram output-file flag (`-o <temp_dir>/hist.png`) to the base CLI args."""
    output_flag = f'-o {temp_dir}/hist.png'.split()
    return base_cli_dir_args + output_flag
class Normalize():
    """Swap RGB to BGR channel order while subtracting per-channel means.

    Output is float32.  `mean` is indexed per source channel; the defaults
    look like ImageNet-style pixel means in [0, 255] -- TODO confirm.
    """

    def __init__(self, mean=(122.675, 116.669, 104.008)):
        self.mean = mean

    def __call__(self, img):
        arr = np.asarray(img)
        out = np.empty_like(arr, np.float32)
        # Destination channel d takes source channel s with its mean removed.
        for dst, src in enumerate((2, 1, 0)):
            out[(..., dst)] = (arr[(..., src)] - self.mean[src])
        return out
class BasicNet(nn.Module):
    """ResNet-9-style backbone: prep conv, three pooled stages (first and
    last with residual blocks), max-pool head and a scaled linear classifier.
    """

    def __init__(self, do_batchnorm, channels, weight, pool, num_classes=10, initial_channels=1, new_num_classes=None, **kw):
        super().__init__()
        self.new_num_classes = new_num_classes  # consumed by finetune_parameters
        self.prep = ConvBN(do_batchnorm, initial_channels, channels['prep'], **kw)
        self.layer1 = ConvBN(do_batchnorm, channels['prep'], channels['layer1'], pool=pool, **kw)
        self.res1 = Residual(do_batchnorm, channels['layer1'], **kw)
        self.layer2 = ConvBN(do_batchnorm, channels['layer1'], channels['layer2'], pool=pool, **kw)
        self.layer3 = ConvBN(do_batchnorm, channels['layer2'], channels['layer3'], pool=pool, **kw)
        self.res3 = Residual(do_batchnorm, channels['layer3'], **kw)
        self.pool = nn.MaxPool2d(4)
        self.linear = nn.Linear(channels['layer3'], num_classes, bias=False)
        self.classifier = Mul(weight)

    def forward(self, x):
        h = self.prep(x)
        h = self.res1(self.layer1(h))
        h = self.layer2(h)
        h = self.res3(self.layer3(h))
        # Pool then flatten before the linear head.
        h = self.pool(h).view(h.size()[0], (- 1))
        return self.classifier(self.linear(h))

    def finetune_parameters(self, iid, channels, weight, pool, **kw):
        """Swap in a fresh head sized for `new_num_classes`; return its params."""
        self.linear = nn.Linear(channels['layer3'], self.new_num_classes, bias=False)
        self.classifier = Mul(weight)
        head = [self.linear, self.classifier]
        for module in head:
            for param in module.parameters():
                param.requires_grad = True
        return itertools.chain.from_iterable([m.parameters() for m in head])
def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> torch.optim.lr_scheduler._LRScheduler:
    """Build the LR schedule named by ``cfg.SOLVER.LR_SCHEDULER_NAME`` and
    wrap it with the configured warmup.

    Raises:
        ValueError: for an unrecognised scheduler name.
    """
    scheduler_name = cfg.SOLVER.LR_SCHEDULER_NAME
    if (scheduler_name == 'WarmupMultiStepLR'):
        # Milestones beyond MAX_ITER can never fire; drop them with a warning.
        valid_steps = [step for step in cfg.SOLVER.STEPS if (step <= cfg.SOLVER.MAX_ITER)]
        if (len(valid_steps) != len(cfg.SOLVER.STEPS)):
            logging.getLogger(__name__).warning('SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. These values will be ignored.')
        multiplier = MultiStepParamScheduler(values=[(cfg.SOLVER.GAMMA ** k) for k in range((len(valid_steps) + 1))], milestones=valid_steps, num_updates=cfg.SOLVER.MAX_ITER)
    elif (scheduler_name == 'WarmupCosineLR'):
        end_value = (cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR)
        assert ((end_value >= 0.0) and (end_value <= 1.0)), end_value
        multiplier = CosineParamScheduler(1, end_value)
    elif (scheduler_name == 'WarmupStepWithFixedGammaLR'):
        multiplier = StepWithFixedGammaParamScheduler(base_value=1.0, gamma=cfg.SOLVER.GAMMA, num_decays=cfg.SOLVER.NUM_DECAYS, num_updates=cfg.SOLVER.MAX_ITER)
    else:
        raise ValueError('Unknown LR scheduler: {}'.format(scheduler_name))
    # Warmup length is expressed as a fraction of the full run, capped at 1.
    warmup_length = min((cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER), 1.0)
    multiplier = WarmupParamScheduler(multiplier, cfg.SOLVER.WARMUP_FACTOR, warmup_length, cfg.SOLVER.WARMUP_METHOD, cfg.SOLVER.RESCALE_INTERVAL)
    return LRMultiplier(optimizer, multiplier=multiplier, max_iter=cfg.SOLVER.MAX_ITER)
def qkv_attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Args:
        query, key, value: tensors shaped (..., seq, d_k).
        mask: optional tensor broadcastable to the score matrix; positions
            where ``mask == 0`` are excluded from attention.
        dropout: optional dropout module applied to the attention weights.

    Returns:
        (attended values, attention weights).

    Fix: masked positions were filled with ``-.0`` (negative zero), which
    is a no-op under softmax, so the mask had no effect at all.  Fill with
    a large negative value so masked scores vanish after softmax.
    """
    d_k = query.size((- 1))
    scores = (torch.matmul(query, key.transpose((- 2), (- 1))) / sqrt(d_k))
    if (mask is not None):
        scores.data.masked_fill_(mask.data.eq(0), (- 1000000000.0))
    p_attn = F.softmax(scores, dim=(- 1))
    if (dropout is not None):
        p_attn = dropout(p_attn)
    return (torch.matmul(p_attn, value), p_attn)
def rand_augment_transform(config_str, hparams, use_cmc=False):
    """Build a RandAugment transform from a dash-separated config string.

    The first section must be 'rand'; the remaining sections are key/value
    pairs where 'm' sets the magnitude, 'n' the number of layers, 'w' the
    op-weight set index, and 'mstd' stores a magnitude-noise std into
    `hparams`.  Sections without a numeric part are skipped.
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    sections = config_str.split('-')
    assert (sections[0] == 'rand')
    for section in sections[1:]:
        parts = re.split('(\\d.*)', section)
        if len(parts) < 2:
            continue
        key, val = parts[0], parts[1]
        if key == 'mstd':
            hparams.setdefault('magnitude_std', float(val))
        elif key == 'm':
            magnitude = int(val)
        elif key == 'n':
            num_layers = int(val)
        elif key == 'w':
            weight_idx = int(val)
        else:
            assert False, 'Unknown RandAugment config section'
    op_factory = rand_augment_ops_cmc if use_cmc else rand_augment_ops
    ra_ops = op_factory(magnitude=magnitude, hparams=hparams)
    choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx)
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
def squeezenet1_1(pretrained=False, **kwargs):
    """SqueezeNet 1.1 factory.

    Fix: ``**kwargs`` were accepted but silently dropped; they are now
    forwarded to the `SqueezeNet` constructor (backward compatible for
    callers that passed none).

    Args:
        pretrained: when True, load weights from the local models directory.
    """
    model = SqueezeNet(1.1, **kwargs)
    if pretrained:
        model.load_state_dict(torch.load(os.path.join(models_dir, squeeze1_1_model_name)))
    return model
def transform_key_func(generator, n, vocab_size, eff_vocab_size=None):
    """Draw key material: a vocab permutation and n uniform samples in [0, 1).

    `eff_vocab_size` is accepted for interface compatibility but unused.
    Returns (xi, pi): xi has shape (n, 1), pi permutes range(vocab_size).
    """
    # Keep this sampling order (permutation first) so seeded generators
    # reproduce the same draws as before.
    permutation = torch.randperm(vocab_size, generator=generator)
    uniform_draws = torch.rand((n, 1), generator=generator)
    return (uniform_draws, permutation)
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Port a pickled Trax Reformer checkpoint into a PyTorch state dict."""
    config = ReformerConfig.from_json_file(config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = ReformerModelWithLMHead(config)
    # SECURITY: pickle.load executes arbitrary code during deserialisation --
    # only run this on checkpoints from a trusted source.
    with open(trax_model_pkl_path, 'rb') as checkpoint_file:
        model_weights = pickle.load(checkpoint_file)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
def resnet_v1_101(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_101'):
    """ResNet-101 (v1): 3/4/23/3 bottleneck units per stage; every stage but
    the last ends with a stride-2 unit.
    """
    def stage(name, depth, bottleneck_depth, num_units, last_stride):
        # num_units - 1 stride-1 units followed by one unit with `last_stride`.
        units = ([(depth, bottleneck_depth, 1)] * (num_units - 1)) + [(depth, bottleneck_depth, last_stride)]
        return resnet_utils.Block(name, bottleneck, units)

    blocks = [
        stage('block1', 256, 64, 3, 2),
        stage('block2', 512, 128, 4, 2),
        stage('block3', 1024, 256, 23, 2),
        stage('block4', 2048, 512, 3, 1),
    ]
    return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope)
class NeptuneCallback(TrainerCallback):
integration_version_key = 'source_code/integrations/transformers'
model_parameters_key = 'model_parameters'
trial_name_key = 'trial'
trial_params_key = 'trial_params'
trainer_parameters_key = 'trainer_parameters'
flat_metrics = {'train/epoch'}
def __init__(self, *, api_token: Optional[str]=None, project: Optional[str]=None, name: Optional[str]=None, base_namespace: str='finetuning', run=None, log_parameters: bool=True, log_checkpoints: Optional[str]=None, **neptune_run_kwargs):
if (not is_neptune_available()):
raise ValueError('NeptuneCallback requires the Neptune client library to be installed. To install the library, run `pip install neptune`.')
try:
from neptune import Run
from neptune.internal.utils import verify_type
except ImportError:
from neptune.new.internal.utils import verify_type
from neptune.new.metadata_containers.run import Run
verify_type('api_token', api_token, (str, type(None)))
verify_type('project', project, (str, type(None)))
verify_type('name', name, (str, type(None)))
verify_type('base_namespace', base_namespace, str)
verify_type('run', run, (Run, type(None)))
verify_type('log_parameters', log_parameters, bool)
verify_type('log_checkpoints', log_checkpoints, (str, type(None)))
self._base_namespace_path = base_namespace
self._log_parameters = log_parameters
self._log_checkpoints = log_checkpoints
self._initial_run: Optional[Run] = run
self._run = None
self._is_monitoring_run = False
self._run_id = None
self._force_reset_monitoring_run = False
self._init_run_kwargs = {'api_token': api_token, 'project': project, 'name': name, **neptune_run_kwargs}
self._volatile_checkpoints_dir = None
self._should_upload_checkpoint = (self._log_checkpoints is not None)
self._recent_checkpoint_path = None
if (self._log_checkpoints in {'last', 'best'}):
self._target_checkpoints_namespace = f'checkpoints/{self._log_checkpoints}'
self._should_clean_recently_uploaded_checkpoint = True
else:
self._target_checkpoints_namespace = 'checkpoints'
self._should_clean_recently_uploaded_checkpoint = False
def _stop_run_if_exists(self):
if self._run:
self._run.stop()
del self._run
self._run = None
def _initialize_run(self, **additional_neptune_kwargs):
from neptune.new import init_run
from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException
self._stop_run_if_exists()
try:
self._run = init_run(**self._init_run_kwargs, **additional_neptune_kwargs)
self._run_id = self._run['sys/id'].fetch()
except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e:
raise NeptuneMissingConfiguration() from e
def _use_initial_run(self):
self._run = self._initial_run
self._is_monitoring_run = True
self._run_id = self._run['sys/id'].fetch()
self._initial_run = None
def _ensure_run_with_monitoring(self):
if (self._initial_run is not None):
self._use_initial_run()
else:
if ((not self._force_reset_monitoring_run) and self._is_monitoring_run):
return
if (self._run and (not self._is_monitoring_run) and (not self._force_reset_monitoring_run)):
self._initialize_run(run=self._run_id)
self._is_monitoring_run = True
else:
self._initialize_run()
self._force_reset_monitoring_run = False
def _ensure_at_least_run_without_monitoring(self):
if (self._initial_run is not None):
self._use_initial_run()
elif (not self._run):
self._initialize_run(run=self._run_id, capture_stdout=False, capture_stderr=False, capture_hardware_metrics=False, capture_traceback=False)
self._is_monitoring_run = False
def run(self):
if (self._run is None):
self._ensure_at_least_run_without_monitoring()
return self._run
def _metadata_namespace(self):
return self.run[self._base_namespace_path]
    def _log_integration_version(self):
        """Stamp the run with the transformers integration version (module-level `version`)."""
        self.run[NeptuneCallback.integration_version_key] = version
def _log_trainer_parameters(self, args):
self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict()
def _log_model_parameters(self, model):
if (model and hasattr(model, 'config') and (model.config is not None)):
self._metadata_namespace[NeptuneCallback.model_parameters_key] = model.config.to_dict()
def _log_hyper_param_search_parameters(self, state):
if (state and hasattr(state, 'trial_name')):
self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name
if (state and hasattr(state, 'trial_params') and (state.trial_params is not None)):
self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params
def _log_model_checkpoint(self, source_directory: str, checkpoint: str):
target_path = relative_path = os.path.join(source_directory, checkpoint)
if (self._volatile_checkpoints_dir is not None):
consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint)
try:
cpkt_path = relative_path.replace('..', '').lstrip(os.path.sep)
copy_path = os.path.join(consistent_checkpoint_path, cpkt_path)
shutil.copytree(relative_path, copy_path)
target_path = consistent_checkpoint_path
except IOError as e:
logger.warning("NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'.Could fail trying to upload.".format(e))
self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path)
if (self._should_clean_recently_uploaded_checkpoint and (self._recent_checkpoint_path is not None)):
self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path)
self._recent_checkpoint_path = relative_path
def on_init_end(self, args, state, control, **kwargs):
self._volatile_checkpoints_dir = None
if (self._log_checkpoints and (args.overwrite_output_dir or (args.save_total_limit is not None))):
self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name
if ((self._log_checkpoints == 'best') and (not args.load_best_model_at_end)):
raise ValueError('To save the best model checkpoint, the load_best_model_at_end argument must be enabled.')
def on_train_begin(self, args, state, control, model=None, **kwargs):
if (not state.is_world_process_zero):
return
self._ensure_run_with_monitoring()
self._force_reset_monitoring_run = True
self._log_integration_version()
if self._log_parameters:
self._log_trainer_parameters(args)
self._log_model_parameters(model)
if state.is_hyper_param_search:
self._log_hyper_param_search_parameters(state)
def on_train_end(self, args, state, control, **kwargs):
self._stop_run_if_exists()
    def __del__(self):
        """Best-effort cleanup on GC: remove volatile checkpoint copies, then stop the run."""
        if (self._volatile_checkpoints_dir is not None):
            # ignore_errors: the directory may never have been created
            shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True)
        self._stop_run_if_exists()
def on_save(self, args, state, control, **kwargs):
if self._should_upload_checkpoint:
self._log_model_checkpoint(args.output_dir, f'checkpoint-{state.global_step}')
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
if (self._log_checkpoints == 'best'):
best_metric_name = args.metric_for_best_model
if (not best_metric_name.startswith('eval_')):
best_metric_name = f'eval_{best_metric_name}'
metric_value = metrics.get(best_metric_name)
operator = (np.greater if args.greater_is_better else np.less)
self._should_upload_checkpoint = ((state.best_metric is None) or operator(metric_value, state.best_metric))
def get_run(cls, trainer):
for callback in trainer.callback_handler.callbacks:
if isinstance(callback, cls):
return callback.run
raise Exception("The trainer doesn't have a NeptuneCallback configured.")
def on_log(self, args, state, control, logs: Optional[Dict[(str, float)]]=None, **kwargs):
if (not state.is_world_process_zero):
return
if (logs is not None):
for (name, value) in rewrite_logs(logs).items():
if isinstance(value, (int, float)):
if (name in NeptuneCallback.flat_metrics):
self._metadata_namespace[name] = value
else:
self._metadata_namespace[name].log(value, step=state.global_step) |
def checkpoint_best_train_cb(checkpoint_path, steps_per_epoch=(- 1), num_epochs=10):
    """Build a Keras ModelCheckpoint callback keeping only the best model by training loss.

    A negative `steps_per_epoch` saves on every epoch; otherwise the save
    frequency is `num_epochs * steps_per_epoch` batches.
    """
    save_freq = 'epoch' if steps_per_epoch < 0 else int(num_epochs * steps_per_epoch)
    return tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(checkpoint_path, 'cp-best-train.ckpt'),
        monitor='loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        save_freq=save_freq,
    )
def write_position_dependent_lexicon(lexiconp, separator):
    """Print position-dependent lexicon entries (Kaldi-style _B/_I/_E/_S suffixes).

    For each (word, prob, phones) entry, two lines are printed: a non-initial
    variant and a word-initial variant ('_S' for single-phone word-final entries,
    '_B' otherwise). Word-final status is decided by the external `is_end`.
    """
    def emit(word, prob, phones, suffixes):
        # print one lexicon line with positional suffixes attached to each phone
        marked = [p + s for p, s in zip(phones, suffixes)]
        print('{} {} {}'.format(word, prob, ' '.join(marked)))

    for word, prob, phones in lexiconp:
        n = len(phones)
        suffixes = ['_I'] * n
        if is_end(word, separator):
            suffixes[-1] = '_E'
            emit(word, prob, phones, suffixes)
            # single-phone word-final entries use '_S'; otherwise mark the first phone '_B'
            suffixes[0] = '_S' if n == 1 else '_B'
            emit(word, prob, phones, suffixes)
        else:
            emit(word, prob, phones, suffixes)
            suffixes[0] = '_B'
            emit(word, prob, phones, suffixes)
class Registry:
    """A string -> class registry with hierarchical scopes (mmcv-style).

    Fix: restored the `@staticmethod` / `@property` decorators that the code
    itself relies on: `self.split_scope_key(key)` (see `get`) would otherwise
    pass `self` as `key`, `self.infer_scope()` would fail with a no-argument
    function, and `registry.scope` / `self.name` / `self.children` are read as
    plain attributes in `_add_children` and `_register_module`.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        self._scope = self.infer_scope() if scope is None else scope
        # build_func priority: explicit argument > parent's > default build_from_cfg
        if build_func is None:
            if parent is not None:
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if parent is not None:
            assert isinstance(parent, Registry)
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return self.get(key) is not None

    def __repr__(self):
        format_str = self.__class__.__name__ + f'(name={self._name}, items={self._module_dict})'
        return format_str

    @staticmethod
    def infer_scope():
        """Infer the scope (top-level package name) of the module instantiating
        this registry, by inspecting the call stack."""
        # stack()[2] is the caller of __init__, i.e. the module creating the Registry
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split('.')
        return split_filename[0]

    @staticmethod
    def split_scope_key(key):
        """Split 'scope.key' into (scope, key); return (None, key) when unscoped."""
        split_index = key.find('.')
        if split_index != -1:
            return (key[:split_index], key[(split_index + 1):])
        else:
            return (None, key)

    @property
    def name(self):
        return self._name

    @property
    def scope(self):
        return self._scope

    @property
    def module_dict(self):
        return self._module_dict

    @property
    def children(self):
        return self._children

    def get(self, key):
        """Look up `key`; search children for foreign scopes, falling back to the
        root registry. Returns None when not found."""
        (scope, real_key) = self.split_scope_key(key)
        if (scope is None) or (scope == self._scope):
            if real_key in self._module_dict:
                return self._module_dict[real_key]
        elif scope in self._children:
            return self._children[scope].get(real_key)
        else:
            # the scope belongs to another tree: delegate to the root registry
            parent = self.parent
            while parent.parent is not None:
                parent = parent.parent
            return parent.get(key)

    def build(self, *args, **kwargs):
        """Build an instance via the configured build_func."""
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Attach a child registry under its scope name (must be unique)."""
        assert isinstance(registry, Registry)
        assert registry.scope is not None
        assert registry.scope not in self.children, f'scope {registry.scope} exists in {self.name} registry'
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        """Register `module_class` under one or more names (class name by default)."""
        if not inspect.isclass(module_class):
            raise TypeError(f'module must be a class, but got {type(module_class)}')
        if module_name is None:
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if (not force) and (name in self._module_dict):
                raise KeyError(f'{name} is already registered in {self.name}')
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        """Old-style register_module(module) API; kept for backward compatibility."""
        warnings.warn('The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.')
        if cls is None:
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a module class directly (module=...) or act as a decorator."""
        if not isinstance(force, bool):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # old-style usage: register_module(SomeClass)
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)
        if not ((name is None) or isinstance(name, str) or is_seq_of(name, str)):
            raise TypeError(f'name must be either of None, an instance of str or a sequence of str, but got {type(name)}')
        if module is not None:
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls
        return _register
def single_process_main(args):
    """Train a classifier in one process (optionally one rank of a distributed job).

    Builds datasets/model via `setup`, creates the optimizer + scheduler,
    optionally resumes from a checkpoint, then runs the epoch loop with
    per-epoch evaluation, best-top1 tracking, metric dumping and checkpointing
    on the master rank.
    """
    (args, (trainset, valset, num_train, num_val), model) = setup(args)
    criterion = nn.CrossEntropyLoss()
    if is_master(args.rank):
        logging(('# of Parameters: %d' % sum([param.numel() for param in model.parameters()])), args.log)
    if is_distributed(args.rank):
        model = init_distributed(model, args.rank, args.local_rank)
    (train_loader, train_sampler, val_loader) = init_dataloader(args, trainset, valset)
    # Unpack optimizer/scheduler hyper-parameters from args for readability.
    epochs = args.epochs
    log = args.log
    opt = args.opt
    lr_warmup = args.warmup_updates
    init_lr = args.init_lr
    hyper1 = args.opt_h1
    hyper2 = args.opt_h2
    eps = args.eps
    rebound = args.rebound
    lr_decay = args.lr_decay
    decay_rate = args.decay_rate
    milestone = args.milestone
    weight_decay = args.weight_decay
    weight_decay_type = args.weight_decay_type
    last_lr = args.last_lr
    (optimizer, scheduler, opt_param) = get_optimizer(opt, args.lr, model.parameters(), hyper1, hyper2, eps, rebound, lr_decay=lr_decay, decay_rate=decay_rate, milestone=milestone, weight_decay=weight_decay, weight_decay_type=weight_decay_type, warmup_updates=lr_warmup, init_lr=init_lr, last_lr=last_lr, num_epochs=epochs, world_size=args.world_size)
    if args.recover:
        # Resume model/optimizer/scheduler and best-metric bookkeeping from disk.
        checkpoint_name = args.checkpoint_name
        print(f'Rank = {args.rank}, loading from checkpoint {checkpoint_name}')
        checkpoint = torch.load(checkpoint_name, map_location=args.device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch']
        best_epoch = checkpoint['best_epoch']
        best_top1 = checkpoint['best_top1']
        best_top5 = checkpoint['best_top5']
        best_loss = checkpoint['best_loss']
        if is_master(args.rank):
            # Only the master keeps the metric history; sanity-eval after resume.
            numbers = checkpoint['numbers']
            del checkpoint
            with torch.no_grad():
                logging('Evaluating after resuming model...', log)
                eval(args, val_loader, model, criterion)
        else:
            numbers = None
            del checkpoint
    else:
        # Fresh start: epoch counting begins at 1.
        start_epoch = 1
        best_epoch = 0
        best_top1 = 0
        best_top5 = 0
        best_loss = 0
        if is_master(args.rank):
            numbers = {'train loss': [], 'train acc': [], 'test loss': [], 'test acc': []}
        else:
            numbers = None
    for epoch in range(start_epoch, (epochs + 1)):
        if is_distributed(args.rank):
            # Reshuffle the distributed sampler each epoch.
            train_sampler.set_epoch(epoch)
        lr = scheduler.get_last_lr()[0]
        if is_master(args.rank):
            logging('Epoch: {}/{} ({}, lr={:.6f}, {})'.format(epoch, epochs, opt, lr, opt_param), log)
        (train_loss, train_top1, train_top5) = train(args, train_loader, num_train, model, criterion, optimizer)
        scheduler.step()
        if is_master(args.rank):
            # Evaluate, track the best top-1 model, dump metrics and checkpoint.
            with torch.no_grad():
                (loss, top1, top5) = eval(args, val_loader, model, criterion)
            if (top1 > best_top1):
                best_top1 = top1
                best_top5 = top5
                best_loss = loss
                best_epoch = epoch
            logging('Best loss: {:.4f}, top1: {:.2f}%, top5: {:.2f}%, epoch: {}'.format(best_loss, best_top1, best_top5, best_epoch), args.log)
            numbers['train loss'].append(train_loss)
            numbers['test loss'].append(loss)
            numbers['train acc'].append(train_top1)
            numbers['test acc'].append(top1)
            json.dump(numbers, open(os.path.join(args.model_path, 'values.run{}.json'.format(args.run)), 'w'))
            checkpoint_name = args.checkpoint_name
            # 'epoch': epoch + 1 so a resumed job continues at the next epoch.
            torch.save({'epoch': (epoch + 1), 'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'best_epoch': best_epoch, 'best_top1': best_top1, 'best_top5': best_top5, 'best_loss': best_loss, 'numbers': numbers}, checkpoint_name)
def split_glas(args):
    """Split the GlaS (Warwick-QU) dataset into k train/valid folds plus the fixed test set.

    Reads Grade.csv, partitions samples into benign/malignant, shuffles the
    train pool per class, then writes per-fold train / class-validation /
    pixel-validation subsets and the shared test subset via `dump_subset`.
    """
    os.makedirs(args.fold_folder, exist_ok=True)
    classes = ['benign', 'malignant']
    datasetname = args.dataset
    dict_classes_names = {'benign': 0, 'malignant': 1}
    baseurl = args.baseurl
    pre = 'Warwick_QU_Dataset_(Released_2016_07_08)'
    trainsamples = dict()
    testsamples = dict()
    ext = args.img_extension
    with open(join(baseurl, pre, 'Grade.csv'), 'r') as csvfile:
        reader = csv.reader(csvfile)
        next(reader, None)  # skip the CSV header row
        for row in reader:
            row = [r.replace(' ', '') for r in row]
            msg = 'The class `{}` is not within the predefined classes `{}`'.format(row[2], classes)
            assert (row[2] in classes), msg
            # key: image path; value: (annotation mask path, class name)
            key = f'{pre}/{row[0]}.{ext}'
            c_s = (f'{pre}/{row[0]}_anno.{ext}', row[2])
            assert (key not in trainsamples)
            assert (key not in testsamples)
            if row[0].startswith('train'):
                trainsamples[key] = c_s
            elif row[0].startswith('test'):
                testsamples[key] = c_s
    # GlaS has 165 images total, of which 80 form the official test split.
    assert ((len(trainsamples.keys()) + len(testsamples.keys())) == 165)
    assert (len(testsamples.keys()) == 80), len(testsamples.keys())
    benign = [s for s in trainsamples.keys() if (trainsamples[s][1] == 'benign')]
    malignant = [s for s in trainsamples.keys() if (trainsamples[s][1] == 'malignant')]
    os.makedirs(args.fold_folder, exist_ok=True)
    with open(join(args.fold_folder, 'encoding.yaml'), 'w') as f:
        yaml.dump(dict_classes_names, f)
    # Heavy shuffling of the per-class train pools (presumably `random` was seeded upstream — verify).
    for _ in range(1000):
        random.shuffle(benign)
        random.shuffle(malignant)
    def create_kfolds():
        # Per-class validation sizes derived from the requested percentage args.folding['vl'].
        vl_size_benign = math.ceil(((len(benign) * args.folding['vl']) / 100.0))
        vl_size_malignant = math.ceil(((len(malignant) * args.folding['vl']) / 100.0))
        list_folds_benign = create_folds_of_one_class(benign, (len(benign) - vl_size_benign), vl_size_benign)
        list_folds_malignant = create_folds_of_one_class(malignant, (len(malignant) - vl_size_malignant), vl_size_malignant)
        assert (len(list_folds_benign) == len(list_folds_malignant))
        print('We found {} folds .... [OK]'.format(len(list_folds_malignant)))
        outd = args.fold_folder
        for i in range(args.nbr_folds):
            print(f'Creating fold {i}')
            # fold element [0] is the train part, [1] the validation part
            train = (list_folds_malignant[i][0] + list_folds_benign[i][0])
            for t in range(1000):
                random.shuffle(train)
            dump_subset(fold_folder=args.fold_folder, fold=i, subset=constants.TRAINSET, samples={kk: trainsamples[kk] for kk in train}, encoding=dict_classes_names, dataset=datasetname)
            dump_subset(fold_folder=args.fold_folder, fold=i, subset=constants.TESTSET, samples=testsamples, encoding=dict_classes_names, dataset=datasetname)
            # class-level validation split
            validcl = (list_folds_malignant[i][1] + list_folds_benign[i][1])
            dump_subset(fold_folder=args.fold_folder, fold=i, subset=constants.CLVALIDSET, samples={kk: trainsamples[kk] for kk in validcl}, encoding=dict_classes_names, dataset=datasetname)
            # pixel-level validation: n fully supervised samples per class
            n = args.vl_sup_per_cl
            validpx = random.sample(list_folds_malignant[i][1], n)
            validpx += random.sample(list_folds_benign[i][1], n)
            dump_subset(fold_folder=args.fold_folder, fold=i, subset=constants.PXVALIDSET, samples={kk: trainsamples[kk] for kk in validpx}, encoding=dict_classes_names, dataset=datasetname)
    create_kfolds()
    print(f'All {datasetname} splitting ({args.nbr_folds}) ended with success [OK].')
def reweighting_all_cliques(mc):
    """Count, for every sub-clique of size >= 2 of the maximal cliques in `mc`,
    in how many maximal cliques it appears.

    Parameters:
        mc: iterable of maximal cliques, each an ordered iterable of nodes.
    Returns:
        dict mapping node tuples (in original clique order) to occurrence counts.

    Improvement: single Counter pass replaces the original
    enumerate -> dedupe -> re-enumerate-and-count sequence; result is identical.
    """
    from itertools import combinations
    from collections import Counter
    counts = Counter(
        comb
        for clique in mc
        for size in range(2, len(clique) + 1)
        for comb in combinations(clique, size)
    )
    return dict(counts)
def getnodes(tree, nodelist):
    """Append `tree` and all of its descendants to `nodelist` in pre-order.

    Assumes each node exposes `children()` yielding (name, child) pairs.
    """
    nodelist.append(tree)
    for _name, child in tree.children():
        getnodes(child, nodelist)
def add_bel_io(x, y, z):
    """Register the SB_IO bel at tile (x, y), pin index z, and wire it up.

    Appends one entry to the parallel module-level bel_* tables, connects the
    bel's ports to pre-built `wire_names` entries, and attaches a global-buffer
    output for any global network whose padin PIO is this (x, y, z).
    """
    # index of the new bel = next slot in the parallel bel_* lists
    bel = len(bel_name)
    bel_name.append((x, y, ('io%d' % z)))
    bel_type.append('SB_IO')
    bel_pos.append((x, y, z))
    bel_wires.append(list())
    # per-tile shared IO control wires
    wire_cen = wire_names[(x, y, 'io_global/cen')]
    wire_iclk = wire_names[(x, y, 'io_global/inclk')]
    wire_latch = wire_names[(x, y, 'io_global/latch')]
    wire_oclk = wire_names[(x, y, 'io_global/outclk')]
    # per-pin data wires
    wire_din_0 = wire_names[(x, y, ('io_%d/D_IN_0' % z))]
    wire_din_1 = wire_names[(x, y, ('io_%d/D_IN_1' % z))]
    wire_dout_0 = wire_names[(x, y, ('io_%d/D_OUT_0' % z))]
    wire_dout_1 = wire_names[(x, y, ('io_%d/D_OUT_1' % z))]
    wire_out_en = wire_names[(x, y, ('io_%d/OUT_ENB' % z))]
    add_bel_input(bel, wire_cen, 'CLOCK_ENABLE')
    add_bel_input(bel, wire_iclk, 'INPUT_CLK')
    add_bel_input(bel, wire_oclk, 'OUTPUT_CLK')
    add_bel_input(bel, wire_latch, 'LATCH_INPUT_VALUE')
    add_bel_output(bel, wire_din_0, 'D_IN_0')
    add_bel_output(bel, wire_din_1, 'D_IN_1')
    add_bel_input(bel, wire_dout_0, 'D_OUT_0')
    add_bel_input(bel, wire_dout_1, 'D_OUT_1')
    add_bel_input(bel, wire_out_en, 'OUTPUT_ENABLE')
    # if this pad drives a global network, expose the global buffer output wire
    for (gidx, ginfo) in glbinfo.items():
        if ((ginfo['pi_gb_x'], ginfo['pi_gb_y'], ginfo['pi_gb_pio']) == (x, y, z)):
            add_bel_output(bel, wire_names[(x, y, ('glb_netwk_%d' % gidx))], 'GLOBAL_BUFFER_OUTPUT')
class MultiHeadAttention(nn.Module):
    """Standard multi-head self-attention with a fused QKV projection.

    An optional additive `mask` is applied to the attention scores before the
    softmax; dropout is applied to the attention weights.
    """

    def __init__(self, embed_dim, num_attention_heads, dropout, output_dim=None) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        self.num_attention_heads = num_attention_heads
        self.head_dim = embed_dim // num_attention_heads
        # 1/sqrt(head_dim) scaling of the dot products
        self.scale = self.head_dim ** -0.5
        self.to_qkv = nn.Linear(embed_dim, embed_dim * 3, bias=False)
        self.out_projection = nn.Linear(embed_dim, output_dim or embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        bsz, seq_len, _ = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        # (b, n, h*d) -> (b, h, n, d) for each of q, k, v
        q, k, v = (
            t.reshape(bsz, seq_len, self.num_attention_heads, self.head_dim).permute(0, 2, 1, 3)
            for t in qkv
        )
        scores = q.matmul(k.transpose(-2, -1)) * self.scale
        if mask is not None:
            scores = scores + mask
        attn = self.dropout(scores.softmax(dim=-1))
        ctx = attn.matmul(v)
        # (b, h, n, d) -> (b, n, h*d)
        ctx = ctx.permute(0, 2, 1, 3).reshape(bsz, seq_len, -1)
        return self.out_projection(ctx)
class PVRCNN_M_DB_3(Detector3DTemplate_M_DB_3):
    """PV-RCNN detector trained jointly on three source datasets.

    During training the shared modules run on the merged batch, which is then
    split per source ('waymo' first, then `source_one_name` vs the remainder)
    so each of the three head stacks (last 9 modules, 3 per source) sees only
    its own samples. At inference only the stack selected by `source_1` runs.
    """
    def __init__(self, model_cfg, num_class, num_class_s2, num_class_s3, dataset, dataset_s2, dataset_s3, source_one_name, source_1):
        super().__init__(model_cfg=model_cfg, num_class=num_class, num_class_s2=num_class_s2, num_class_s3=num_class_s3, dataset=dataset, dataset_s2=dataset_s2, dataset_s3=dataset_s3, source_one_name=source_one_name, source_1=source_1)
        self.module_list = self.build_networks()
        self.source_one_name = source_one_name
        self.source_1 = source_1
    def forward(self, batch_dict):
        batch_s1 = {}
        batch_s2 = {}
        batch_s3 = {}
        if self.training:
            # layout assumption: last 9 modules = 3 per-source head stacks — TODO confirm
            len_of_module = len(self.module_list)
            for (k, cur_module) in enumerate(self.module_list):
                if (k < (len_of_module - 9)):
                    # shared backbone modules process the merged batch
                    batch_dict = cur_module(batch_dict)
                if ((k == (len_of_module - 9)) or (k == (len_of_module - 8)) or (k == (len_of_module - 7))):
                    if (k == (len_of_module - 9)):
                        # split merged batch: waymo vs rest, then rest into s2 vs s3
                        (split_tag_s1, split_tag_s2_pre) = common_utils.split_batch_dict('waymo', batch_dict)
                        (batch_s1, batch_s2_pre) = common_utils.split_two_batch_dict_gpu(split_tag_s1, split_tag_s2_pre, batch_dict)
                        (split_tag_s2, split_tag_s3) = common_utils.split_batch_dict(self.source_one_name, batch_s2_pre)
                        (batch_s2, batch_s3) = common_utils.split_two_batch_dict_gpu(split_tag_s2, split_tag_s3, batch_s2_pre)
                    batch_s1 = cur_module(batch_s1)
                if ((k == (len_of_module - 6)) or (k == (len_of_module - 5)) or (k == (len_of_module - 4))):
                    batch_s2 = cur_module(batch_s2)
                if ((k == (len_of_module - 3)) or (k == (len_of_module - 2)) or (k == (len_of_module - 1))):
                    batch_s3 = cur_module(batch_s3)
        else:
            # inference: run shared modules, then only the head stack chosen by source_1
            len_of_module = len(self.module_list)
            for (k, cur_module) in enumerate(self.module_list):
                if (k < (len_of_module - 9)):
                    batch_dict = cur_module(batch_dict)
                if ((k == (len_of_module - 9)) or (k == (len_of_module - 8)) or (k == (len_of_module - 7))):
                    if (self.source_1 == 1):
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if ((k == (len_of_module - 6)) or (k == (len_of_module - 5)) or (k == (len_of_module - 4))):
                    if (self.source_1 == 2):
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
                if ((k == (len_of_module - 3)) or (k == (len_of_module - 2)) or (k == (len_of_module - 1))):
                    if (self.source_1 == 3):
                        batch_dict = cur_module(batch_dict)
                    else:
                        continue
        if self.training:
            (loss_1, tb_dict_1, disp_dict_1) = self.get_training_loss_s1()
            (loss_2, tb_dict_2, disp_dict_2) = self.get_training_loss_s2()
            (loss_3, tb_dict_3, disp_dict_3) = self.get_training_loss_s3()
            # NOTE(review): the summed loss is returned but only source-1 tb/disp
            # dicts are propagated — confirm that dropping tb_dict_2/3 is intended.
            ret_dict = {'loss': ((loss_1 + loss_2) + loss_3)}
            return (ret_dict, tb_dict_1, disp_dict_1)
        else:
            (pred_dicts, recall_dicts) = self.post_processing(batch_dict)
            return (pred_dicts, recall_dicts)
    def get_training_loss_s1(self):
        """Sum RPN + point head + RCNN losses for source 1."""
        disp_dict = {}
        (loss_rpn, tb_dict) = self.dense_head_s1.get_loss()
        (loss_point, tb_dict) = self.point_head_s1.get_loss(tb_dict)
        (loss_rcnn, tb_dict) = self.roi_head_s1.get_loss(tb_dict)
        loss = ((loss_rpn + loss_point) + loss_rcnn)
        return (loss, tb_dict, disp_dict)
    def get_training_loss_s2(self):
        """Sum RPN + point head + RCNN losses for source 2."""
        disp_dict = {}
        (loss_rpn, tb_dict) = self.dense_head_s2.get_loss()
        (loss_point, tb_dict) = self.point_head_s2.get_loss(tb_dict)
        (loss_rcnn, tb_dict) = self.roi_head_s2.get_loss(tb_dict)
        loss = ((loss_rpn + loss_point) + loss_rcnn)
        return (loss, tb_dict, disp_dict)
    def get_training_loss_s3(self):
        """Sum RPN + point head + RCNN losses for source 3."""
        disp_dict = {}
        (loss_rpn, tb_dict) = self.dense_head_s3.get_loss()
        (loss_point, tb_dict) = self.point_head_s3.get_loss(tb_dict)
        (loss_rcnn, tb_dict) = self.roi_head_s3.get_loss(tb_dict)
        loss = ((loss_rpn + loss_point) + loss_rcnn)
        return (loss, tb_dict, disp_dict)
def get_slim_ratio_schedule(train_slim_ratios: list, mode: str, client_num):
    """Assign each client a slimming ratio following a log-normal CDF schedule.

    `mode` has the form 'ln<s>_<scale>'; clients are partitioned into contiguous
    ranges whose sizes follow the log-normal CDF mass between consecutive ratios.
    Returns a length-`client_num` array of ratios (only 'ln*' modes are handled).
    """
    if mode.startswith('ln'):
        sorted_ratios = sorted(train_slim_ratios)
        smallest = min(train_slim_ratios)
        from scipy.stats import lognorm
        s_param, scale_param = (float(v) for v in mode[len('ln'):].split('_'))
        dist = lognorm(s=s_param, scale=scale_param)
        print(sorted_ratios)
        # CDF at each ratio, closed with 1.0 so the last bucket reaches client_num
        cdf_vals = [dist.cdf(w) for w in sorted_ratios] + [1.0]
        print(cdf_vals)
        offsets = [c - dist.cdf(smallest) for c in cdf_vals]
        span = offsets[-1] - offsets[0]
        # client-index boundaries proportional to the CDF mass
        boundaries = [int(client_num * (q - offsets[0]) / span) for q in offsets]
        print(boundaries)
        schedule = np.zeros(client_num)
        for idx in range(len(boundaries) - 1):
            schedule[boundaries[idx]:boundaries[idx + 1]] = sorted_ratios[idx]
        return schedule
class Sensor(object):
    """Static description of a front-facing RGB camera sensor (CARLA-style)."""

    def __init__(self, transform, config):
        # fixed sensor kind; only geometry/resolution come from config
        self.type_id = 'sensor.camera.rgb'
        self.transform = transform
        self.attributes = {
            'role_name': 'front',
            'image_size_x': str(config['img_length']),
            'image_size_y': str(config['img_width']),
            'fov': str(config['fov']),
        }
class ResNet101(nn.Module):
    """DeepLab-style dilated ResNet-101 with an ASPP head plus an auxiliary
    uncertainty head (single-channel sigmoid map) for semantic segmentation.

    `phase='train'` makes `forward` upsample both outputs to input resolution.
    Fixes vs. original: (1) the width assert's failure message formatted
    `target.size(3)`, which itself raised IndexError on a 3-D target — it now
    reports `target.size(2)`; (2) `get_10x_lr_params` referenced nonexistent
    `self.layer5` (AttributeError via `optim_parameters`) — it now yields the
    parameters of the new heads (`aspp`, `uncertainty`); (3) the deprecated
    `size_average` flag is mapped onto `reduction`.
    """

    def __init__(self, block, layers, num_classes, phase):
        self.inplanes = 64
        self.phase = phase
        super(ResNet101, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
        # freeze the stem batch-norm parameters
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # keep spatial resolution in the last two stages via dilation
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.aspp = ASPP((512 * block.expansion), 256, num_classes, phase=phase)
        self.uncertainty = nn.Sequential(nn.Conv2d(256, 400, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(400, 120, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(120, 64, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(64, 1, 3, 1, 1), nn.Sigmoid())
        # small-std normal init for convs, identity-style init for batch norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Stack `blocks` residual blocks, adding a (frozen-BN) downsample
        projection when shape or dilation requires it."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion)) or (dilation == 2) or (dilation == 4)):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), affine=affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def _make_pred_layer(self, block, inplanes, dilation_series, padding_series, num_classes):
        """Instantiate a prediction head block (kept for API compatibility)."""
        return block(inplanes, dilation_series, padding_series, num_classes)

    def forward(self, x):
        """Return (segmentation logits, uncertainty map); upsampled to input
        size when phase == 'train'."""
        (_, _, h, w) = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        (x, feat) = self.aspp(x)
        uncertainty = self.uncertainty(feat)
        if (self.phase == 'train'):
            x = nn.functional.interpolate(x, (h, w), mode='bilinear', align_corners=True)
            uncertainty = nn.functional.interpolate(uncertainty, (h, w), mode='bilinear', align_corners=True)
        return (x, uncertainty)

    def get_1x_lr_params_NOscale(self):
        """Yield backbone parameters (stem + layer1-4) trained at the base lr.

        NOTE(review): iterating `.modules()` and then each module's
        `.parameters()` yields nested parameters multiple times — kept as-is
        to preserve behavior; confirm whether de-duplication is intended.
        """
        b = []
        b.append(self.conv1)
        b.append(self.bn1)
        b.append(self.layer1)
        b.append(self.layer2)
        b.append(self.layer3)
        b.append(self.layer4)
        for i in range(len(b)):
            for j in b[i].modules():
                for k in j.parameters():
                    if k.requires_grad:
                        yield k

    def get_10x_lr_params(self):
        """Yield parameters of the newly added heads, trained at 10x the base lr.

        Fix: the original referenced `self.layer5`, which this model does not
        define (the classifier was replaced by `aspp` + `uncertainty`).
        """
        b = []
        b.append(self.aspp.parameters())
        b.append(self.uncertainty.parameters())
        for j in range(len(b)):
            for i in b[j]:
                yield i

    def optim_parameters(self, args):
        """Two parameter groups: backbone at base lr, new heads at 10x lr."""
        return [{'params': self.get_1x_lr_params_NOscale(), 'lr': args.learning_rate}, {'params': self.get_10x_lr_params(), 'lr': (10 * args.learning_rate)}]

    def adjust_learning_rate(self, learning_rate, optimizer, i):
        """Polynomial lr decay (power 0.9 over 250k steps); group 1 stays at 10x."""
        power = 0.9
        num_steps = 250000
        lr = (learning_rate * ((1 - (float(i) / num_steps)) ** power))
        optimizer.param_groups[0]['lr'] = lr
        if (len(optimizer.param_groups) > 1):
            optimizer.param_groups[1]['lr'] = (lr * 10)

    def CrossEntropy2d(self, predict, target, weight=None, size_average=True):
        """2-D cross entropy over valid pixels (target >= 0 and != 255).

        predict: (n, c, h, w) logits; target: (n, h, w) integer labels.
        Returns a zero tensor when no valid pixel remains.
        """
        assert (not target.requires_grad)
        assert (predict.dim() == 4)
        assert (target.dim() == 3)
        assert (predict.size(0) == target.size(0)), '{0} vs {1} '.format(predict.size(0), target.size(0))
        assert (predict.size(2) == target.size(1)), '{0} vs {1} '.format(predict.size(2), target.size(1))
        # fix: message previously formatted target.size(3) (IndexError on 3-D target)
        assert (predict.size(3) == target.size(2)), '{0} vs {1} '.format(predict.size(3), target.size(2))
        (n, c, h, w) = predict.size()
        target_mask = ((target >= 0) * (target != 255))
        target = target[target_mask]
        if (not target.data.dim()):
            # no valid pixels left: return a zero loss
            return Variable(torch.zeros(1))
        # (n, c, h, w) -> (n, h, w, c), then keep only valid pixels as (-1, c)
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view((- 1), c)
        # legacy size_average flag mapped onto the modern reduction argument
        loss = F.cross_entropy(predict, target, weight=weight, reduction='mean' if size_average else 'sum')
        return loss

    def set_dropout_train_mode(self):
        """Force all Dropout2d layers into train mode (e.g. for MC-dropout)."""
        for m in self.modules():
            if isinstance(m, nn.Dropout2d):
                m.train()
def spline_iter(xs, ys, is_training, spline_deg=2, filter_ratio=0.03, num_of_iter=10, bound=0.5):
    """Iteratively fit a univariate spline and drop the largest-residual points.

    Training removes a `filter_ratio` fraction per pass over 10 passes; eval
    removes one point in a single pass. Returns the filtered (xs, ys).

    NOTE(review): `bound` is converted to an x-VALUE but later compared against
    point indices (`i < bound`) — looks suspicious; confirm intent upstream.
    """
    bound = xs[int((len(xs) - 1) * bound)]
    passes = 10 if is_training else 1
    for _ in range(passes):
        fitted = UnivariateSpline(xs, ys, k=spline_deg)(xs)
        residuals = np.abs(ys - fitted)
        # indices ordered by residual, smallest first
        order = sorted(range(len(residuals)), key=lambda i: residuals[i])
        if is_training:
            dropped = set(order[int(round(-len(residuals) * filter_ratio)):])
        else:
            dropped = set(order[-1:])
        dropped = [i for i in dropped if i < bound]
        keep = [i for i in range(len(xs)) if i not in dropped]
        new_xs = np.zeros(len(xs) - len(dropped))
        new_ys = np.zeros(len(xs) - len(dropped))
        for pos, src in enumerate(keep):
            new_xs[pos], new_ys[pos] = xs[src], ys[src]
        xs, ys = new_xs, new_ys
    return (xs, ys)
class argument(object):
    """Lightweight holder that captures positional and keyword arguments verbatim
    for deferred use."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
def save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'):
    """Serialize `state` into the `checkpoint` directory; when `is_best`, also
    copy it to 'model_best.pth.tar'."""
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(checkpoint, 'model_best.pth.tar'))
def plot(x, y, yhat, loss, err, filename):
    """Plot the first four (input, target, prediction) triples in a 2x2 grid
    and save the figure to `filename`."""
    panel_ids = [221, 222, 223, 224]
    plt.figure(1, figsize=(10, 8))
    plt.subplots_adjust(top=0.88)
    for idx, panel in enumerate(panel_ids):
        xi = x.detach().numpy()[idx][0]
        yi = y.detach().numpy()[idx]
        yhi = yhat.detach().numpy()[idx]
        plt.subplot(panel)
        plt.plot(range(len(xi)), xi, color='b', label='x')
        plt.plot(range(len(yi)), yi, color='g', label='y')
        plt.plot(range(len(yhi)), yhi, color='r', label='yhat')
    plt.suptitle(f'loss {loss:.2f} error {err:.2f}')
    plt.legend()
    plt.tight_layout()
    plt.savefig(filename)
    plt.clf()
def get_labels(path):
    """Load NER labels (one per line) from `path`, ensuring 'O' is present and
    appending the CTC / segmentation pseudo-labels; with no path, return the
    default CoNLL label set."""
    if not path:
        return ['O', 'B-MISC', 'I-MISC', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
    with open(path, 'r') as fh:
        labels = fh.read().splitlines()
    if 'O' not in labels:
        # the outside tag must always be available (and first)
        labels = ['O'] + labels
    labels += ['CTC_PRED:0', 'CTC_PRED:1', 'pred_seg_label:O', 'pred_seg_label:Name']
    return labels
class NLayerDiscriminator(BaseNetwork):
    """PatchGAN discriminator with `opt.n_layers_D` conv stages (SPADE-style).

    Returns per-stage intermediate features when the GAN feature-matching loss
    is enabled, otherwise just the final patch logits.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register discriminator-specific CLI options.

        Fix: restored `@staticmethod` — the method takes no `self`/`cls` and is
        invoked on the class by the options framework.
        """
        parser.add_argument('--n_layers_D', type=int, default=4, help='# layers in each discriminator')
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))
        nf = opt.ndf
        input_nc = self.compute_D_input_nc(opt)
        norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
        sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, False)]]
        for n in range(1, opt.n_layers_D):
            nf_prev = nf
            nf = min((nf * 2), 512)
            # the last intermediate stage uses stride 1 to keep more resolution
            stride = (1 if (n == (opt.n_layers_D - 1)) else 2)
            sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=stride, padding=padw)), nn.LeakyReLU(0.2, False)]]
        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
        # register each stage as model<n> so children() iterates them in order
        for n in range(len(sequence)):
            self.add_module(('model' + str(n)), nn.Sequential(*sequence[n]))

    def compute_D_input_nc(self, opt):
        """Input channels: label map + image (+1 for dontcare, +1 for instance edges)."""
        input_nc = (opt.label_nc + opt.output_nc)
        if opt.contain_dontcare_label:
            input_nc += 1
        if (not opt.no_instance):
            input_nc += 1
        return input_nc

    def forward(self, input):
        results = [input]
        for submodel in self.children():
            intermediate_output = submodel(results[(- 1)])
            results.append(intermediate_output)
        get_intermediate_features = (not self.opt.no_ganFeat_loss)
        if get_intermediate_features:
            # drop the raw input; return every stage's features for feature matching
            return results[1:]
        else:
            return results[(- 1)]
def compute_embedding(backbone, data_loader):
    """Run the backbone over a loader and collect (embeddings, de-normalized
    images, class-name labels) as CPU tensors / a list of strings."""
    device = next(backbone.parameters()).device
    all_embs = []
    all_imgs = []
    labels = []
    class_names = data_loader.dataset.classes
    for batch_imgs, batch_ys in data_loader:
        batch_imgs = batch_imgs.to(device)
        all_embs.append(backbone(batch_imgs).detach().cpu())
        # undo the (x - 0.45) / 0.224 normalization for visualization
        all_imgs.append((batch_imgs * 0.224 + 0.45).cpu())
        labels.extend(class_names[idx] for idx in batch_ys.tolist())
    return (torch.cat(all_embs, dim=0), torch.cat(all_imgs, dim=0), labels)
def test_deterministic_tensorflow():
    """Seeding via deterministic.set_seed must make TF's RNG reproducible."""
    deterministic.set_seed(0)
    with tf.compat.v1.Session() as sess:
        sampled = sess.run(tf.random.uniform((5, 5), seed=0))
    # Reference values recorded from a seeded run.
    expected = np.array(
        [[0., 0.9701668, 0.8487642, 0., 0.],
         [0., 0.844468, 0., 0.5099584, 0.6552025],
         [0.9881507, 0., 0., 0., 0.],
         [0.4662125, 0.9912039, 0.6973165, 0.7741407, 0.8881662],
         [0., 0., 0., 0., 0.]],
        dtype=np.float32)
    assert np.allclose(sampled, expected)
# NOTE(review): the decorator line was mangled in the original
# ("(autouse=True, name='remove')" is a syntax error). Restored as a pytest
# fixture — the only construct matching the (autouse=..., name=...) keywords
# and the MonkeyPatch parameter; confirm against upstream.
@pytest.fixture(autouse=True, name='remove')
def _remove(monkeypatch: MonkeyPatch, logging_side_effect: Callable) -> MagicMock:
    """Autouse fixture replacing os.remove with a logging MagicMock.

    Returns the mock so tests can assert on recorded calls.
    """
    mock = MagicMock(side_effect=logging_side_effect('os.remove'))
    monkeypatch.setattr(os, 'remove', mock)
    return mock
class RandomSpectralKernel(AbstractSpectralKernel):
    """Spectral kernel built from randomly sampled Laplace–Beltrami eigenspaces.

    The kernel is rescaled by ``self.normalizer`` (k at a random point with
    itself) so the normalized kernel matches the measure's variance scale.
    """
    def __init__(self, measure, manifold):
        super().__init__(measure, manifold)
        manifold.generate_lb_eigenspaces(measure)
        # CONSISTENCY: the original duplicated compute_normalizer's body
        # inline here; delegate instead.
        self.compute_normalizer()
    def compute_normalizer(self):
        """Recompute k(p, p) at a random point; used to rescale the kernel."""
        point = self.manifold.rand()
        self.normalizer = self.forward(point, point, normalize=False)[(0, 0)]
    def forward(self, x, y=None, normalize=True):
        """Evaluate the (optionally normalized) kernel matrix k(x, y)."""
        if self.training:
            # Resample eigenspaces each training call; the normalizer must
            # then be refreshed to match the new sample.
            self.manifold.generate_lb_eigenspaces(self.measure)
            if normalize:
                self.compute_normalizer()
        if (y is None):
            y = x
        (x_, y_) = (self.manifold.to_group(x), self.manifold.to_group(y))
        x_yinv = self.manifold.pairwise_diff(x_, y_)
        f = self.manifold.lb_eigenspaces
        x_yinv_embed = f(x_yinv)
        eye_embed = f(self.manifold.id)
        # BUGFIX: the original line was missing the operator
        # ("x_yinv_embed torch.conj(eye_embed).T" — a syntax error); '@'
        # restores the inner product of each pairwise embedding with the
        # identity embedding, which the subsequent .view() flattening expects.
        cov_flatten = (x_yinv_embed @ torch.conj(eye_embed).T)
        cov = cov_flatten.view(x.size()[0], y.size()[0])
        if normalize:
            return ((self.measure.variance[0] * cov.real) / self.normalizer)
        else:
            return cov.real
def tensor_to_pil(tensor_imgs):
    """Convert image tensor(s) in [-1, 1] to a list of PIL images.

    Accepts either a batched tensor (N, C, H, W) or a list of such tensors,
    which are concatenated along the batch dimension first.
    """
    # IDIOM: isinstance instead of `type(...) == list` so list subclasses
    # are accepted as well.
    if isinstance(tensor_imgs, list):
        tensor_imgs = torch.cat(tensor_imgs)
    # Map [-1, 1] -> [0, 1] and clamp out-of-range values.
    tensor_imgs = ((tensor_imgs / 2) + 0.5).clamp(0, 1)
    to_pil = T.ToPILImage()
    return [to_pil(img) for img in tensor_imgs]
def train(dst_path):
    """Train a ResNet-v2 on CIFAR-10 and save the trained model to *dst_path*.

    Relies on module-level ``num_classes``, ``depth``, ``batch_size``,
    ``epochs``, ``lr_schedule`` and ``resnet_v2``.
    """
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    input_shape = x_train.shape[1:]
    # Scale pixels to [0, 1], then center by the training-set mean.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
    # One-hot targets.
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
    model = resnet_v2(input_shape=input_shape, depth=depth)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
        metrics=['accuracy'],
    )
    model.summary()
    # LR schedule plus plateau-based reduction.
    callbacks = [
        tf.keras.callbacks.ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=5e-07),
        tf.keras.callbacks.LearningRateScheduler(lr_schedule),
    ]
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks)
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    model.save(dst_path)
# NOTE(review): the original decorator line was mangled to
# "_task('audio_pretraining')"; restored to fairseq's task-registration
# decorator — confirm the exact name against the file's imports.
@register_task('audio_pretraining')
class AudioPretrainingTask(FairseqTask):
    """Fairseq task for self-supervised pretraining on raw audio waveforms."""
    # BUGFIX: the @staticmethod/@classmethod/@property decorators below were
    # stripped in the original; without them add_args and setup_task receive
    # the wrong first argument and target_dictionary must be called.
    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-rate', default=16000, type=int, help='target sample rate. audio files will be up/down sampled to this rate')
        parser.add_argument('--max-sample-size', default=None, type=int, help='max sample size to crop to for batching. default = min sample length')
        parser.add_argument('--min-sample-size', default=None, type=int, help='min sample size to crop to for batching. default = same as --max-sample-size')
    def __init__(self, args):
        super().__init__(args)
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task from parsed arguments."""
        return cls(args)
    def load_dataset(self, split, **kwargs):
        """Load <data>/<split>.tsv as a FileAudioDataset for this split."""
        manifest = os.path.join(self.args.data, '{}.tsv'.format(split))
        self.datasets[split] = FileAudioDataset(manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.min_sample_size)
    @property
    def target_dictionary(self):
        """Pretraining has no target vocabulary."""
        return None
class TinyDiscriminator(nn.Module):
    """Two-layer MLP discriminator with optional class-conditional projection.

    For ``n_classes > 1`` a projection term <features, embed(y)> is added to
    the unconditional logit (projection-discriminator style).
    """
    def __init__(self, n_features, n_classes=1, d_hidden=128):
        super().__init__()
        self.n_features = n_features
        self.n_classes = n_classes
        self.d_hidden = d_hidden
        # Unconditional path: features -> hidden -> scalar logit.
        self.l1 = nn.Linear(n_features, d_hidden)
        self.l2 = nn.Linear(d_hidden, 1)
        if n_classes > 1:
            # One learned hidden-sized vector per class.
            self.linear_y = nn.Embedding(n_classes, d_hidden)
    def forward(self, inputs, y=None):
        """Return a (batch, 1) logit; conditions on labels y when given."""
        hidden = F.leaky_relu(self.l1(inputs), 0.1, inplace=True)
        logit = self.l2(hidden)
        if y is not None:
            # Per-sample dot product between features and the class embedding.
            logit = logit + (hidden * self.linear_y(y)).sum(1, keepdim=True)
        return logit
class ClicEdmSingleGammaHitsPf(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for single-photon CLIC EDM4HEP events at hit level.

    Each example pairs per-element inputs 'X' (padded to the wider of the
    track/calo-hit feature sets) with two per-element targets: generator
    particles ('ygen') and particle-flow candidates ('ycand').
    """
    # Dataset version and its change history.
    VERSION = tfds.core.Version('1.5.0')
    RELEASE_NOTES = {'1.1.0': 'Remove track referencepoint feature', '1.2.0': 'Keep all interacting genparticles', '1.5.0': 'Regenerate with ARRAY_RECORD'}
    MANUAL_DOWNLOAD_INSTRUCTIONS = '\n    For the raw input files in ROOT EDM4HEP format, please see the citation above.\n\n    The processed tensorflow_dataset can also be downloaded from:\n    FIXME\n    '
    def __init__(self, *args, **kwargs):
        # Force the ARRAY_RECORD on-disk format regardless of caller settings.
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(ClicEdmSingleGammaHitsPf, self).__init__(*args, **kwargs)
    def _info(self) -> tfds.core.DatasetInfo:
        """Schema: ragged (None, F) float32 tensors for X, ygen and ycand."""
        return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, max(len(X_FEATURES_TRK), len(X_FEATURES_CH))), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=None, homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features_track=X_FEATURES_TRK, x_features_calohit=X_FEATURES_CH, y_features=Y_FEATURES))
    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        # Data must be placed manually; splits come from the gamma/ subdir.
        path = dl_manager.manual_dir
        return split_sample(Path((path / 'gamma/')))
    def _generate_examples(self, files):
        # Delegates example generation to the shared module-level helper.
        return generate_examples(files)
def main(_run, ds_name, train_test_split, verbose, gpu, sanity_dim, scaling, tfms, clf, grid_search, rescaling, disintegrations, num_augments, augment_out, num_projections, projection_channels, window, depth, sig_tfm, normalisation, save_best_model):
    """Sacred experiment entry point: train and evaluate signature models.

    Loads the dataset, preprocesses it, logs dataset statistics to the sacred
    run, trains the configured model family and evaluates it. Any exception
    is routed to handle_error so the run is marked failed instead of crashing.
    """
    try:
        # NOTE(review): `save_dir` is a module-level global not visible here.
        _run.save_dir = '{}/{}'.format(save_dir, _run._id)
        (ds_train, ds_test) = get_data(ds_name, train_test_split)
        (path_tfms, in_channels) = preprocess(ds_train, scaling, tfms)
        (ds_length, ds_dim, n_classes) = (ds_train.size(1), ds_train.size(2), ds_train.n_classes)
        (window_name, window_kwargs) = window
        # Derive classifier input size from the signature configuration.
        (in_channels_clf, signature_channels) = compute_input_size(in_channels, ds_length, window_name, window_kwargs, clf, disintegrations, augment_out, num_augments, num_projections, projection_channels, sig_tfm, depth, sanity_dim=sanity_dim)
        # Record dataset statistics on the sacred run.
        _run.log_scalar('ds_length', ds_length)
        _run.log_scalar('ds_dim', ds_dim)
        _run.log_scalar('n_classes', n_classes)
        _run.log_scalar('n_train_samples', ds_train.size(0))
        _run.log_scalar('n_test_samples', ds_test.size(0))
        _run.log_scalar('in_channels_clf', in_channels_clf)
        # Dispatch flags for the training/evaluation strategy.
        is_learnt = check_learnt(num_augments, augment_out, normalisation)
        is_sklearn = check_sklearn(clf)
        is_meta = check_meta(window_name, clf)
        model_args = {'in_channels': in_channels, 'signature_channels': signature_channels, 'out_channels': (n_classes if (n_classes > 2) else 1), 'ds_length': ds_length, 'disintegrations': disintegrations, 'num_augments': num_augments, 'augment_out': augment_out, 'num_projections': num_projections, 'projection_channels': projection_channels, 'window_name': window_name, 'window_kwargs': window_kwargs, 'sig_tfm': sig_tfm, 'depth': depth, 'rescaling': rescaling, 'normalisation': normalisation, 'clf': clf, 'in_channels_clf': in_channels_clf, 'gpu': gpu}
        model_dict = train_models(_run, model_args, path_tfms, ds_train, is_learnt, is_sklearn, is_meta, grid_search, verbose=verbose)
        evaluate_models(_run, model_dict, ds_train, ds_test, is_sklearn, n_classes, save_best_model)
        # Mark the run as having no error and completed.
        _run.log_scalar('error', None)
        set_completion_state(_run, True)
    except Exception as e:
        handle_error(_run, e, print_error=True)
class TFFunnelBaseModel():
    """Placeholder emitted when TensorFlow is not installed.

    Any attempt to construct it or load weights raises via requires_tf,
    telling the user to install TensorFlow.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    # NOTE(review): upstream transformers defines from_pretrained on dummy
    # objects as a classmethod; the decorator may have been stripped here.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def extract_spatial_feats(feat_dir, out_dir):
    """Unpack GQA spatial features from 16 h5 shards into per-image .npy files.

    Reads gqa_spatial_info.json to learn which shard each image lives in,
    then saves each image's features as a (1, 7, 7, 2048) array under
    <out_dir>/<image_id>.npy.
    """
    info_json_path = os.path.join(feat_dir, 'gqa_spatial_info.json')
    # BUGFIX: the original leaked the json file handle and every h5 file;
    # use context managers so they are closed deterministically.
    with open(info_json_path, 'r') as info_file:
        info_dict = json.load(info_file)
    # Group (image_id, meta) pairs by the shard index they are stored in.
    file_mapping = {k: [] for k in range(16)}
    for (k, v) in info_dict.items():
        file_mapping[v['file']] += [(k, v)]
    for i in range(16):
        file_path = os.path.join(feat_dir, 'gqa_spatial_{}.h5'.format(i))
        print('Processing file {}'.format(file_path))
        with h5py.File(file_path, 'r') as feat_db:
            for (image_id, meta) in tqdm.tqdm(file_mapping[i]):
                to_save = feat_db['features'][meta['idx']]
                to_save = to_save.reshape(1, 7, 7, 2048)
                save_path = os.path.join(out_dir, (str(image_id) + '.npy'))
                np.save(save_path, to_save)
def _import_file(module_name, file_path, make_importable=False):
    """Load a Python module directly from *file_path* under *module_name*.

    When *make_importable* is true, the module is also registered in
    sys.modules so later ``import module_name`` statements resolve to it.
    """
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    if make_importable:
        sys.modules[module_name] = loaded
    return loaded
def main():
    """Entry point: redirect stdout to a log file and run the configured runner.

    Returns 0 unconditionally; runner failures are logged rather than
    propagated (preserving the original best-effort contract).
    """
    (args, config) = parse_args_and_config()
    log_progress = open(os.path.join(args.log, 'log_progress'), 'w')
    sys.stdout = log_progress
    logging.info('Config =')
    print(('>' * 80))
    print(config)
    print(('<' * 80))
    try:
        # SECURITY: eval() on a CLI-supplied runner name executes arbitrary
        # code; acceptable only if args.runner comes from a trusted operator.
        # Consider a whitelist dict of runner classes instead.
        runner = eval(args.runner)(args, config)
        runner.run(log_progress)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        logging.error(traceback.format_exc())
    finally:
        # BUGFIX: the log file was only closed on the error path; always
        # restore stdout and close it so buffered output is flushed.
        sys.stdout = sys.__stdout__
        log_progress.close()
    return 0
def get_model_modules():
    """Return every `modeling*` submodule of transformers.models.

    Skips the shared auto/utility modules listed in _ignore_modules and
    returns only attributes that are actual modules.
    """
    _ignore_modules = ['modeling_auto', 'modeling_encoder_decoder', 'modeling_marian', 'modeling_mmbt', 'modeling_outputs', 'modeling_retribert', 'modeling_utils', 'modeling_flax_auto', 'modeling_flax_encoder_decoder', 'modeling_flax_utils', 'modeling_speech_encoder_decoder', 'modeling_flax_vision_encoder_decoder', 'modeling_transfo_xl_utilities', 'modeling_tf_auto', 'modeling_tf_encoder_decoder', 'modeling_tf_outputs', 'modeling_tf_pytorch_utils', 'modeling_tf_utils', 'modeling_tf_transfo_xl_utilities', 'modeling_tf_vision_encoder_decoder', 'modeling_vision_encoder_decoder']
    modules = []
    for model_name in dir(transformers.models):
        if model_name.startswith('__'):
            continue
        model_pkg = getattr(transformers.models, model_name)
        for attr_name in dir(model_pkg):
            if (not attr_name.startswith('modeling')) or (attr_name in _ignore_modules):
                continue
            candidate = getattr(model_pkg, attr_name)
            if inspect.ismodule(candidate):
                modules.append(candidate)
    return modules
class SEWDConfig(PretrainedConfig):
    """Configuration for the SEW-D speech model.

    Holds the hyper-parameters of the convolutional feature extractor, the
    DeBERTa-style encoder (relative attention), SpecAugment masking, and the
    CTC / sequence-classification heads. The three conv_* tuples must all
    have the same length.
    """
    model_type = 'sew-d'
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=('p2c', 'c2p'), norm_rel_ebd='layer_norm', hidden_act='gelu_python', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-07, feature_layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Convolutional feature extractor.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        # Transformer encoder (DeBERTa-style relative attention).
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        # Dropout / regularization.
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            # BUGFIX: the original message ran words together
            # ("incorrect.It", "conv_stride)= {"); spacing fixed.
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # SpecAugment masking.
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # CTC and classification heads.
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    # NOTE(review): upstream transformers declares this as a @property; the
    # decorator appears stripped here and is restored — confirm callers.
    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the conv feature extractor (input samples per frame)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
def read_image_list_file(image_list_file):
    """Yield each line of *image_list_file*, stripped and with '.jpg' removed."""
    with open(image_list_file, 'r') as list_file:
        for raw_line in list_file:
            yield raw_line.strip().replace('.jpg', '')
def cli_main():
    """Build the argument parser, parse CLI arguments, and hand off to main()."""
    arg_parser = make_parser()
    parsed_args = options.parse_args_and_arch(arg_parser)
    main(parsed_args)
def train():
    """Build the train/validation graphs and run the training loop.

    All settings come from absl FLAGS; supports resuming a partially
    completed run via the step counter recovered from FLAGS.logdir.
    """
    logging.info('Training phase.')
    rng = np.random.RandomState(FLAGS.seed)
    # Resume support: number of steps a previous run already finished.
    n_completed_steps = get_number_of_already_completed_steps(FLAGS.logdir)
    t_train = build_graph(TRAIN, rng=util.new_rng(rng), n_epochs=FLAGS.epochs, n_completed_steps=n_completed_steps)
    logging.info(f'Number of trainable parameters: {tfu.count_trainable_params():,}')
    # Only build the validation graph when periodic validation is enabled.
    t_valid = (build_graph(VALID, shuffle=True, rng=util.new_rng(rng)) if FLAGS.validate_period else None)
    helpers.run_train_loop(t_train.train_op, checkpoint_dir=FLAGS.checkpoint_dir, load_path=FLAGS.load_path, hooks=make_training_hooks(t_train, t_valid), init_fn=get_init_fn())
    logging.info('Ended training.')
def load_topset(topset_path) -> Dict[(str, OpConfig)]:
    """Read the 'topset' section of an OmegaConf file into OpConfig objects.

    Each operator entry's in/out dtype name lists are converted into tuples
    of DType enum members.
    """
    conf = OmegaConf.load(topset_path)['topset']
    def _dtype_tuples(dtype_lists):
        # Each inner list of dtype names becomes one tuple of DType members.
        return [tuple(DType[name] for name in names) for names in dtype_lists]
    return {op_name: OpConfig(in_dtypes=_dtype_tuples(spec['in_dtypes']),
                              out_dtypes=_dtype_tuples(spec['out_dtypes']))
            for (op_name, spec) in conf.items()}
def main(config='config/blendcnn/mrpc/eval.json', args=None):
    """Train, evaluate, quantize (INC/IPEX) or benchmark a BlendCNN model.

    The branch taken depends on cfg.mode ('train'/'eval') and on the
    args.tune / args.int8 / args.performance / args.accuracy flags.
    """
    # Load the four JSON config files (run, data, model, optimizer).
    cfg = Config(**json.load(open(config, 'r')))
    cfg_data = data.Config(**json.load(open(cfg.cfg_data, 'r')))
    cfg_model = models.Config(**json.load(open(cfg.cfg_model, 'r')))
    cfg_optim = trainer.Config(**json.load(open(cfg.cfg_optim, 'r')))
    set_seeds(cfg.seed)
    # Build the tokenized, tensorized dataset and its loader.
    TaskDataset = data.get_class(cfg_data.task)
    tokenizer = tokenization.FullTokenizer(vocab_file=cfg_data.vocab_file, do_lower_case=True)
    dataset = TaskDataset(args.dataset_location, pipelines=[data.RemoveSymbols('\\'), data.Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize), data.AddSpecialTokensWithTruncation(cfg_data.max_len), data.TokenIndexing(tokenizer.convert_tokens_to_ids, TaskDataset.labels, cfg_data.max_len)], n_data=None)
    dataset = TensorDataset(*dataset.get_tensors())
    data_iter = DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
    # Model with pretrained embeddings, optimizer and training loop.
    model = models.BlendCNN(cfg_model, len(TaskDataset.labels))
    checkpoint.load_embedding(model.embed, cfg.pretrain_file)
    optimizer = optim.optim4GPU(cfg_optim, model)
    train_loop = trainer.TrainLoop(cfg_optim, model, data_iter, optimizer, cfg.save_dir, get_device())
    def get_loss(model, batch, global_step):
        # Cross-entropy training loss for one batch.
        (input_ids, segment_ids, input_mask, label_id) = batch
        logits = model(input_ids, segment_ids, input_mask)
        loss = nn.CrossEntropyLoss()(logits, label_id)
        return loss
    def evaluate(model, batch):
        # Returns (mean accuracy, per-example 0/1 correctness) for one batch.
        (input_ids, segment_ids, input_mask, label_id) = batch
        logits = model(input_ids, segment_ids, input_mask)
        (_, label_pred) = logits.max(1)
        result = (label_pred == label_id).float()
        accuracy = result.mean()
        return (accuracy, result)
    class Bert_DataLoader(object):
        # Thin adapter yielding ((input_ids, segment_ids, input_mask), labels)
        # tuples in the shape neural_compressor expects.
        def __init__(self, loader=None, model_type=None, device='cpu', batch_size=1):
            self.loader = loader
            self.model_type = model_type
            self.device = device
            self.batch_size = batch_size
        def __iter__(self):
            for batch in self.loader:
                batch = tuple((t.to(self.device) for t in batch))
                outputs = {'output_all': (batch[0], batch[1], batch[2]), 'labels': batch[3]}
                (yield (outputs['output_all'], outputs['labels']))
    eval_dataloader = Bert_DataLoader(loader=data_iter, batch_size=args.batch_size)
    def benchmark(model):
        # Latency/throughput measurement on synthetic random token batches;
        # the first args.warmup batches are excluded from timing.
        total_samples = 0
        total_time = 0
        index = 0
        class RandomDataset(object):
            def __init__(self, size, shape):
                self.len = size
                self.input_ids = torch.randint(low=0, high=30522, size=(size, shape), dtype=torch.int64)
                self.segment_ids = torch.randint(low=0, high=1, size=(size, shape), dtype=torch.int64)
                self.input_mask = torch.randint(low=0, high=1, size=(size, shape), dtype=torch.int64)
                self.data = (self.input_ids, self.segment_ids, self.input_mask)
            def __getitem__(self, index):
                return (self.data[0][index], self.data[1][index], self.data[2][index])
            def __len__(self):
                return self.len
        rand_loader = DataLoader(dataset=RandomDataset(size=5000, shape=128), batch_size=args.batch_size, shuffle=True)
        for batch in rand_loader:
            index += 1
            tic = time.time()
            with torch.no_grad():
                (input_ids, segment_ids, input_mask) = batch
                _ = model(*batch)
            if (index > args.warmup):
                total_samples += batch[0].size()[0]
                total_time += (time.time() - tic)
        throughput = (total_samples / total_time)
        print(('Latency: %.3f ms' % ((1 / throughput) * 1000)))
        print(('Throughput: %.3f images/sec' % throughput))
    def eval_func(model):
        # Full-dataset accuracy; used directly and as the INC eval hook.
        results = []
        index = 0
        model.eval()
        for (batch, label) in eval_dataloader:
            index += 1
            with torch.no_grad():
                (accuracy, result) = evaluate(model, (*batch, label))
            results.append(result)
        total_accuracy = torch.cat(results).mean().item()
        print(('Accuracy: %.3f ' % total_accuracy))
        return total_accuracy
    if (cfg.mode == 'train'):
        train_loop.train(get_loss, cfg.model_file, None)
        print('Training has been done properly.')
    elif (cfg.mode == 'eval'):
        per_device_eval_batch_size = 8
        max_seq_length = 384
        if args.tune:
            # Post-training static quantization via Intel Neural Compressor
            # with the IPEX backend; saves the quantized model and exits.
            ipex.nn.utils._model_convert.replace_dropout_with_identity(model)
            from neural_compressor.config import PostTrainingQuantConfig
            from neural_compressor import quantization
            dummy_input_ids = torch.ones((per_device_eval_batch_size, max_seq_length), dtype=torch.long)
            dummy_token_type_ids = torch.ones((per_device_eval_batch_size, max_seq_length), dtype=torch.long)
            dummy_attention_mask = torch.ones((per_device_eval_batch_size, max_seq_length), dtype=torch.long)
            example_inputs = (dummy_input_ids, dummy_attention_mask, dummy_token_type_ids)
            conf = PostTrainingQuantConfig(backend='ipex', calibration_sampling_size=800, example_inputs=example_inputs)
            q_model = quantization.fit(model, conf, calib_dataloader=eval_dataloader, eval_func=eval_func)
            q_model.save(cfg.save_dir)
            exit(0)
        model.eval()
        if args.int8:
            # Load a previously tuned int8 checkpoint.
            from neural_compressor.utils.pytorch import load
            model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model)
        else:
            # FP32 path: IPEX-optimize, then JIT trace/freeze for inference.
            from neural_compressor.adaptor.pytorch import get_example_inputs
            example_inputs = get_example_inputs(model, eval_dataloader)
            model = ipex.optimize(model=model)
            with torch.no_grad():
                model = torch.jit.trace(model, example_inputs, strict=False)
                model = torch.jit.freeze(model)
        if (args.performance or args.accuracy):
            if args.performance:
                benchmark(model)
            else:
                eval_func(model)
            exit(0)
class FGSM(Attacker):
    """Fast Gradient Sign Method: one-step L-infinity adversarial perturbation."""
    def __init__(self, eps=0.15, clip_max=0.5, clip_min=(- 0.5)):
        super(FGSM, self).__init__(clip_max, clip_min)
        self.eps = eps
    def perturb(self, model, x, y):
        """Return an adversarial version of the single example (x, y)."""
        model.eval()
        # Add a leading batch dimension and track gradients w.r.t. the input.
        batched_x = torch.unsqueeze(x, 0)
        batched_y = torch.unsqueeze(y, 0)
        batched_x.requires_grad_()
        loss = F.cross_entropy(model(batched_x), batched_y)
        loss.backward()
        # Step in the gradient-sign direction, then clip to the valid range.
        x_adv = batched_x + (self.eps * torch.sign(batched_x.grad.data))
        x_adv.clamp_(self.clip_min, self.clip_max)
        x_adv.squeeze_(0)
        return x_adv.detach()
def vat(network, x, eps_list, xi=10, Ip=1):
    """Virtual-adversarial perturbation: returns (x + eps*d, eps*d).

    Finds, via Ip power-iteration steps, the direction d around x that most
    changes the network output (measured by `distance`), then scales it by
    a per-sample radius derived from eps_list.
    # NOTE(review): d is sampled with exactly two dims (batch, features), so
    # x is assumed to be a flat 2-D feature tensor — confirm with callers.
    """
    with torch.no_grad():
        # Clean prediction, treated as a constant target below.
        y = network(x)
    d = torch.randn((x.size()[0], x.size()[1]))
    d = F.normalize(d, p=2, dim=1)  # unit-norm random direction per sample
    for ip in range(Ip):
        d_var = d
        d_var = d_var.to(x.device)
        d_var.requires_grad_(True)
        # update_batch_stats=False: keep BN statistics unpolluted by probes.
        y_p = network((x + (xi * d_var)), update_batch_stats=False)
        kl_loss = distance(y, y_p)
        kl_loss.backward()
        # Power iteration: the gradient direction approximates the dominant
        # eigenvector of the local curvature.
        d = d_var.grad
        d = F.normalize(d, p=2, dim=1)
    d_var = d
    d_var = d_var.to(x.device)
    eps = (0.25 * eps_list)  # per-sample perturbation radius
    eps = eps.view((- 1), 1)
    return ((x + (eps * d_var)).detach(), (eps * d_var).detach())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.