code stringlengths 101 5.91M |
|---|
class InvertibleModuleWrapper(nn.Module):
    """nn.Module adapter that runs an invertible function through
    InvertibleCheckpointFunction so inputs can be freed during forward
    and recomputed from the inverse on the backward pass.

    `fn` must expose `forward`, `inverse` and `parameters`; setting
    `disable=True` bypasses checkpointing and calls `fn` directly.
    """

    def __init__(self, fn, keep_input=False, keep_input_inverse=False, num_bwd_passes=1, disable=False, preserve_rng_state=False):
        super().__init__()
        self.disable = disable
        self.keep_input = keep_input
        self.keep_input_inverse = keep_input_inverse
        self.num_bwd_passes = num_bwd_passes
        self.preserve_rng_state = preserve_rng_state
        self._fn = fn

    def _checkpointed(self, primary, secondary, keep, args):
        """Route `args` through the invertible checkpoint, appending the
        wrapped function's trainable parameters so autograd sees them."""
        trainable = tuple(p for p in self._fn.parameters() if p.requires_grad)
        return InvertibleCheckpointFunction.apply(
            primary, secondary, keep, self.num_bwd_passes,
            self.preserve_rng_state, len(args), *(args + trainable))

    @staticmethod
    def _squeeze(result):
        # Unwrap single-element tuples so callers receive a bare value.
        if isinstance(result, tuple) and len(result) == 1:
            return result[0]
        return result

    def forward(self, *xin):
        """Forward pass; checkpointed unless `disable` is set."""
        if self.disable:
            out = self._fn(*xin)
        else:
            out = self._checkpointed(self._fn.forward, self._fn.inverse, self.keep_input, xin)
        return self._squeeze(out)

    def inverse(self, *yin):
        """Inverse pass; checkpointed unless `disable` is set."""
        if self.disable:
            out = self._fn.inverse(*yin)
        else:
            out = self._checkpointed(self._fn.inverse, self._fn.forward, self.keep_input_inverse, yin)
        return self._squeeze(out)
def main():
    """De-normalize a saved HumanML3D feature file, recover joint
    positions, and render them to an mp4 animation."""
    data_root = '../datasets/humanml3d'
    features_path = 'in.npy'
    animation_save_path = 'in.mp4'
    fps = 20
    # Undo dataset-wide normalization with the stored statistics.
    mean = np.load(pjoin(data_root, 'Mean.npy'))
    std = np.load(pjoin(data_root, 'Std.npy'))
    features = np.load(features_path) * std + mean
    # Recover 22-joint positions from the rotation-invariant coordinates.
    joints = recover_from_ric(torch.tensor(features), 22).cpu().numpy()
    # Scale factor 1.3 — presumably a visualization size tweak; confirm.
    joints = joints * 1.3
    plot_3d_motion(animation_save_path, joints, title='input', fps=fps)
class Mlp(nn.Module):
    """Fully-connected network with configurable activations and init.

    Hidden weights are initialized with `hidden_init` and biases filled
    with `b_init_value`; the output layer is initialized uniformly in
    [-init_w, init_w]. When `layer_norm` is on, LayerNorm is applied
    after every hidden layer except the last one.

    NOTE(review): `layer_norm_kwargs` is accepted but never forwarded to
    LayerNorm below — confirm whether that is intentional.
    """

    def __init__(self, hidden_sizes, output_size, input_size, init_w=0.003, hidden_activation=F.relu, output_activation=identity, hidden_init=ptu.fanin_init, b_init_value=0.1, layer_norm=False, layer_norm_kwargs=None):
        super().__init__()
        if layer_norm_kwargs is None:
            layer_norm_kwargs = dict()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        self.layer_norm = layer_norm
        self.fcs = nn.ModuleList()
        self.layer_norms = []
        previous_size = input_size
        for layer_idx, width in enumerate(hidden_sizes):
            layer = nn.Linear(previous_size, width)
            previous_size = width
            hidden_init(layer.weight)
            layer.bias.data.fill_(b_init_value)
            self.fcs.append(layer)
            if self.layer_norm:
                norm = LayerNorm(width)
                # Assigning via __setattr__ lets nn.Module register the
                # norm (and its parameters) under a unique name.
                self.__setattr__('layer_norm{}'.format(layer_idx), norm)
                self.layer_norms.append(norm)
        self.last_fc = nn.Linear(previous_size, output_size)
        self.last_fc.weight.data.uniform_(-init_w, init_w)
        self.last_fc.bias.data.uniform_(-init_w, init_w)

    def forward(self, input, return_preactivations=False):
        """Apply all layers; optionally also return the pre-activation of
        the output layer as (output, preactivation)."""
        h = input
        last_hidden_idx = len(self.fcs) - 1
        for layer_idx, layer in enumerate(self.fcs):
            h = layer(h)
            # LayerNorm is skipped on the final hidden layer.
            if self.layer_norm and layer_idx < last_hidden_idx:
                h = self.layer_norms[layer_idx](h)
            h = self.hidden_activation(h)
        preactivation = self.last_fc(h)
        output = self.output_activation(preactivation)
        if return_preactivations:
            return (output, preactivation)
        return output
class ProbeRegimen(InitYAMLObject):
    """Training regimen for a probe over a representation model.

    Constructed from YAML via the '!ProbeRegimen' tag. Trains with Adam,
    halves the learning rate when dev loss plateaus, checkpoints the probe
    whenever mean dev loss improves, and early-stops after three dev
    evaluations without improvement.
    """
    yaml_tag = '!ProbeRegimen'
    def __init__(self, args, max_epochs, params_path, reporting_root, max_gradient_steps=(- 1), eval_dev_every=(- 1)):
        # params_path is a filename relative to reporting_root where the
        # best probe parameters are saved.
        # max_gradient_steps == -1 means "no cap" (mapped to sys.maxsize).
        self.args = args
        self.max_epochs = max_epochs
        self.reporting_root = reporting_root
        self.params_name = params_path
        self.max_gradient_steps = (sys.maxsize if (max_gradient_steps == (- 1)) else max_gradient_steps)
        # NOTE(review): self.eval_dev_every is stored but train_until_convergence
        # uses its own gradient_steps_between_eval argument instead — confirm.
        self.eval_dev_every = eval_dev_every
        self.loss = CustomCrossEntropyLoss(args)
    def train_until_convergence(self, probe, model, loss, train_dataset, dev_dataset, gradient_steps_between_eval, finetune=False):
        """Train `probe` (and `model` as well when finetune=True).

        Evaluates on dev every `gradient_steps_between_eval` gradient steps
        and returns the list of (summed) dev losses, one per evaluation.
        NOTE(review): the `loss` parameter is unused; self.loss is used.
        """
        self.params_path = os.path.join(self.reporting_root, self.params_name)
        if finetune:
            # Much smaller LR when model weights are updated too.
            self.optimizer = optim.Adam((list(probe.parameters()) + list(model.parameters())), lr=1e-05, weight_decay=0)
        else:
            self.optimizer = optim.Adam(probe.parameters(), lr=0.001, weight_decay=0)
        # patience=0: the LR is halved as soon as dev loss stops decreasing.
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.5, patience=0)
        min_dev_loss = sys.maxsize
        min_dev_loss_epoch = (- 1)
        gradient_steps = 0
        eval_dev_every = gradient_steps_between_eval
        eval_index = 0
        min_dev_loss_eval_index = (- 1)
        eval_dev_losses = []
        for epoch_index in tqdm(range(self.max_epochs), desc='[training]'):
            epoch_train_loss = 0
            epoch_train_loss_count = 0
            for batch in tqdm(train_dataset, desc='[training]'):
                probe.train()
                self.optimizer.zero_grad()
                (input_batch, output_batch, sentences) = batch
                word_representations = model(input_batch)
                predictions = probe(word_representations)
                # Loss returns (summed loss, token/sample count) per batch.
                (batch_loss, count) = self.loss(predictions, output_batch)
                batch_loss.backward()
                epoch_train_loss += batch_loss.detach().cpu().numpy()
                epoch_train_loss_count += count.detach().cpu().numpy()
                self.optimizer.step()
                gradient_steps += 1
                # Dev evaluation happens mid-epoch, every eval_dev_every steps.
                if ((gradient_steps % eval_dev_every) == 0):
                    eval_index += 1
                    if (gradient_steps >= self.max_gradient_steps):
                        tqdm.write('Hit max gradient steps; stopping')
                        return eval_dev_losses
                    epoch_dev_loss = 0
                    epoch_dev_loss_count = 0
                    # NOTE(review): dev pass runs without torch.no_grad();
                    # gradients are cleared via zero_grad but graph memory
                    # is still allocated — confirm intended.
                    for batch in tqdm(dev_dataset, desc='[dev batch]'):
                        self.optimizer.zero_grad()
                        probe.eval()
                        (input_batch, output_batch, _) = batch
                        word_representations = model(input_batch)
                        predictions = probe(word_representations)
                        (batch_loss, count) = self.loss(predictions, output_batch)
                        epoch_dev_loss += batch_loss.detach().cpu().numpy()
                        epoch_dev_loss_count += count.detach().cpu().numpy()
                    # NOTE(review): the scheduler sees the *summed* dev loss
                    # while the improvement test below uses the mean — confirm.
                    self.scheduler.step(epoch_dev_loss)
                    tqdm.write('[epoch {}] Train loss: {}, Dev loss: {}'.format(epoch_index, (epoch_train_loss / epoch_train_loss_count), (epoch_dev_loss / epoch_dev_loss_count)))
                    eval_dev_losses.append(epoch_dev_loss)
                    # Checkpoint if mean dev loss improved by more than 1e-3.
                    if ((epoch_dev_loss / epoch_dev_loss_count) < (min_dev_loss - 0.001)):
                        torch.save(probe.state_dict(), self.params_path)
                        min_dev_loss = (epoch_dev_loss / epoch_dev_loss_count)
                        min_dev_loss_epoch = epoch_index
                        min_dev_loss_eval_index = eval_index
                        tqdm.write('Saving probe parameters to {}'.format(self.params_path))
                    elif (min_dev_loss_eval_index <= (eval_index - 3)):
                        # No improvement for 3 consecutive evaluations.
                        tqdm.write('Early stopping')
                        return eval_dev_losses
        return eval_dev_losses
    def predict(self, probe, model, dataset):
        """Run the probe over `dataset`; return a list of per-batch
        prediction tensors, detached and moved to CPU."""
        probe.eval()
        predictions_by_batch = []
        for batch in tqdm(dataset, desc='[predicting]'):
            (input_batch, label_batch, _) = batch
            word_representations = model(input_batch)
            predictions = probe(word_representations)
            predictions_by_batch.append(predictions.detach().cpu())
        return predictions_by_batch
def initialize_scores(model, init_type):
    """Re-initialize every `popup_scores` tensor found on `model`'s modules.

    Supported init_type values: 'kaiming_uniform', 'kaiming_normal',
    'xavier_uniform', 'xavier_normal' (Xavier variants use the relu gain).
    Any other value leaves the scores untouched, matching the original's
    silent fall-through.
    """
    print(f'Initialization relevance score with {init_type} initialization')
    relu_gain = nn.init.calculate_gain('relu')
    # Dispatch table instead of an if/elif chain.
    initializers = {
        'kaiming_uniform': nn.init.kaiming_uniform_,
        'kaiming_normal': nn.init.kaiming_normal_,
        'xavier_uniform': lambda t: nn.init.xavier_uniform_(t, gain=relu_gain),
        'xavier_normal': lambda t: nn.init.xavier_normal_(t, gain=relu_gain),
    }
    init_fn = initializers.get(init_type)
    for module in model.modules():
        if hasattr(module, 'popup_scores') and init_fn is not None:
            init_fn(module.popup_scores)
def get_affine_transform_for_beta_dist(target_min, target_max):
    """Build an AffineTransformEx with loc=target_min and
    scale=target_max - target_min (presumably to map a Beta
    distribution's [0, 1] support onto [target_min, target_max] — confirm).

    Bounds may be scalars or numpy arrays; arrays are validated
    element-wise.
    """
    if isinstance(target_min, (np.ndarray, np.generic)):
        assert np.all(target_min <= target_max)
    else:
        assert target_min <= target_max
    loc = torch.tensor(target_min)
    scale = torch.tensor(target_max - target_min)
    return AffineTransformEx(loc=loc, scale=scale)
def print_eval(prepare_data_fun, out_label):
    """Evaluate the best saved checkpoint on a dataset built by
    `prepare_data_fun` and write predictions under snapshot_dir.

    Emits 'best_model_predict_<out_label>.json' and the matching .pkl.
    Relies on module-level globals: snapshot_dir, cfg, build_model,
    run_model, print_result, DataLoader.
    """
    model_file = os.path.join(snapshot_dir, 'best_model.pth')
    pkl_res_file = os.path.join(snapshot_dir, f'best_model_predict_{out_label}.pkl')
    out_file = os.path.join(snapshot_dir, f'best_model_predict_{out_label}.json')
    test_set = prepare_data_fun(**cfg['data'], **cfg['model'], verbose=True)
    test_loader = DataLoader(test_set, shuffle=False, batch_size=cfg.data.batch_size, num_workers=cfg.data.num_workers)
    answer_dict = test_set.answer_dict
    model = build_model(cfg, test_set)
    model.load_state_dict(torch.load(model_file)['state_dict'])
    model.eval()
    question_ids, soft_max_result = run_model(model, test_loader, answer_dict.UNK_idx)
    print_result(question_ids, soft_max_result, answer_dict, out_file, json_only=False, pkl_res_file=pkl_res_file)
def fftscore_setup():
    """Prime pyFFTW for repeated transforms.

    Enables the interface-level plan cache and keeps cached plans alive
    for 8 seconds so back-to-back FFT calls reuse their planning work.
    """
    fft_cache = pyfftw.interfaces.cache
    fft_cache.enable()
    fft_cache.set_keepalive_time(8.0)
def combine_vit(vit_result, sent_lst, out_path):
    """Pair each Viterbi result with its sentence and write the
    space-joined segmentation, one line per result, to `out_path`."""
    print(len(vit_result), len(sent_lst))
    segmented = []
    # Each entry carries its content plus the index of its sentence.
    for cont, sent_idx in vit_result:
        target_sentence = sent_lst[sent_idx]
        segmented.append(' '.join(visual_viterb(cont, target_sentence)))
    with open(out_path, 'w') as out_file:
        for line in segmented:
            print(line, file=out_file)
def _in_projection(q: Tensor, k: Tensor, v: Tensor, w_q: Tensor, w_k: Tensor, w_v: Tensor, b_q: Optional[Tensor]=None, b_k: Optional[Tensor]=None, b_v: Optional[Tensor]=None) -> Tuple[(Tensor, Tensor, Tensor)]:
(Eq, Ek, Ev) = (q.size((- 1)), k.size((- 1)), v.size((- 1)))
assert (Eq == Ek == Ev), 'query, key, and value must have the same dimension'
(qdim, kdim, vdim) = (w_q.shape[0], w_k.shape[0], w_v.shape[0])
assert (qdim == kdim), 'query and key must be projected to the same dimension'
assert (w_q.shape == (qdim, Eq)), f'expecting query weights shape of {(qdim, Eq)}, but got {w_q.shape}'
assert (w_k.shape == (kdim, Ek)), f'expecting key weights shape of {(kdim, Ek)}, but got {w_k.shape}'
assert (w_v.shape == (vdim, Ev)), f'expecting value weights shape of {(vdim, Ev)}, but got {w_v.shape}'
assert ((b_q is None) or (b_q.shape == (qdim,))), f'expecting query bias shape of {(qdim,)}, but got {b_q.shape}'
assert ((b_k is None) or (b_k.shape == (kdim,))), f'expecting key bias shape of {(kdim,)}, but got {b_k.shape}'
assert ((b_v is None) or (b_v.shape == (vdim,))), f'expecting value bias shape of {(vdim,)}, but got {b_v.shape}'
return (F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)) |
def interleave_offsets(batch, nu):
    """Split `batch` items into nu+1 nearly-equal groups and return the
    cumulative boundaries [0, ..., batch].

    The remainder (batch mod nu+1) is distributed one item at a time to
    the trailing groups, so later groups may be one element larger.
    """
    base, remainder = divmod(batch, nu + 1)
    sizes = [base] * (nu + 1)
    for j in range(remainder):
        sizes[-j - 1] += 1
    offsets = [0]
    for size in sizes:
        offsets.append(offsets[-1] + size)
    assert offsets[-1] == batch
    return offsets
def delta_function(r0):
    """Return a discrete delta 'pdf' centered at `r0`.

    The returned callable takes one coordinate array per dimension and
    yields an array of zeros (same shape as the first argument) with a 1
    at the grid point closest to `r0` in squared Euclidean distance.
    """
    center = np.atleast_1d(r0)

    def pdf(*grids):
        out = np.zeros_like(grids[0])
        # Squared distance to the center, summed over dimensions.
        sq_dist = sum((center[d] - grids[d]) ** 2 for d in range(len(grids)))
        nearest = np.unravel_index(np.argmin(sq_dist), sq_dist.shape)
        out[nearest] = 1
        return out

    return pdf
def _demucs(pretrained, url, **kwargs):
    """Construct a Demucs model, optionally loading pretrained weights.

    When `pretrained` is truthy, weights are fetched from `url` via the
    torch hub cache and mapped onto CPU before loading.
    """
    model = Demucs(**kwargs)
    if not pretrained:
        return model
    state = torch.hub.load_state_dict_from_url(url, map_location='cpu')
    model.load_state_dict(state)
    return model
def train(train_loader, model, criterion, optimizer, metric, epoch, args):
    """Run one epoch of segmentation training.

    Updates `model` in place while accumulating loss/mIoU statistics in
    AverageMeters and printing progress every `args.print_freq` batches.
    NOTE(review): 19 output classes and a 1024x1024 label resolution are
    hard-coded below — presumably a Cityscapes-style setup; confirm
    before reusing with other datasets.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    mIoU = AverageMeter('mIoU', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, mIoU, prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader for this batch.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        output = model(input)
        # Flatten spatial dims: logits (N, 19, H*W) against labels (N, H*W).
        loss = criterion(output.view(output.shape[0], 19, (- 1)), target.view(target.shape[0], (- 1)))
        losses.update(loss.item(), input.size(0))
        metric.reset()
        # Per-pixel argmax over the class dimension feeds the IoU metric.
        metric.add(output.max(1)[1].view(output.shape[0], 1024, 1024), target.view(target.shape[0], 1024, 1024))
        mIoU.update(metric.value()[1])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Wall-clock time for the whole batch (data wait + compute).
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.print(i)
def parse_stories(filename, word2id=None):
    """Parse a bAbI-style story file into samples plus size statistics.

    Each line is '<nid> <text>'; nid restarting at 1 begins a new story.
    Lines containing a tab hold tab-separated query/answer/supporting
    fields and emit one (substory, query, lowercased answer) sample;
    other lines are tokenized story sentences.

    Returns (stories, sentence_max, story_max, word2id). When word2id is
    not supplied, a vocabulary is built from queries, story sentences and
    answers with 'PAD_ID' at index 0.
    NOTE(review): word ids then depend on Python set iteration order, so
    the mapping is not reproducible across runs — confirm downstream code
    tolerates this.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    print('go through lines')
    (stories, story) = ([], [])
    for line in lines:
        line = line.strip()
        (nid, line) = line.split(' ', 1)
        nid = int(nid)
        if (nid == 1):
            # Line ids restart at 1 at the start of every new story.
            story = []
        if ('\t' in line):
            (query, answer, supporting) = line.split('\t')
            query = tokenize(query)
            # Drop the empty placeholders appended for earlier queries.
            substory = [x for x in story if x]
            stories.append((substory, query, answer.lower()))
            # Empty placeholder — presumably to keep sentence indices
            # aligned with the file's line numbering; confirm.
            story.append('')
        else:
            sentence = tokenize(line)
            story.append(sentence)
    print('build vocab')
    if (not word2id):
        # Seed the vocab with every token appearing in any query.
        vocab = set(reduce((lambda x, y: (x + y)), [q for (_, q, _) in stories]))
        print('reduce done!')
        for (s, _, _) in stories:
            for sentence in s:
                vocab.update(sentence)
        for (_, _, a) in stories:
            vocab.add(a)
        id2word = (['PAD_ID'] + list(vocab))
        word2id = {w: i for (i, w) in enumerate(id2word)}
    print('get max lengths')
    # sentence_max covers both queries and story sentences.
    (sentence_max, story_max) = (0, 0)
    for (s, q, _) in stories:
        if (len(q) > sentence_max):
            sentence_max = len(q)
        if (len(s) > story_max):
            story_max = len(s)
        for sentence in s:
            if (len(sentence) > sentence_max):
                sentence_max = len(sentence)
    return (stories, sentence_max, story_max, word2id)
def local_train_net_fednova(nets, selected, global_model, args, net_dataidx_map, test_dl=None, device='cpu'):
    """Run local FedNova training for every selected client network.

    Returns (nets_list, a_list, d_list, n_list): all client nets plus the
    per-client a_i / d_i terms produced by train_net_fednova and the local
    dataset sizes n_i — presumably consumed by the FedNova aggregation
    step elsewhere; confirm.
    """
    avg_acc = 0.0
    a_list = []
    d_list = []
    n_list = []
    global_model.to(device)
    for (net_id, net) in nets.items():
        if (net_id not in selected):
            continue
        dataidxs = net_dataidx_map[net_id]
        logger.info(('Training network %s. n_training: %d' % (str(net_id), len(dataidxs))))
        net.to(device)
        noise_level = args.noise
        if (net_id == (args.n_parties - 1)):
            # The last party gets zero noise — presumably a clean
            # reference client; confirm.
            noise_level = 0
        if (args.noise_type == 'space'):
            (train_dl_local, test_dl_local, _, _) = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level, net_id, (args.n_parties - 1))
        else:
            # Noise grows linearly with the client index.
            noise_level = ((args.noise / (args.n_parties - 1)) * net_id)
            (train_dl_local, test_dl_local, _, _) = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level)
        # NOTE(review): the global loaders are rebuilt for every client but
        # never used in this function — confirm whether they are needed.
        (train_dl_global, test_dl_global, _, _) = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        n_epoch = args.epochs
        (trainacc, testacc, a_i, d_i) = train_net_fednova(net_id, net, global_model, train_dl_local, test_dl, n_epoch, args.lr, args.optimizer, device=device)
        a_list.append(a_i)
        d_list.append(d_i)
        n_i = len(train_dl_local.dataset)
        n_list.append(n_i)
        logger.info(('net %d final test acc %f' % (net_id, testacc)))
        avg_acc += testacc
    avg_acc /= len(selected)
    if (args.alg == 'local_training'):
        logger.info(('avg test acc %f' % avg_acc))
    nets_list = list(nets.values())
    return (nets_list, a_list, d_list, n_list)
class KNearestNeighborDensityEstimatorTest(unittest.TestCase):
    """Unit tests for KNearestNeighborDensityEstimator: distance
    computation and density-based sorting (including tied distances)."""

    def setUp(self):
        self.knn = KNearestNeighborDensityEstimator()

    @staticmethod
    def _solutions_from(points):
        """Build one 2-variable / 2-objective Solution per point."""
        front = []
        for point in points:
            solution = Solution(2, 2)
            solution.objectives = point
            front.append(solution)
        return front

    def test_should_the_density_estimator_compute_the_right_distances_case1(self):
        front = self._solutions_from([[1, 5], [2, 4], [3, 3], [5, 1]])
        self.knn.compute_density_estimator(front)
        # The first three points lie a diagonal step apart; the last is
        # two diagonal steps from its nearest neighbor.
        self.assertEqual(sqrt(2), front[0].attributes['knn_density'])
        self.assertEqual(sqrt(2), front[1].attributes['knn_density'])
        self.assertEqual(sqrt(2), front[2].attributes['knn_density'])
        self.assertEqual(sqrt((2 * 2) + (2 * 2)), front[3].attributes['knn_density'])

    def test_should_the_density_estimator_sort_the_solution_list(self):
        front = self._solutions_from([[1, 5], [2, 4], [3, 3], [5, 1], [3, 2]])
        s1, s2, s3, s4, s5 = front
        self.knn.compute_density_estimator(front)
        self.knn.sort(front)
        self.assertEqual(front[0], s4)
        self.assertEqual(front[1], s1)
        self.assertEqual(front[2], s2)
        self.assertEqual(front[3], s5)

    def test_should_the_density_estimator_sort_the_solution_list_considering_the_draws(self):
        front = self._solutions_from([[1, 5], [2, 4], [3, 3], [5, 1]])
        s1, s2, s3, s4 = front
        self.knn.compute_density_estimator(front)
        self.knn.sort(front)
        self.assertEqual(front[0], s4)
        self.assertEqual(front[1], s3)
        self.assertEqual(front[2], s1)
        self.assertEqual(front[3], s2)

    def test_should_the_density_estimator_sort_the_solution_list_considering_the_draws_case2(self):
        front = self._solutions_from([[0., 4.], [0., 5.], [0., 4.], [0., 2.], [0., 2.]])
        self.knn.compute_density_estimator(front)
        self.knn.sort(front)
        self.assertEqual(5, len(front))
        self.assertEqual([0., 4.], front[4].objectives)
def corrector_relcoronpath_set(tol):
    """Set PHCpack continuation parameter 25 to `tol`.

    Per the function name this is the corrector's relative tolerance on
    the co-rank along a path — confirm against the phcpy continuation
    parameter table.
    Returns whatever the phcpy setter returns.
    """
    # Fix: the original aliased the import to `set`, shadowing the
    # builtin within this function; use a descriptive alias instead.
    from phcpy.phcpy2c3 import py2c_set_value_of_continuation_parameter as set_continuation_parameter
    return set_continuation_parameter(25, tol)
class TrainSessionParameters(object):
def getSessionName(sessionName):
return (sessionName if (sessionName is not None) else 'trainSession')
def errorRequireChannelsTraining():
print('ERROR: Parameter "channelsTraining" needed but not provided in config file. This parameter should provide paths to files, as many as the channels (modalities) of the task. Each of the files should contain a list of paths, one for each case to train on. These paths in a file should point to the .nii(.gz) files that are the corresponding channel for a patient. Please provide it in the format: channelsTraining = ["path-to-file-for-channel1", ..., "path-to-file-for-channelN"]. The paths should be given in quotes, separated by commas (list of strings, python-style). Exiting.')
exit(1)
errReqChansTr = errorRequireChannelsTraining
def errorRequireGtLabelsTraining():
print('ERROR: Parameter "gtLabelsTraining" needed but not provided in config file. This parameter should provide the path to a file. That file should contain a list of paths, one for each case to train on. These paths should point to the .nii(.gz) files that contain the corresponding Ground-Truth labels for a case. Please provide it in the format: gtLabelsTraining = "path-to-file". The path should be given in quotes (a string, python-style). Exiting.')
exit(1)
errReqGtTr = errorRequireGtLabelsTraining
def errorRequireSamplMasksAreProbabMapsTrain():
print('ERROR: Parameter "samplingMasksAreProbabMapsTrain" needed but not provided in config file. This parameters is needed when parameter "useDefaultTrainingSamplingFromGtAndRoi" = False, in order to know whether the provided masks are probability maps or segmentation labels. Please provide parameter in the form: samplingMasksAreProbabMapsTrain = True/False. True if the masks given at "masksForPos(Neg)SamplingTrain" are probability maps (can be non-normalized, like weights), or False if they are binary segmentation masks. Exiting.')
exit(1)
errReqMasksTypeTr = errorRequireSamplMasksAreProbabMapsTrain
def warnDefaultPosSamplMasksTrain():
print('WARN: Parameter "weightedMapsForPosSamplingTrain" was not provided in config file, even though advanced training options were triggered by setting "useDefaultTrainingSamplingFromGtAndRoi" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of training samples. Can be provided in the format: weightedMapsForPosSamplingTrain = "path-to-file". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the positive samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!')
return None
warnDefPosMasksTr = warnDefaultPosSamplMasksTrain
def warnDefaultNegSamplMasksTrain():
print('WARN: Parameter "weightedMapsForNegSamplingTrain" was not provided in config file, even though advanced training options were triggered by setting "useDefaultTrainingSamplingFromGtAndRoi" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of training samples. Can be provided in the format: weightedMapsForNegSamplingTrain = "path-to-file". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the negative samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!')
return None
warnDefNegMasksTr = warnDefaultNegSamplMasksTrain
def errorRequirePredefinedLrSched():
print('ERROR: Parameter "typeOfLearningRateSchedule" was set to "predefined", but no predefined schedule was given. Please specify at which epochs to lower the Learning Rate, by providing the corresponding parameter in the format: predefinedSchedule = [epoch-for-1st-decrease, ..., epoch-for-last-decrease], where the epochs are specified by an integer > 0. Exiting.')
exit(1)
errReqPredLrSch = errorRequirePredefinedLrSched
def errorAutoRequiresValSamples():
print(('ERROR: Parameter "typeOfLearningRateSchedule" was set to "auto". This requires performing validation on samples throughout training, because this schedule lowers the Learning Rate when validation-accuracy plateaus. However the parameter "performValidationOnSamplesThroughoutTraining" was set to False in the configuration file, or was ommitted, which triggers the default value, False! Please set the parameter performValidationOnSamplesThroughoutTraining = True. You will then need to provide the path to the channels of the validation cases in the format: channelsValidation = ["path-to-file-that-lists-paths-to-channel-1-for-every-case", ..., "path-to-file-that-lists-paths-to-channel-N-for-every-case"] (python style list-of-strings).' + '\t Also, you will need to provide the Ground-Truth for the validation cases, in the format: gtLabelsValidation = "path-to-file", where the file lists the paths to the GT labels of each validation case. Exiting!'))
exit(1)
def errorRequireChannelsVal():
print('ERROR: Parameter "channelsValidation" was not provided, although it is required to perform validation, although validation was requested (parameters "performValidationOnSamplesThroughoutTraining" or "performFullInferenceOnValidationImagesEveryFewEpochs" was set to True). You will need to provide a list with path to files that list where the channels for each validation case can be found. The corresponding parameter must be provided in the format: channelsValidation = ["path-to-file-that-lists-paths-to-channel-1-for-every-case", ..., "path-to-file-that-lists-paths-to-channel-N-for-every-case"] (python style list-of-strings). Exiting.')
exit(1)
errReqChannsVal = errorRequireChannelsVal
def errorReqGtLabelsVal():
print('ERROR: Parameter "gtLabelsValidation" was not provided, although it is required to perform validation on training-samples, which was requested (parameter "performValidationOnSamplesThroughoutTraining" was set to True). It is also useful so that the DSC score is reported if full-inference on the validation samples is performed (when parameter "performFullInferenceOnValidationImagesEveryFewEpochs" is set to True)! You will need to provide the path to a file that lists where the GT labels for each validation case can be found. The corresponding parameter must be provided in the format: gtLabelsValidation = "path-to-file-that-lists-GT-labels-for-every-case" (python style string). Exiting.')
exit(1)
def errorReqNumberOfEpochsBetweenFullValInfGreaterThan0():
print('ERROR: It was requested to perform full-inference on validation images by setting parameter "performFullInferenceOnValidationImagesEveryFewEpochs" to True. For this, it is required to specify the number of epochs between two full-inference procedures. This number was given equal to 0. Please specify a number greater than 0, in the format: numberOfEpochsBetweenFullInferenceOnValImages = 1 (Any integer. Default is 1). Exiting!')
exit(1)
def errorRequireNamesOfPredictionsVal():
print('ERROR: It was requested to perform full-inference on validation images by setting parameter "performFullInferenceOnValidationImagesEveryFewEpochs" to True and then save some of the results (segmentation maps, probability maps or feature maps), either manually or by default. For this, it is required to specify the path to a file, which should contain names to give to the results. Please specify the path to such a file in the format: namesForPredictionsPerCaseVal = "./validation/validationNamesOfPredictionsSimple.cfg" (python-style string). Exiting!')
exit(1)
def errorRequirePercentOfPosSamplesVal():
print('ERROR: Advanced sampling was enabled by setting: useDefaultUniformValidationSampling = False. This requires providing the percentage of validation samples that should be extracted as positives (from the positive weight-map). Please specify a float between 0.0 and 1.0, eg in the format: percentOfSamplesToExtractPositiveVal = 0.5. Exiting!')
exit(1)
errReqPercPosTrVal = errorRequirePercentOfPosSamplesVal
def warnDefaultPercentOfPosSamplesVal():
print('WARN: Advanced sampling was enabled by setting: useDefaultUniformValidationSampling = False. This requires providing the percentage of validation samples that should be extracted as positives (from the positive weight-map). Please specify a float between 0.0 and 1.0, eg in the format: percentOfSamplesToExtractPositiveVal = 0.5. \n\tDEFAULT: In the case not given (like now!) default value of 0.5 is used!')
return 0.5
warnDefPercPosTrVal = warnDefaultPercentOfPosSamplesVal
def warnDefaultPosSamplMasksVal():
print('WARN: Parameter "weightedMapsForPosSamplingVal" was not provided in config file, even though advanced validation-sampling options were triggered by setting "useDefaultUniformValidationSampling" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of validation samples. Can be provided in the format: weightedMapsForPosSamplingVal = "path-to-file". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the positive samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!')
return None
warnDefPosMasksVal = warnDefaultPosSamplMasksVal
def warnDefaultNegSamplMasksVal():
print('WARN: Parameter "weightedMapsForNegSamplingVal" was not provided in config file, even though advanced validation-sampling options were triggered by setting "useDefaultUniformValidationSampling" = False. This parameter should point to a file that lists the paths (per case/patient) to weighted-maps that indicate where to perform the sampling of validation samples. Can be provided in the format: weightedMapsForNegSamplingVal = "path-to-file". The target file should have one entry per case (patient). Each of them should be pointing to .nii(.gz) files that indicate where to extract the negative samples. \n\tDEFAULT: In the case it is not provided (like now!) the corresponding samples are extracted uniformly from the whole volume!')
return None
warnDefNegMasksVal = warnDefaultNegSamplMasksVal
def errorRequireOptimizer012():
print('ERROR: The parameter "sgd0orAdam1orRms2" must be given 0,1 or 2. Omit for default. Exiting!')
exit(1)
def errorRequireMomentumClass0Nestov1():
print('ERROR: The parameter "classicMom0OrNesterov1" must be given 0 or 1. Omit for default. Exiting!')
exit(1)
def errorRequireMomValueBetween01():
print('ERROR: The parameter "momentumValue" must be given between 0.0 and 1.0 Omit for default. Exiting!')
exit(1)
def errorRequireMomNonNorm0Norm1():
print('ERROR: The parameter "momNonNorm0orNormalized1" must be given 0 or 1. Omit for default. Exiting!')
exit(1)
def errorDeprecatedPercPosTraining():
print((((((('ERROR: Parameter "percentOfSamplesToExtractPositiveTrain" in the config file is now Deprecated! ' + ' Please remove this entry from the train-config file. If you do not want the default behaviour but ') + 'instead wish to specify the proportion of Foreground and Background samples yourself, please ') + ' activate the "Advanced Sampling" options (useDefaultTrainingSamplingFromGtAndRoi=False), ') + ' choose type-of-sampling Foreground/Background (typeOfSamplingForTraining = 0) and use the new ') + 'variable "proportionOfSamplesToExtractPerCategoryTraining" which replaces the functionality of the deprecated ') + '(eg. proportionOfSamplesToExtractPerCategoryTraining = [0.3, 0.7]). Exiting.'))
exit(1)
errDeprPercPosTr = errorDeprecatedPercPosTraining
    def __init__(self, log, mainOutputAbsFolder, folderForSessionCnnModels, folderForPredictionsVal, folderForFeaturesVal, num_classes, model_name, cfg):
        """Parse and validate the training-session configuration.

        Reads every entry of the train-config object ``cfg``, substitutes
        defaults for missing entries, resolves relative paths against the
        config file's own location, and stores everything as attributes that
        the ``get_args_for_*`` methods later hand to the training routine.
        Invalid/missing required entries trigger the ``error*`` helpers,
        which print a message and terminate the process.

        :param log: logger object (must expose ``print3``; see ``print_params``).
        :param mainOutputAbsFolder: absolute path to the session's main output folder.
        :param folderForSessionCnnModels: folder where trained models are saved.
        :param folderForPredictionsVal: folder for validation predictions (segm/prob maps).
        :param folderForFeaturesVal: folder for validation feature maps.
        :param num_classes: number of segmentation classes (incl. background).
        :param model_name: name embedded in the filename of saved models.
        :param cfg: config object, indexed by its own constants (e.g. ``cfg[cfg.SESSION_NAME]``).
        """
        # --- Session identity and output paths ---
        self.log = log
        self.mainOutputAbsFolder = mainOutputAbsFolder
        self.sessionName = self.getSessionName(cfg[cfg.SESSION_NAME])
        abs_path_to_cfg = cfg.get_abs_path_to_cfg()
        abs_path_to_saved = (getAbsPathEvenIfRelativeIsGiven(cfg[cfg.SAVED_MODEL], abs_path_to_cfg) if (cfg[cfg.SAVED_MODEL] is not None) else None)
        self.savedModelFilepath = (check_and_adjust_path_to_ckpt(self.log, abs_path_to_saved) if (abs_path_to_saved is not None) else None)
        self.filepath_to_save_models = ((((folderForSessionCnnModels + '/') + model_name) + '.') + self.sessionName)
        # --- Input data for training (channels, GT labels, ROI masks) ---
        if (cfg[cfg.CHANNELS_TR] is None):
            self.errReqChansTr()
        if (cfg[cfg.GT_LABELS_TR] is None):
            self.errReqGtTr()
        # One listing-file per channel; each listing-file has one image path per case.
        listOfAListPerChannelWithFilepathsOfAllCasesTrain = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(channelConfPath, abs_path_to_cfg)) for channelConfPath in cfg[cfg.CHANNELS_TR]]
        # Transpose: from per-channel lists to per-case lists of channel filepaths.
        self.channelsFilepathsTrain = [list(item) for item in zip(*tuple(listOfAListPerChannelWithFilepathsOfAllCasesTrain))]
        self.gtLabelsFilepathsTrain = parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_TR], abs_path_to_cfg))
        self.providedRoiMasksTrain = (True if cfg[cfg.ROI_MASKS_TR] else False)
        self.roiMasksFilepathsTrain = (parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.ROI_MASKS_TR], abs_path_to_cfg)) if self.providedRoiMasksTrain else [])
        # --- Training sampling (default GT/ROI-based, or advanced user-specified) ---
        if (cfg[cfg.PERC_POS_SAMPLES_TR] is not None):
            self.errDeprPercPosTr()
        self.useDefaultTrainingSamplingFromGtAndRoi = (cfg[cfg.DEFAULT_TR_SAMPLING] if (cfg[cfg.DEFAULT_TR_SAMPLING] is not None) else True)
        DEFAULT_SAMPLING_TYPE_TR = 0
        if self.useDefaultTrainingSamplingFromGtAndRoi:
            # Default: fg/bg sampling with equal proportion per category.
            self.samplingTypeInstanceTrain = samplingType.SamplingType(self.log, DEFAULT_SAMPLING_TYPE_TR, num_classes)
            numberOfCategoriesOfSamplesTr = self.samplingTypeInstanceTrain.getNumberOfCategoriesToSample()
            self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample(([(1.0 / numberOfCategoriesOfSamplesTr)] * numberOfCategoriesOfSamplesTr))
            self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining = None
        else:
            # Advanced: user chooses sampling type, per-category proportions and optional weight-maps.
            samplingTypeToUseTr = (cfg[cfg.TYPE_OF_SAMPLING_TR] if (cfg[cfg.TYPE_OF_SAMPLING_TR] is not None) else DEFAULT_SAMPLING_TYPE_TR)
            self.samplingTypeInstanceTrain = samplingType.SamplingType(self.log, samplingTypeToUseTr, num_classes)
            # Per-category proportions are only meaningful for sampling types 0 and 3.
            if ((samplingTypeToUseTr in [0, 3]) and (cfg[cfg.PROP_OF_SAMPLES_PER_CAT_TR] is not None)):
                self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample(cfg[cfg.PROP_OF_SAMPLES_PER_CAT_TR])
            else:
                numberOfCategoriesOfSamplesTr = self.samplingTypeInstanceTrain.getNumberOfCategoriesToSample()
                self.samplingTypeInstanceTrain.setPercentOfSamplesPerCategoryToSample(([(1.0 / numberOfCategoriesOfSamplesTr)] * numberOfCategoriesOfSamplesTr))
            if (cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_TR] is not None):
                listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(weightMapConfPath, abs_path_to_cfg)) for weightMapConfPath in cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_TR]]
            else:
                listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain = None
            self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining = listOfAListPerWeightMapCategoryWithFilepathsOfAllCasesTrain
        self.providedWeightMapsToSampleForEachCategoryTraining = (self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining is not None)
        # --- Training-cycle sizes ---
        self.numberOfEpochs = (cfg[cfg.NUM_EPOCHS] if (cfg[cfg.NUM_EPOCHS] is not None) else 35)
        self.numberOfSubepochs = (cfg[cfg.NUM_SUBEP] if (cfg[cfg.NUM_SUBEP] is not None) else 20)
        self.numOfCasesLoadedPerSubepoch = (cfg[cfg.NUM_CASES_LOADED_PERSUB] if (cfg[cfg.NUM_CASES_LOADED_PERSUB] is not None) else 50)
        self.segmentsLoadedOnGpuPerSubepochTrain = (cfg[cfg.NUM_TR_SEGMS_LOADED_PERSUB] if (cfg[cfg.NUM_TR_SEGMS_LOADED_PERSUB] is not None) else 1000)
        self.num_parallel_proc_sampling = (cfg[cfg.NUM_OF_PROC_SAMPL] if (cfg[cfg.NUM_OF_PROC_SAMPL] is not None) else 1)
        # --- Adversarial-noise augmentation parameters ---
        self.nepsilon = (cfg[cfg.N_EPS] if (cfg[cfg.N_EPS] is not None) else (10 ** (- 6)))
        self.nxi = (cfg[cfg.N_XI] if (cfg[cfg.N_XI] is not None) else 2.5)
        self.nside = (cfg[cfg.N_SIDE] if (cfg[cfg.N_SIDE] is not None) else 1)
        self.probaugmentbackground = (cfg[cfg.PROB_AUGBG] if (cfg[cfg.PROB_AUGBG] is not None) else 1)
        # --- Learning-rate schedule ---
        # NOTE(review): this assert fails when LR_SCH_TYPE is absent (None is not
        # in the list), which makes the 'poly' fallback on the next line
        # unreachable -- confirm whether a missing entry should be allowed.
        assert (cfg[cfg.LR_SCH_TYPE] in ['stable', 'predef', 'poly', 'auto', 'expon'])
        self.lr_sched_params = {'type': (cfg[cfg.LR_SCH_TYPE] if (cfg[cfg.LR_SCH_TYPE] is not None) else 'poly'), 'predef': {'epochs': cfg[cfg.PREDEF_SCH], 'div_lr_by': (cfg[cfg.DIV_LR_BY] if (cfg[cfg.DIV_LR_BY] is not None) else 2.0)}, 'auto': {'min_incr_of_val_acc_considered': (cfg[cfg.AUTO_MIN_INCR_VAL_ACC] if (cfg[cfg.AUTO_MIN_INCR_VAL_ACC] is not None) else 0.0), 'epochs_wait_before_decr': (cfg[cfg.NUM_EPOCHS_WAIT] if (cfg[cfg.NUM_EPOCHS_WAIT] is not None) else 5), 'div_lr_by': (cfg[cfg.DIV_LR_BY] if (cfg[cfg.DIV_LR_BY] is not None) else 2.0)}, 'poly': {'epochs_wait_before_decr': (cfg[cfg.NUM_EPOCHS_WAIT] if (cfg[cfg.NUM_EPOCHS_WAIT] is not None) else (self.numberOfEpochs / 3)), 'final_ep_for_sch': self.numberOfEpochs}, 'expon': {'epochs_wait_before_decr': (cfg[cfg.NUM_EPOCHS_WAIT] if (cfg[cfg.NUM_EPOCHS_WAIT] is not None) else (self.numberOfEpochs / 3)), 'final_ep_for_sch': self.numberOfEpochs, 'lr_to_reach_at_last_ep': (cfg[cfg.EXPON_SCH][0] if (cfg[cfg.EXPON_SCH] is not None) else (1.0 / (2 ** 8))), 'mom_to_reach_at_last_ep': (cfg[cfg.EXPON_SCH][1] if (cfg[cfg.EXPON_SCH] is not None) else 0.9)}}
        if ((self.lr_sched_params['type'] == 'predef') and (self.lr_sched_params['predef']['epochs'] is None)):
            self.errReqPredLrSch()
        # --- Data augmentation (reflection and intensity) ---
        self.reflectImagesPerAxis = (cfg[cfg.REFL_AUGM_PER_AXIS] if cfg[cfg.REFL_AUGM_PER_AXIS] else [False, False, False])
        self.performIntAugm = (cfg[cfg.PERF_INT_AUGM_BOOL] if (cfg[cfg.PERF_INT_AUGM_BOOL] is not None) else False)
        if self.performIntAugm:
            # Intensity augmentation: I' = (I + shift) * mult, shift/mult sampled from N(mu, std).
            self.sampleIntAugmShiftWithMuAndStd = (cfg[cfg.INT_AUGM_SHIF_MUSTD] if cfg[cfg.INT_AUGM_SHIF_MUSTD] else [0.0, 0.05])
            self.sampleIntAugmMultiWithMuAndStd = (cfg[cfg.INT_AUGM_MULT_MUSTD] if cfg[cfg.INT_AUGM_MULT_MUSTD] else [1.0, 0.01])
            self.doIntAugm_shiftMuStd_multiMuStd = [True, self.sampleIntAugmShiftWithMuAndStd, self.sampleIntAugmMultiWithMuAndStd]
        else:
            # NOTE(review): disabled branch has 4 elements ([False, 'plcholder', [], []])
            # while the enabled branch has 3 -- confirm consumers only index [0..2].
            self.doIntAugm_shiftMuStd_multiMuStd = [False, 'plcholder', [], []]
        # --- Validation: what to perform and on which data ---
        self.val_on_samples_during_train = (cfg[cfg.PERFORM_VAL_SAMPLES] if (cfg[cfg.PERFORM_VAL_SAMPLES] is not None) else False)
        if ((self.lr_sched_params['type'] == 'auto') and (not self.val_on_samples_during_train)):
            self.errorAutoRequiresValSamples()
        self.val_on_whole_volumes = (cfg[cfg.PERFORM_VAL_INFERENCE] if (cfg[cfg.PERFORM_VAL_INFERENCE] is not None) else False)
        if (self.val_on_samples_during_train or self.val_on_whole_volumes):
            if cfg[cfg.CHANNELS_VAL]:
                listOfAListPerChannelWithFilepathsOfAllCasesVal = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(channelConfPath, abs_path_to_cfg)) for channelConfPath in cfg[cfg.CHANNELS_VAL]]
                self.channelsFilepathsVal = [list(item) for item in zip(*tuple(listOfAListPerChannelWithFilepathsOfAllCasesVal))]
            else:
                self.errReqChannsVal()
        else:
            self.channelsFilepathsVal = []
        # GT labels for validation: required for sampling-validation, optional for full inference.
        if self.val_on_samples_during_train:
            self.gtLabelsFilepathsVal = (parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_VAL], abs_path_to_cfg)) if (cfg[cfg.GT_LABELS_VAL] is not None) else self.errorReqGtLabelsVal())
        elif self.val_on_whole_volumes:
            self.gtLabelsFilepathsVal = (parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.GT_LABELS_VAL], abs_path_to_cfg)) if (cfg[cfg.GT_LABELS_VAL] is not None) else [])
        else:
            self.gtLabelsFilepathsVal = []
        # NOTE(review): gtLabelsFilepathsVal is a list ([] at minimum), never None,
        # so this flag is effectively always True -- probably meant to test emptiness.
        self.providedGtVal = (True if (self.gtLabelsFilepathsVal is not None) else False)
        self.providedRoiMasksVal = (True if (cfg[cfg.ROI_MASKS_VAL] is not None) else False)
        self.roiMasksFilepathsVal = (parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.ROI_MASKS_VAL], abs_path_to_cfg)) if self.providedRoiMasksVal else [])
        self.segmentsLoadedOnGpuPerSubepochVal = (cfg[cfg.NUM_VAL_SEGMS_LOADED_PERSUB] if (cfg[cfg.NUM_VAL_SEGMS_LOADED_PERSUB] is not None) else 3000)
        # --- Validation sampling (mirrors the training-sampling logic above) ---
        self.useDefaultUniformValidationSampling = (cfg[cfg.DEFAULT_VAL_SAMPLING] if (cfg[cfg.DEFAULT_VAL_SAMPLING] is not None) else True)
        DEFAULT_SAMPLING_TYPE_VAL = 1
        if self.useDefaultUniformValidationSampling:
            self.samplingTypeInstanceVal = samplingType.SamplingType(self.log, DEFAULT_SAMPLING_TYPE_VAL, num_classes)
            numberOfCategoriesOfSamplesVal = self.samplingTypeInstanceVal.getNumberOfCategoriesToSample()
            self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample(([(1.0 / numberOfCategoriesOfSamplesVal)] * numberOfCategoriesOfSamplesVal))
            self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = None
        else:
            samplingTypeToUseVal = (cfg[cfg.TYPE_OF_SAMPLING_VAL] if (cfg[cfg.TYPE_OF_SAMPLING_VAL] is not None) else DEFAULT_SAMPLING_TYPE_VAL)
            self.samplingTypeInstanceVal = samplingType.SamplingType(self.log, samplingTypeToUseVal, num_classes)
            if ((samplingTypeToUseVal in [0, 3]) and (cfg[cfg.PROP_OF_SAMPLES_PER_CAT_VAL] is not None)):
                self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample(cfg[cfg.PROP_OF_SAMPLES_PER_CAT_VAL])
            else:
                numberOfCategoriesOfSamplesVal = self.samplingTypeInstanceVal.getNumberOfCategoriesToSample()
                self.samplingTypeInstanceVal.setPercentOfSamplesPerCategoryToSample(([(1.0 / numberOfCategoriesOfSamplesVal)] * numberOfCategoriesOfSamplesVal))
            if (cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_VAL] is not None):
                self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = [parseAbsFileLinesInList(getAbsPathEvenIfRelativeIsGiven(weightMapConfPath, abs_path_to_cfg)) for weightMapConfPath in cfg[cfg.WEIGHT_MAPS_PER_CAT_FILEPATHS_VAL]]
            else:
                self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal = None
        self.providedWeightMapsToSampleForEachCategoryValidation = (self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal is not None)
        # --- Full-volume validation inference and its outputs ---
        self.num_epochs_between_val_on_whole_volumes = (cfg[cfg.NUM_EPOCHS_BETWEEN_VAL_INF] if (cfg[cfg.NUM_EPOCHS_BETWEEN_VAL_INF] is not None) else 1)
        if ((self.num_epochs_between_val_on_whole_volumes == 0) and self.val_on_whole_volumes):
            self.errorReqNumberOfEpochsBetweenFullValInfGreaterThan0()
        self.saveSegmentationVal = (cfg[cfg.SAVE_SEGM_VAL] if (cfg[cfg.SAVE_SEGM_VAL] is not None) else True)
        self.saveProbMapsBoolPerClassVal = (cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] if ((cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] is not None) and (cfg[cfg.SAVE_PROBMAPS_PER_CLASS_VAL] != [])) else ([True] * num_classes))
        self.filepathsToSavePredictionsForEachPatientVal = None
        self.suffixForSegmAndProbsDictVal = (cfg[cfg.SUFFIX_SEGM_PROB_VAL] if (cfg[cfg.SUFFIX_SEGM_PROB_VAL] is not None) else {'segm': 'Segm', 'prob': 'ProbMapClass'})
        # --- Feature-map saving options ---
        self.saveIndividualFmImagesVal = (cfg[cfg.SAVE_INDIV_FMS_VAL] if (cfg[cfg.SAVE_INDIV_FMS_VAL] is not None) else False)
        self.saveMultidimensionalImageWithAllFmsVal = (cfg[cfg.SAVE_4DIM_FMS_VAL] if (cfg[cfg.SAVE_4DIM_FMS_VAL] is not None) else False)
        if ((self.saveIndividualFmImagesVal == True) or (self.saveMultidimensionalImageWithAllFmsVal == True)):
            # One entry per pathway type: [normal, subsampled, FC].
            indices_fms_per_pathtype_per_layer_to_save = (([cfg[cfg.INDICES_OF_FMS_TO_SAVE_NORMAL_VAL]] + [cfg[cfg.INDICES_OF_FMS_TO_SAVE_SUBSAMPLED_VAL]]) + [cfg[cfg.INDICES_OF_FMS_TO_SAVE_FC_VAL]])
            self.indices_fms_per_pathtype_per_layer_to_save = [(item if (item is not None) else []) for item in indices_fms_per_pathtype_per_layer_to_save]
        else:
            self.indices_fms_per_pathtype_per_layer_to_save = None
        self.filepathsToSaveFeaturesForEachPatientVal = None
        self.namesToSavePredictionsAndFeaturesVal = (parseFileLinesInList(getAbsPathEvenIfRelativeIsGiven(cfg[cfg.NAMES_FOR_PRED_PER_CASE_VAL], abs_path_to_cfg)) if cfg[cfg.NAMES_FOR_PRED_PER_CASE_VAL] else None)
        if ((not self.namesToSavePredictionsAndFeaturesVal) and self.val_on_whole_volumes and (self.saveSegmentationVal or (True in self.saveProbMapsBoolPerClassVal) or self.saveIndividualFmImagesVal or self.saveMultidimensionalImageWithAllFmsVal)):
            self.errorRequireNamesOfPredictionsVal()
        # --- Generic / pre-processing ---
        self.padInputImagesBool = (cfg[cfg.PAD_INPUT] if (cfg[cfg.PAD_INPUT] is not None) else True)
        self.numberOfCasesTrain = len(self.channelsFilepathsTrain)
        self.numberOfCasesVal = len(self.channelsFilepathsVal)
        self.run_input_checks = (cfg[cfg.RUN_INP_CHECKS] if (cfg[cfg.RUN_INP_CHECKS] is not None) else True)
        # Legacy subsampled-pathway entries, kept as placeholders for the arg lists.
        self.useSameSubChannelsAsSingleScale = True
        self.subsampledChannelsFilepathsTrain = 'placeholder'
        self.subsampledChannelsFilepathsVal = 'placeholder'
        # --- Cost function and per-class reweighting ---
        self.reweight_classes_in_cost = (cfg[cfg.W_C_IN_COST] if (cfg[cfg.W_C_IN_COST] is not None) else {'type': None, 'prms': None, 'schedule': [0, self.numberOfEpochs]})
        if (self.reweight_classes_in_cost['type'] == 'per_c'):
            assert (len(self.reweight_classes_in_cost['prms']) == num_classes)
        self._makeFilepathsForPredictionsAndFeaturesVal(folderForPredictionsVal, folderForFeaturesVal)
        # --- Optimizer ---
        self.learningRate = (cfg[cfg.LRATE] if (cfg[cfg.LRATE] is not None) else 0.001)
        self.optimizerSgd0Adam1Rms2 = (cfg[cfg.OPTIMIZER] if (cfg[cfg.OPTIMIZER] is not None) else 2)
        # Only the chosen optimizer's hyper-parameters are read; the rest stay placeholders.
        if (self.optimizerSgd0Adam1Rms2 == 0):
            self.b1Adam = 'placeholder'
            self.b2Adam = 'placeholder'
            self.eAdam = 'placeholder'
            self.rhoRms = 'placeholder'
            self.eRms = 'placeholder'
        elif (self.optimizerSgd0Adam1Rms2 == 1):
            self.b1Adam = (cfg[cfg.B1_ADAM] if (cfg[cfg.B1_ADAM] is not None) else 0.9)
            self.b2Adam = (cfg[cfg.B2_ADAM] if (cfg[cfg.B2_ADAM] is not None) else 0.999)
            self.eAdam = (cfg[cfg.EPS_ADAM] if (cfg[cfg.EPS_ADAM] is not None) else (10 ** (- 8)))
            self.rhoRms = 'placeholder'
            self.eRms = 'placeholder'
        elif (self.optimizerSgd0Adam1Rms2 == 2):
            self.b1Adam = 'placeholder'
            self.b2Adam = 'placeholder'
            self.eAdam = 'placeholder'
            self.rhoRms = (cfg[cfg.RHO_RMS] if (cfg[cfg.RHO_RMS] is not None) else 0.9)
            self.eRms = (cfg[cfg.EPS_RMS] if (cfg[cfg.EPS_RMS] is not None) else (10 ** (- 4)))
        else:
            self.errorRequireOptimizer012()
        # --- Momentum ---
        self.classicMom0Nesterov1 = (cfg[cfg.MOM_TYPE] if (cfg[cfg.MOM_TYPE] is not None) else 1)
        if (self.classicMom0Nesterov1 not in [0, 1]):
            self.errorRequireMomentumClass0Nestov1()
        self.momNonNormalized0Normalized1 = (cfg[cfg.MOM_NORM_NONNORM] if (cfg[cfg.MOM_NORM_NONNORM] is not None) else 1)
        if (self.momNonNormalized0Normalized1 not in [0, 1]):
            self.errorRequireMomNonNorm0Norm1()
        self.momentumValue = (cfg[cfg.MOM] if (cfg[cfg.MOM] is not None) else 0.6)
        if ((self.momentumValue < 0.0) or (self.momentumValue > 1)):
            self.errorRequireMomValueBetween01()
        # --- Regularization, margin and mixup ---
        self.L1_reg_weight = (cfg[cfg.L1_REG] if (cfg[cfg.L1_REG] is not None) else 1e-06)
        self.L2_reg_weight = (cfg[cfg.L2_REG] if (cfg[cfg.L2_REG] is not None) else 0.0001)
        self.marginm = (cfg[cfg.MARGIN] if (cfg[cfg.MARGIN] is not None) else 0)
        self.mixup_rate = (cfg[cfg.MIX_RATE] if (cfg[cfg.MIX_RATE] is not None) else 0)
        self.mixup_biasmargin = (cfg[cfg.MIX_MAR] if (cfg[cfg.MIX_MAR] is not None) else 0)
        # --- Layers to freeze (config is 1-based; converted to 0-based indices) ---
        layersToFreezePerPathwayType = [cfg[cfg.LAYERS_TO_FREEZE_NORM], cfg[cfg.LAYERS_TO_FREEZE_SUBS], cfg[cfg.LAYERS_TO_FREEZE_FC]]
        indicesOfLayersToFreezeNorm = ([(l - 1) for l in layersToFreezePerPathwayType[0]] if (layersToFreezePerPathwayType[0] is not None) else [])
        # Subsampled pathway defaults to mirroring the normal pathway's frozen layers.
        indicesOfLayersToFreezeSubs = ([(l - 1) for l in layersToFreezePerPathwayType[1]] if (layersToFreezePerPathwayType[1] is not None) else indicesOfLayersToFreezeNorm)
        indicesOfLayersToFreezeFc = ([(l - 1) for l in layersToFreezePerPathwayType[2]] if (layersToFreezePerPathwayType[2] is not None) else [])
        self.indicesOfLayersPerPathwayTypeToFreeze = [indicesOfLayersToFreezeNorm, indicesOfLayersToFreezeSubs, indicesOfLayersToFreezeFc]
        # --- Losses: at least one of xentr/iou/dsc/focaloneside must be enabled ---
        self.losses_and_weights = (cfg[cfg.LOSSES_WEIGHTS] if (cfg[cfg.LOSSES_WEIGHTS] is not None) else {'xentr': 1.0, 'iou': None, 'dsc': None, 'focaloneside': None, 'focalonesidegama': 2.0})
        assert (True in [(self.losses_and_weights[k] is not None) for k in ['xentr', 'iou', 'dsc', 'focaloneside']])
def _makeFilepathsForPredictionsAndFeaturesVal(self, absPathToFolderForPredictionsFromSession, absPathToFolderForFeaturesFromSession):
self.filepathsToSavePredictionsForEachPatientVal = []
self.filepathsToSaveFeaturesForEachPatientVal = []
if (self.namesToSavePredictionsAndFeaturesVal is not None):
for case_i in range(self.numberOfCasesVal):
filepathForCasePrediction = ((absPathToFolderForPredictionsFromSession + '/') + self.namesToSavePredictionsAndFeaturesVal[case_i])
self.filepathsToSavePredictionsForEachPatientVal.append(filepathForCasePrediction)
filepathForCaseFeatures = ((absPathToFolderForFeaturesFromSession + '/') + self.namesToSavePredictionsAndFeaturesVal[case_i])
self.filepathsToSaveFeaturesForEachPatientVal.append(filepathForCaseFeatures)
elif (self.numberOfCasesVal > 1):
for case_i in range(self.numberOfCasesVal):
self.filepathsToSavePredictionsForEachPatientVal.append((((absPathToFolderForPredictionsFromSession + '/pred_case') + str(case_i)) + '.nii.gz'))
self.filepathsToSaveFeaturesForEachPatientVal.append((((absPathToFolderForPredictionsFromSession + '/pred_case') + str(case_i)) + '.nii.gz'))
else:
self.filepathsToSavePredictionsForEachPatientVal.append(absPathToFolderForPredictionsFromSession)
self.filepathsToSaveFeaturesForEachPatientVal.append(absPathToFolderForPredictionsFromSession)
def get_path_to_load_model_from(self):
return self.savedModelFilepath
    def print_params(self):
        """Log every parameter of this training session, grouped by topic.

        Pure reporting: writes a human-readable dump of all attributes set in
        ``__init__`` (output paths, sampling, training cycle, LR schedule,
        augmentation, validation, optimizer, costs, frozen layers) through the
        session logger's ``print3``. No state is modified.
        """
        logPrint = self.log.print3
        logPrint('')
        logPrint('')
        logPrint(' PARAMETERS FOR THIS TRAINING SESSION ')
        logPrint('')
        logPrint(("Session's name = " + str(self.sessionName)))
        logPrint(('Model will be loaded from save = ' + str(self.savedModelFilepath)))
        logPrint('~~Output~~')
        logPrint(('Main output folder = ' + str(self.mainOutputAbsFolder)))
        logPrint(('Path and filename to save trained models = ' + str(self.filepath_to_save_models)))
        logPrint('Generic Information')
        logPrint(('Number of Cases for Training = ' + str(self.numberOfCasesTrain)))
        logPrint(('Number of Cases for Validation = ' + str(self.numberOfCasesVal)))
        logPrint('Training parameters')
        logPrint(('Filepaths to Channels of the Training Cases = ' + str(self.channelsFilepathsTrain)))
        logPrint(('Filepaths to Ground-Truth labels of the Training Cases = ' + str(self.gtLabelsFilepathsTrain)))
        logPrint('~~Sampling~~')
        logPrint(('Region-Of-Interest Masks provided = ' + str(self.providedRoiMasksTrain)))
        logPrint(('Filepaths to ROI Masks of the Training Cases = ' + str(self.roiMasksFilepathsTrain)))
        logPrint('~~Advanced Sampling~~')
        logPrint((('Using default sampling = ' + str(self.useDefaultTrainingSamplingFromGtAndRoi)) + '. NOTE: Adv.Sampl.Params are auto-set to perform default sampling if True.'))
        logPrint((((('Type of Sampling = ' + str(self.samplingTypeInstanceTrain.getStringOfSamplingType())) + ' (') + str(self.samplingTypeInstanceTrain.getIntSamplingType())) + ')'))
        logPrint(('Sampling Categories = ' + str(self.samplingTypeInstanceTrain.getStringsPerCategoryToSample())))
        logPrint(('Percent of Samples to extract per Sampling Category = ' + str(self.samplingTypeInstanceTrain.getPercentOfSamplesPerCategoryToSample())))
        logPrint(('Provided Weight-Maps, pointing where to focus sampling for each category (if False, samples will be extracted based on GT and ROI) = ' + str(self.providedWeightMapsToSampleForEachCategoryTraining)))
        logPrint(('Paths to weight-maps for sampling of each category = ' + str(self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining)))
        logPrint('~~Training Cycle~~')
        logPrint(('Number of Epochs = ' + str(self.numberOfEpochs)))
        logPrint(('Number of Subepochs per epoch = ' + str(self.numberOfSubepochs)))
        logPrint(('Number of cases to load per Subepoch (for extracting the samples for this subepoch) = ' + str(self.numOfCasesLoadedPerSubepoch)))
        logPrint((('Number of Segments loaded on GPU per subepoch for Training = ' + str(self.segmentsLoadedOnGpuPerSubepochTrain)) + '. NOTE: This number of segments divided by the batch-size defines the number of optimization-iterations that will be performed every subepoch!'))
        logPrint(('Number of parallel processes for sampling = ' + str(self.num_parallel_proc_sampling)))
        logPrint('~~Learning Rate Schedule~~')
        logPrint(('Type of schedule = ' + str(self.lr_sched_params['type'])))
        logPrint(('[Predef] Predefined schedule of epochs when the LR will be lowered = ' + str(self.lr_sched_params['predef']['epochs'])))
        logPrint(('[Predef] When decreasing Learning Rate, divide LR by = ' + str(self.lr_sched_params['predef']['div_lr_by'])))
        logPrint(('[Poly] Initial epochs to wait before lowering LR = ' + str(self.lr_sched_params['poly']['epochs_wait_before_decr'])))
        logPrint(('[Poly] Final epoch for the schedule = ' + str(self.lr_sched_params['poly']['final_ep_for_sch'])))
        logPrint(('[Auto] Initial epochs to wait before lowering LR = ' + str(self.lr_sched_params['auto']['epochs_wait_before_decr'])))
        logPrint(('[Auto] When decreasing Learning Rate, divide LR by = ' + str(self.lr_sched_params['auto']['div_lr_by'])))
        logPrint(('[Auto] Minimum increase in validation accuracy (0. to 1.) that resets the waiting counter = ' + str(self.lr_sched_params['auto']['min_incr_of_val_acc_considered'])))
        logPrint(('[Expon] (Deprecated) parameters = ' + str(self.lr_sched_params['expon'])))
        logPrint('~~Data Augmentation During Training~~')
        logPrint(('Reflect images per axis = ' + str(self.reflectImagesPerAxis)))
        logPrint(("Perform intensity-augmentation [I'= (I+shift)*mult] = " + str(self.performIntAugm)))
        logPrint(('[Int. Augm.] Sample Shift from N(mu,std) = ' + str(self.doIntAugm_shiftMuStd_multiMuStd[1])))
        logPrint(('[Int. Augm.] Sample Multi from N(mu,std) = ' + str(self.doIntAugm_shiftMuStd_multiMuStd[2])))
        logPrint(('[Int. Augm.] (DEBUGGING:) full parameters [ doIntAugm, shift, mult] = ' + str(self.doIntAugm_shiftMuStd_multiMuStd)))
        logPrint(('Noise epsilon (train) = ' + str(self.nepsilon)))
        logPrint(('Noise initial stength (train) = ' + str(self.nxi)))
        logPrint(('Adversarial direction mode (train) = ' + str(self.nside)))
        logPrint(('The probability of background sample being augmented = ' + str(self.probaugmentbackground)))
        logPrint('Validation parameters')
        logPrint(('Perform Validation on Samples throughout training? = ' + str(self.val_on_samples_during_train)))
        logPrint(('Perform Full Inference on validation cases every few epochs? = ' + str(self.val_on_whole_volumes)))
        logPrint(('Filepaths to Channels of the Validation Cases (Req for either of the above) = ' + str(self.channelsFilepathsVal)))
        logPrint((('Provided Ground-Truth for Validation = ' + str(self.providedGtVal)) + '. NOTE: Required for Val on samples. Not Req for Full-Inference, but DSC will be reported if provided.'))
        logPrint(('Filepaths to Ground-Truth labels of the Validation Cases = ' + str(self.gtLabelsFilepathsVal)))
        logPrint((('Provided ROI masks for Validation = ' + str(self.providedRoiMasksVal)) + '. NOTE: Validation-sampling and Full-Inference will be limited within this mask if provided. If not provided, Negative Validation samples will be extracted from whole volume, except if advanced-sampling is enabled, and the user provided separate weight-maps for sampling.'))
        logPrint(('Filepaths to ROI masks for Validation Cases = ' + str(self.roiMasksFilepathsVal)))
        logPrint('~~~~~~~Validation on Samples throughout Training~~~~~~~')
        logPrint(('Number of Segments loaded on GPU per subepoch for Validation = ' + str(self.segmentsLoadedOnGpuPerSubepochVal)))
        logPrint('~~Advanced Sampling~~')
        logPrint((('Using default uniform sampling for validation = ' + str(self.useDefaultUniformValidationSampling)) + '. NOTE: Adv.Sampl.Params are auto-set to perform uniform-sampling if True.'))
        logPrint((((('Type of Sampling = ' + str(self.samplingTypeInstanceVal.getStringOfSamplingType())) + ' (') + str(self.samplingTypeInstanceVal.getIntSamplingType())) + ')'))
        logPrint(('Sampling Categories = ' + str(self.samplingTypeInstanceVal.getStringsPerCategoryToSample())))
        logPrint(('Percent of Samples to extract per Sampling Category = ' + str(self.samplingTypeInstanceVal.getPercentOfSamplesPerCategoryToSample())))
        logPrint(('Provided Weight-Maps, pointing where to focus sampling for each category (if False, samples will be extracted based on GT and ROI) = ' + str(self.providedWeightMapsToSampleForEachCategoryValidation)))
        logPrint(('Paths to weight-maps for sampling of each category = ' + str(self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal)))
        logPrint('~~~~~Validation with Full Inference on Validation Cases~~~~~')
        logPrint(('Perform Full-Inference on Val. cases every that many epochs = ' + str(self.num_epochs_between_val_on_whole_volumes)))
        logPrint('~~Predictions (segmentations and prob maps on val. cases)~~')
        logPrint(('Save Segmentations = ' + str(self.saveSegmentationVal)))
        logPrint(('Save Probability Maps for each class = ' + str(self.saveProbMapsBoolPerClassVal)))
        logPrint(('Filepaths to save results per case = ' + str(self.filepathsToSavePredictionsForEachPatientVal)))
        logPrint(('Suffixes with which to save segmentations and probability maps = ' + str(self.suffixForSegmAndProbsDictVal)))
        logPrint('~~Feature Maps~~')
        logPrint(('Save Feature Maps = ' + str(self.saveIndividualFmImagesVal)))
        logPrint(('Save FMs in a 4D-image = ' + str(self.saveMultidimensionalImageWithAllFmsVal)))
        logPrint(('Min/Max Indices of FMs to visualise per pathway-type and per layer = ' + str(self.indices_fms_per_pathtype_per_layer_to_save)))
        logPrint(('Filepaths to save FMs per case = ' + str(self.filepathsToSaveFeaturesForEachPatientVal)))
        logPrint('~~Optimization~~')
        logPrint(('Initial Learning rate = ' + str(self.learningRate)))
        logPrint(('Optimizer to use: SGD(0), Adam(1), RmsProp(2) = ' + str(self.optimizerSgd0Adam1Rms2)))
        logPrint(((((('Parameters for Adam: b1= ' + str(self.b1Adam)) + ', b2=') + str(self.b2Adam)) + ', e= ') + str(self.eAdam)))
        logPrint(((('Parameters for RmsProp: rho= ' + str(self.rhoRms)) + ', e= ') + str(self.eRms)))
        logPrint(('Momentum Type: Classic (0) or Nesterov (1) = ' + str(self.classicMom0Nesterov1)))
        logPrint(('Momentum Non-Normalized (0) or Normalized (1) = ' + str(self.momNonNormalized0Normalized1)))
        logPrint(('Momentum Value = ' + str(self.momentumValue)))
        logPrint('~~Costs~~')
        logPrint(('Loss functions and their weights = ' + str(self.losses_and_weights)))
        logPrint(('Reweight samples in cost on a per-class basis = ' + str(self.reweight_classes_in_cost)))
        logPrint(('L1 Regularization term = ' + str(self.L1_reg_weight)))
        logPrint(('L2 Regularization term = ' + str(self.L2_reg_weight)))
        logPrint(('Margin term = ' + str(self.marginm)))
        logPrint(('Mixup Rate term = ' + str(self.mixup_rate)))
        logPrint(('Mixup Margin term = ' + str(self.mixup_biasmargin)))
        logPrint('~~Freeze Weights of Certain Layers~~')
        logPrint('Indices of layers from each type of pathway that will be kept fixed (first layer is 0):')
        logPrint(("Normal pathway's layers to freeze = " + str(self.indicesOfLayersPerPathwayTypeToFreeze[0])))
        logPrint(("Subsampled pathway's layers to freeze = " + str(self.indicesOfLayersPerPathwayTypeToFreeze[1])))
        logPrint(("FC pathway's layers to freeze = " + str(self.indicesOfLayersPerPathwayTypeToFreeze[2])))
        logPrint('Other Generic Parameters')
        logPrint(('Check whether input data has correct format (can slow down process) = ' + str(self.run_input_checks)))
        logPrint('~~Pre Processing~~')
        logPrint(('Pad Input Images = ' + str(self.padInputImagesBool)))
        logPrint(" Done with printing session's parameters ")
        logPrint('\n')
def get_args_for_train_routine(self):
args = [self.log, self.filepath_to_save_models, self.val_on_samples_during_train, {'segm': self.saveSegmentationVal, 'prob': self.saveProbMapsBoolPerClassVal}, self.filepathsToSavePredictionsForEachPatientVal, self.suffixForSegmAndProbsDictVal, self.channelsFilepathsTrain, self.channelsFilepathsVal, self.gtLabelsFilepathsTrain, self.providedGtVal, self.gtLabelsFilepathsVal, self.providedWeightMapsToSampleForEachCategoryTraining, self.forEachSamplingCategory_aListOfFilepathsToWeightMapsOfEachPatientTraining, self.providedWeightMapsToSampleForEachCategoryValidation, self.perSamplingCat_aListOfFilepathsToWeightMapsOfEachCaseVal, self.providedRoiMasksTrain, self.roiMasksFilepathsTrain, self.providedRoiMasksVal, self.roiMasksFilepathsVal, self.numberOfEpochs, self.numberOfSubepochs, self.numOfCasesLoadedPerSubepoch, self.segmentsLoadedOnGpuPerSubepochTrain, self.segmentsLoadedOnGpuPerSubepochVal, self.num_parallel_proc_sampling, self.samplingTypeInstanceTrain, self.samplingTypeInstanceVal, self.padInputImagesBool, self.mixup_rate, self.mixup_biasmargin, self.doIntAugm_shiftMuStd_multiMuStd, self.reflectImagesPerAxis, self.useSameSubChannelsAsSingleScale, self.subsampledChannelsFilepathsTrain, self.subsampledChannelsFilepathsVal, self.val_on_whole_volumes, self.num_epochs_between_val_on_whole_volumes, self.saveIndividualFmImagesVal, self.saveMultidimensionalImageWithAllFmsVal, self.indices_fms_per_pathtype_per_layer_to_save, self.filepathsToSaveFeaturesForEachPatientVal, self.run_input_checks, self.nepsilon, self.nxi, self.probaugmentbackground]
return args
def get_args_for_trainer(self):
args = [self.log, self.indicesOfLayersPerPathwayTypeToFreeze, self.losses_and_weights, self.L1_reg_weight, self.L2_reg_weight, self.reweight_classes_in_cost, self.marginm, self.mixup_biasmargin]
return args
def get_args_for_optimizer(self):
args = [self.log, self.optimizerSgd0Adam1Rms2, self.lr_sched_params, self.learningRate, self.momentumValue, self.classicMom0Nesterov1, self.momNonNormalized0Normalized1, self.b1Adam, self.b2Adam, self.eAdam, self.rhoRms, self.eRms]
return args
def get_args_for_adversarial(self):
args = [self.log, self.nside]
return args |
class GroupingOperation(Function):
    """Autograd Function that gathers point features by neighbour indices.

    Wraps the compiled extension ops `_ext.group_points` (forward) and
    `_ext.group_points_grad` (backward). Invoked via
    `GroupingOperation.apply(features, idx)`.

    BUG FIX: `forward`/`backward` were missing `@staticmethod`. Modern PyTorch
    requires custom `torch.autograd.Function` subclasses to define them as
    static methods; without the decorator, `.apply` raises at call time.
    """

    @staticmethod
    def forward(ctx, features, idx):
        """Group features.

        :param features: (B, C, N) feature tensor.
        :param idx: (B, nfeatures, nsample) integer neighbour indices into N.
        :return: grouped features as produced by `_ext.group_points`
                 (presumably (B, C, nfeatures, nsample) -- defined by the extension).
        """
        (B, nfeatures, nsample) = idx.size()
        (_, C, N) = features.size()
        # Stash what backward needs: the indices and the number of input points.
        ctx.for_backwards = (idx, N)
        return _ext.group_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        """Scatter the incoming gradient back onto the N input points.

        Returns (grad_features, None): `idx` is integer-valued and gets no gradient.
        """
        (idx, N) = ctx.for_backwards
        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
        return (grad_features, None)
def remove_empty_line(original: str) -> str:
    """Drop blank (whitespace-only) lines from *original*, rejoining with '\\n'."""
    kept = []
    for line in original.splitlines():
        if line.strip():
            kept.append(line)
    return '\n'.join(kept)
def compute_eta_for_day(day, sc_parquet, supersegments, edge_maxspeeds_kph, edge_free_flows_kph, debug):
    """Compute per-supersegment ETAs for one day across 96 15-minute bins.

    Builds a speed profile per edge (current median speed where available,
    otherwise free-flow, otherwise the maxspeed fallback) and evaluates
    `compute_eta` for every (time-bin, supersegment) pair.

    :param day: day identifier copied into every result row.
    :param sc_parquet: path to a parquet file with columns u, v, t, median_speed_kph.
    :param supersegments: iterable of dicts with 'identifier' and 'edges' keys.
    :param edge_maxspeeds_kph: {(u, v): maxspeed} fallback speeds.
    :param edge_free_flows_kph: {(u, v): free-flow speed} preferred baseline.
    :param debug: if truthy, per-edge details are attached to each result row.
    :return: DataFrame with columns identifier, day, t, eta (and edges in debug mode).
    """
    sc_df = pd.read_parquet(sc_parquet)
    print(f'Read {len(sc_df)} rows from {sc_parquet}')
    # Baseline profile per edge: free-flow where known, else the maxspeed fallback.
    edge_speeds = {}
    maxspeed_cnt = 0
    for uv, maxspeed in edge_maxspeeds_kph.items():
        if uv in edge_free_flows_kph:
            baseline = edge_free_flows_kph[uv]
            source = 'free_flow'
        else:
            baseline = maxspeed
            source = 'maxspeed'
            maxspeed_cnt += 1
        edge_speeds[uv] = {'speeds': [baseline] * 96, 'sources': [source] * 96}
    print(f'{maxspeed_cnt} / {len(edge_speeds)} edges only have maxspeed')
    # Overlay the measured median speeds for each 15-minute bin.
    for t in range(96):
        bin_df = sc_df[sc_df['t'] == t]
        for u, v, ms in zip(bin_df['u'], bin_df['v'], bin_df['median_speed_kph']):
            profile = edge_speeds[(u, v)]
            profile['speeds'][t] = ms
            profile['sources'][t] = 'current'
    print(' Start generating ETAs ...')
    usage_counts = {'cnt_current': 0, 'cnt_free_flow': 0, 'cnt_maxspeed': 0, 'cnt_speedtoolow': 0}
    eta_results = []
    eta_comp_time = 0
    for t in range(96):
        for s in supersegments:
            t0 = time.process_time()
            eta_info = compute_eta(s['edges'], edge_speeds, t, usage_counts=usage_counts, debug=debug)
            eta_comp_time += (time.process_time() - t0)
            record = {'identifier': s['identifier'], 'day': day, 't': t, 'eta': eta_info['eta_sec']}
            if debug:
                record['edges'] = eta_info['edges']
            eta_results.append(record)
    print(f' ... ETA comp took {eta_comp_time}s (on avg {(eta_comp_time / len(eta_results))}s per call)')
    print(f'SuperSegments usage: {usage_counts}')
    eta_df = pd.DataFrame(eta_results)
    # Sanity bound: no supersegment should take longer than an hour.
    assert (eta_df['eta'].max() <= 3600.0)
    return eta_df
def mlp_constructor(dims, actv='Sigmoid', lastactv=True):
    """Build an MLP as an nn.Sequential of Linear layers interleaved with activations.

    :param dims: layer widths, e.g. [in, hidden..., out]; fewer than two entries
                 yields an empty nn.Sequential.
    :param actv: activation class, or the name of one in torch.nn (e.g. 'Sigmoid').
    :param lastactv: if True, an activation is appended after the final Linear too.
    """
    if (type(actv) is str):
        actv = getattr(nn, actv)
    if (len(dims) <= 1):
        return nn.Sequential()
    layers = []
    for i in range(len(dims) - 2):
        layers.append(nn.Linear(dims[i], dims[i + 1]))
        layers.append(actv())
    layers.append(nn.Linear(dims[-2], dims[-1]))
    if lastactv:
        layers.append(actv())
    return nn.Sequential(*layers)
class DwarvishMithrilCoat(BaseSuit):
    """Dwarvish mithril-coat body armour (AC 6, weight 150, mithril)."""

    def __init__(self):
        super().__init__(
            'dwarvish mithril-coat',
            material=M.Mithril,
            armour_class=6,
            weight=150,
        )
def get_cam_model(input_size: tuple=(224, 224, 3), num_classes: int=3, trainable_layers: int=1, dropout: float=0.5, log_softmax: bool=False, mc_dropout: bool=False, *args, **kwargs):
    """Build a VGG16-based classifier whose GAP head supports class-activation maps.

    The ImageNet-pretrained VGG16 backbone is topped with global average
    pooling, dropout and a dense softmax (or log-softmax) head; only the last
    `trainable_layers` backbone layers plus the two head layers stay trainable.
    """
    base = VGG16(weights='imagenet', include_top=False, input_tensor=Input(shape=input_size))
    head = global_average_pooling(base.output)
    drop = Dropout(dropout)
    # MC dropout keeps the layer stochastic at inference time via training=True.
    head = drop(head, training=True) if mc_dropout else drop(head)
    activation = tf.nn.log_softmax if log_softmax else tf.nn.softmax
    head = Dense(num_classes, activation=activation)(head)
    model = Model(inputs=base.input, outputs=head)
    # +2 accounts for the GAP and Dense layers added on top of the backbone.
    return fix_layers(model, num_flex_layers=trainable_layers + 2)
def crawl_and_copy(current_folder, out_folder, prefix='fabian_', suffix='ummary.json'):
    """Recursively copy every file ending in `suffix` found inside a directory
    whose path contains 'fold0' into `out_folder`.

    Copied files are renamed with `prefix`, which grows by '__<subdir>' at
    each recursion level so the origin path is encoded in the filename.
    """
    subdirectories = subdirs(current_folder, join=False)
    matching = [name for name in subfiles(current_folder, join=False) if name.endswith(suffix)]
    if 'fold0' in current_folder:
        for name in matching:
            shutil.copy(os.path.join(current_folder, name), os.path.join(out_folder, prefix + name))
    for sub in subdirectories:
        # An empty prefix takes the subdir name bare; otherwise separate with '__'.
        add = sub if prefix == '' else '__' + sub
        crawl_and_copy(os.path.join(current_folder, sub), out_folder, prefix=prefix + add)
def qualification_loss(x_minus, x_plus, y_minus, y_plus, a, b, c, confidence=-0.1):
    """Loss for qualifying the plane a*x + b*y + c against tanh/sigmoid bounds.

    Four terms are evaluated (sigmoid-upper bounds at x_minus/x_plus, and the
    direct surface gap at x_minus/x_plus); each must be non-positive for the
    plane to qualify. Every term is clamped from below at `confidence` so an
    already-satisfied constraint contributes a bounded reward.

    Returns (loss, valid) where valid is the elementwise product of the four
    pre-clamp feasibility indicators.
    """
    zero_y = y_plus * 0

    def clamp_and_track(raw, valid_so_far):
        # Feasibility is judged on the raw (pre-clamp) value.
        ok = (raw <= 0) if valid_so_far is None else valid_so_far * (raw <= 0)
        return torch.clamp(raw, min=confidence), ok

    loss1, valid = clamp_and_track(
        ts.sigmoid_upper(torch.tanh(x_minus), b, a * x_minus + c, y_minus, zero_y), None)
    loss2, valid = clamp_and_track(
        ts.sigmoid_upper(torch.tanh(x_plus), b, a * x_plus + c, y_minus, zero_y), valid)
    loss4, valid = clamp_and_track(
        torch.tanh(x_minus) * torch.sigmoid(y_plus) - (a * x_minus + b * y_plus + c), valid)
    loss5, valid = clamp_and_track(
        torch.tanh(x_plus) * torch.sigmoid(y_plus) - (a * x_plus + b * y_plus + c), valid)
    return loss1 + loss2 + loss4 + loss5, valid
def replace_layer(state_dict, keyword1, keyword2):
    """Rename, in place, every key of `state_dict` that contains `keyword1`,
    substituting `keyword2` for all its occurrences.

    Renamed entries are popped and re-inserted, so they move to the end of the
    dict's insertion order. Returns the same dict for convenience.
    """
    for old_key in list(state_dict):
        if keyword1 in old_key:
            state_dict[old_key.replace(keyword1, keyword2)] = state_dict.pop(old_key)
    return state_dict
def tag_reference_line(line, kbs, record_titles_count):
    """Tag one raw reference line with semantic markup (journal titles, report
    numbers, publishers, arXiv ids, ISBNs, ...) using the knowledge bases in
    `kbs`.

    Returns (tagged_line, record_titles_count) where the counts dict has this
    line's per-title occurrences merged in.
    """
    # Pass 1: tag directly on the washed original line.
    working_line1 = wash_line(line)
    working_line1 = tag_pos_volume(working_line1)
    working_line1 = wash_line(working_line1)
    working_line1 = tag_quoted_text(working_line1)
    working_line1 = tag_isbn(working_line1)
    working_line1 = tag_arxiv(working_line1)
    working_line1 = tag_arxiv_more(working_line1)
    # tag_pos_volume runs twice — presumably earlier tags can expose new
    # PoS/volume patterns; confirm against the tagger's docs before changing.
    working_line1 = tag_pos_volume(working_line1)
    working_line1 = tag_atlas_conf(working_line1)
    standardised_titles = kbs['journals'][1]
    standardised_titles.update(kbs['journals_re'])
    journals_matches = identifiy_journals_re(working_line1, kbs['journals_re'])
    # Pass 2: a normalised copy (tags stripped, upper-cased, punctuation and
    # repeated spaces removed) used for fuzzy title/report-number matching.
    working_line2 = strip_tags(working_line1)
    working_line2 = working_line2.upper()
    working_line2 = re_punctuation.sub(u' ', working_line2)
    # removed_spaces records positions so matches can be mapped back to pass 1.
    (removed_spaces, working_line2) = remove_and_record_multiple_spaces_in_line(working_line2)
    (found_pprint_repnum_matchlens, found_pprint_repnum_replstr, working_line2) = identify_report_numbers(working_line2, kbs['report-numbers'])
    (journals_matches_more, working_line2, line_titles_count) = identify_journals(working_line2, kbs['journals'])
    journals_matches.update(journals_matches_more)
    record_titles_count = sum_2_dictionaries(record_titles_count, line_titles_count)
    # 'IBID' refers back to the most recently matched journal title.
    if (working_line2.upper().find(u'IBID') != (- 1)):
        (found_ibids_matchtext, working_line2) = identify_ibids(working_line2)
        journals_matches.update(found_ibids_matchtext)
    publishers_matches = identify_publishers(working_line2, kbs['publishers'])
    # Merge everything found on the normalised line back onto the tagged line.
    tagged_line = process_reference_line(working_line=working_line1, journals_matches=journals_matches, pprint_repnum_len=found_pprint_repnum_matchlens, pprint_repnum_matchtext=found_pprint_repnum_replstr, publishers_matches=publishers_matches, removed_spaces=removed_spaces, standardised_titles=standardised_titles, kbs=kbs)
    return (tagged_line, record_titles_count)
def create_ucf101_files_for_frames(folder_files: str, frames_folder: str):
    """Rewrite the UCF101 split files so they reference extracted frame folders.

    For every split file listed in `_UCF101_FILES` under `folder_files`: map
    each video to a dense class index (derived from its parent directory
    name), strip the video suffix, add a frame-count-based duration column,
    and write the result under `frames_folder` with the same file name.

    Raises:
        ImportError: when pandas is not installed.
    """
    if (not _HAS_PD):
        raise ImportError('pandas is required to use this function.')
    classes = {}
    def get_video_class_index(video: str):
        # Dense class ids assigned in first-seen order of the parent dir name;
        # `classes` is shared across all split files so ids stay consistent.
        video = Path(video)
        if (video.parent.name not in classes):
            classes[video.parent.name] = len(classes)
        return classes[video.parent.name]
    for file in _UCF101_FILES:
        file = (Path(folder_files) / file)
        data = pandas.read_csv(file, sep=' ', header=None, names=['video', 'label'])
        frames_folder = Path(frames_folder)
        data['label'] = data.video.map(get_video_class_index)
        data.video = data.video.map(remove_suffix)
        data['duration'] = data.video.map(partial(get_raw_video_duration, frames_folder))
        # Space-separated output mirrors the input split-file format.
        data.to_csv((frames_folder / file.name), sep=' ', header=None, index=None)
class Dataset():
    """Thin wrapper around a raw JSON dataset with `metadata` and `data` parts."""

    def __init__(self, raw_data: Dict):
        self.raw_data = raw_data
        # Attribute-style access to the metadata block.
        self.metadata: EasyDict = EasyDict(raw_data['metadata'])
        self.data: List[Dict] = raw_data['data']

    def dataset_key(self):
        """Return the dataset's identifying key from its metadata."""
        return self.metadata['dataset_key']

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item: int) -> Dict:
        return self.data[item]

    # BUGFIX: `load` takes no `self`; without @staticmethod, instance.load(key)
    # would pass the instance itself as `dataset_key`.
    @staticmethod
    def load(dataset_key):
        """Load the dataset stored under `dataset_key`, verifying its identity.

        Raises:
            Exception: when the stored metadata key does not match `dataset_key`.
        """
        with open(get_dataset_path(dataset_key), 'r') as f:
            raw_data = json.load(f)
        dataset = Dataset(raw_data)
        if (dataset.metadata['dataset_key'] != dataset_key):
            raise Exception('Dataset key mismatch.')
        return dataset

    def select_samples(self, sample_indices: List[int]) -> List[Dict]:
        """Return the samples at `sample_indices`, rejecting out-of-range indices."""
        selected = []
        for i in sample_indices:
            if (len(self.data) <= i):
                raise IndexError('Sample index {} out of range [0, {}).'.format(i, len(self.data)))
            selected.append(self.data[i])
        return selected
# NOTE(review): a decorator call was mangled here during extraction — the
# original line read `(version='2.0')`, which is not valid Python on its own.
# The decorator name is unrecoverable from this file; restore it from upstream.
def _prepare_inputs(pt_model, input_names, example_inputs):
    """Normalise `(input_names, example_inputs)` before export/tracing.

    - dict-like example inputs: take the input names from the dict keys
      (a UserDict is converted to a plain dict);
    - multiple positional inputs: reorder both names and values to follow the
      parameter order of `pt_model.forward`, but only when every provided
      name is an actual forward() parameter.
    Finally, the example inputs are coerced to a tuple via `input2tuple`.
    """
    if isinstance(example_inputs, (dict, UserDict)):
        input_names = (input_names or list(example_inputs.keys()))
        if isinstance(example_inputs, UserDict):
            example_inputs = dict(example_inputs)
    elif (input_names and (len(input_names) > 1)):
        import inspect
        input_order = inspect.signature(pt_model.forward).parameters.keys()
        if all((name in input_order) for name in input_names):
            new_input_names = []
            new_example_inputs = []
            for name in input_order:
                if (name in input_names):
                    new_input_names.append(name)
                    # (previously used a local named `id`, shadowing the builtin)
                    new_example_inputs.append(example_inputs[input_names.index(name)])
            input_names = new_input_names
            example_inputs = new_example_inputs
    example_inputs = input2tuple(example_inputs)
    return (input_names, example_inputs)
class DeepAttentionWrapper(nn.Module):
    """Stack of `att_cnt` attention layers whose outputs are concatenated on dim 2."""

    def __init__(self, x1_dim, x2_dim, x3_dims, att_cnt, prefix='deep_att', opt=None, dropout=None):
        super(DeepAttentionWrapper, self).__init__()
        self.opt = ({} if (opt is None) else opt)
        self.prefix = prefix
        self.x1_dim = x1_dim
        self.x2_dim = x2_dim
        self.x3_dims = x3_dims
        if (dropout is None):
            # BUGFIX: previously read `opt.get(...)` on the raw parameter, which
            # raises AttributeError when opt is None; self.opt is always a dict.
            self.dropout = DropoutWrapper(self.opt.get('{}_dropout_p'.format(self.prefix), 0))
        else:
            self.dropout = dropout
        self.attn_list = nn.ModuleList()
        for i in range(0, att_cnt):
            # BUGFIX: use self.opt consistently (opt itself may be None).
            if self.opt['multihead_on']:
                attention = MultiheadAttentionWrapper(self.x1_dim, self.x2_dim, self.x3_dims[i], prefix, self.opt, dropout=dropout)
            else:
                attention = AttentionWrapper(self.x1_dim, self.x2_dim, self.x3_dims[i], prefix, self.opt, self.dropout)
            self.attn_list.append(attention)

    def forward(self, x1, x2, x3, x2_mask):
        """Apply each attention layer to (x1, x2, x3[i]) and concat along dim 2."""
        rvl = []
        for i in range(0, len(x3)):
            hiddens = self.attn_list[i](x1, x2, x3[i], x2_mask)
            rvl.append(hiddens)
        return torch.cat(rvl, 2)
def stable_softmax(t, dim=-1):
    """Numerically stable softmax: subtract the per-`dim` max before exponentiating."""
    shifted = t - t.amax(dim=dim, keepdim=True)
    return shifted.softmax(dim=dim)
class PolicyOutput(QtWidgets.QWidget):
    """Debug window that visualises a policy's Q-value map for the current env
    state and lets the user apply the policy's chosen action."""

    def __init__(self, main_window, policy):
        super().__init__()
        self.main_window = main_window
        self.policy = policy
        # Latest (i, j, action) chosen by the policy; populated in refresh().
        self.action = None
        self.q_value_map = None
        self.q_value_map_image = QtWidgets.QLabel()
        self.setWindowTitle('Policy Output')
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(10, 10, 10, 10)
        layout.addWidget(self.q_value_map_image)
        use_policy_action_button = QtWidgets.QPushButton('Use policy action')
        use_policy_action_button.clicked.connect(self.use_policy_action)
        layout.addWidget(use_policy_action_button)

    def use_policy_action(self):
        # Forward the most recently computed policy action to the main window.
        self.main_window.store_new_action(*self.action)

    def refresh(self):
        """Re-run the policy on the current env state and redraw the Q-value map."""
        if (not self.isVisible()):
            return
        state = self.main_window.env.get_state()
        (action, info) = self.policy.step(state, debug=True)
        policy_output = info['output']
        # NOTE(review): the loop keeps only the LAST non-None entry's action and
        # visualisation — presumably a single agent/object is active at a time;
        # confirm against the env's state layout.
        for (i, g) in enumerate(state):
            for (j, s) in enumerate(g):
                if (s is not None):
                    self.action = (i, j, action[i][j])
                    overhead_image = utils.get_overhead_image(s)
                    o = policy_output[i][j]
                    o = utils.to_uint8_image(utils.scale_min_max(o))
                    # One visualisation strip per output channel, stacked vertically.
                    policy_output_channels = [utils.get_output_visualization(overhead_image, o_chan) for o_chan in o]
                    policy_output_vis = utils.to_uint8_image(np.concatenate(policy_output_channels, axis=0))
                    self.q_value_map = policy_output_vis
                    self.q_value_map_image.setPixmap(image_to_pixmap(policy_output_vis))

    def closeEvent(self, _):
        # Keep the main window's menu checkbox in sync when the user closes us.
        self.main_window.show_policy_output.setChecked(False)
class Preset(IntEnum):
    # Integer-valued so values can be passed straight to an API expecting an
    # int mode. NOTE(review): names match librealsense's RS400 visual presets
    # (Custom/Default/Hand/HighAccuracy/HighDensity/MediumDensity) — confirm
    # the numeric mapping against the consuming API before relying on it.
    Custom = 0
    Default = 1
    Hand = 2
    HighAccuracy = 3
    HighDensity = 4
    MediumDensity = 5
def create_encoder(Module):
    """Class factory: wrap encoder base class `Module` so that forward()
    returns the (local, conv, multi, hidden, global) feature tuple, with the
    index arguments selecting which intermediate layer feeds each output."""
    class Encoder(Module):
        def __init__(self, *args, local_idx=None, multi_idx=None, conv_idx=None, fc_idx=None, **kwargs):
            super().__init__(*args, **kwargs)
            if (local_idx is None):
                raise ValueError('`local_idx` must be set')
            # conv features default to the same layer as the local features.
            conv_idx = (conv_idx or local_idx)
            self.local_idx = local_idx
            self.multi_idx = multi_idx
            self.conv_idx = conv_idx
            self.fc_idx = fc_idx
        def forward(self, x: torch.Tensor):
            # The base class is expected to accept return_full_list=True and
            # yield either (conv, fc) or (conv_before, res, conv_after, fc)
            # lists of per-layer outputs — confirm against the base encoder.
            outs = super().forward(x, return_full_list=True)
            if (len(outs) == 2):
                (conv_out, fc_out) = outs
            else:
                # List concatenation: one flat sequence of conv-layer outputs.
                (conv_before_out, res_out, conv_after_out, fc_out) = outs
                conv_out = ((conv_before_out + res_out) + conv_after_out)
            local_out = conv_out[self.local_idx]
            if (self.multi_idx is not None):
                multi_out = conv_out[self.multi_idx]
            else:
                multi_out = None
            if (len(fc_out) > 0):
                if (self.fc_idx is not None):
                    hidden_out = fc_out[self.fc_idx]
                else:
                    hidden_out = None
                # Global representation = last fully-connected output.
                global_out = fc_out[(- 1)]
            else:
                hidden_out = None
                global_out = None
            conv_out = conv_out[self.conv_idx]
            return (local_out, conv_out, multi_out, hidden_out, global_out)
    return Encoder
def _cfg(url='', **kwargs):
    """Default pretrained-model config dict; any field may be overridden via kwargs."""
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bilinear',
        'mean': IMAGENET_INCEPTION_MEAN,
        'std': IMAGENET_INCEPTION_STD,
        'first_conv': 'stem.conv',
        'classifier': 'head.fc',
    }
    cfg.update(kwargs)
    return cfg
class ImageDataset():
    """Single-image 'dataset': loads one RGB image, normalises it, and exposes
    a one-element dataloader of [tensor, path] pairs."""

    def __init__(self, image_path, resize=None):
        self.image_path = image_path
        # Basename including extension.
        self.name = image_path.split('/')[(- 1)]
        with open(image_path, 'rb') as f:
            img = Image.open(f)
            # convert() forces a full decode, so the file can close safely.
            img = img.convert('RGB')
        if (resize is not None):
            transform_resize = pth_transforms.Compose([pth_transforms.ToTensor(), pth_transforms.Resize(resize), pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
            img = transform_resize(img)
            # (width, height): the tensor's trailing (H, W) dims reversed.
            self.img_size = list(img.shape[(- 1):(- 3):(- 1)])
        else:
            # NOTE(review): `transform` is a module-level transform defined
            # elsewhere in this file — confirm it normalises like the branch above.
            img = transform(img)
            self.img_size = list(img.shape[(- 1):(- 3):(- 1)])
        self.dataloader = [[img, image_path]]

    def get_image_name(self, *args, **kwargs):
        # Basename without extension, e.g. '/a/b/cat.jpg' -> 'cat'.
        return self.image_path.split('/')[(- 1)].split('.')[0]

    def load_image(self, *args, **kwargs):
        # Reload from disk, resized to the recorded (width, height).
        return Image.open(self.image_path).convert('RGB').resize(self.img_size)
class BlenderbotSmallPreTrainedModel(metaclass=DummyObject):
    # Placeholder class used when torch is unavailable: instantiation raises a
    # helpful error via requires_backends instead of failing at import time.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def register_all_pascal_voc(root='datasets'):
    """Register the standard PASCAL VOC 2007/2012 splits and tag each with the
    'pascal_voc' evaluator type in the metadata catalog."""
    splits = (
        ('voc_2007_trainval', 'VOC2007', 'trainval'),
        ('voc_2007_train', 'VOC2007', 'train'),
        ('voc_2007_val', 'VOC2007', 'val'),
        ('voc_2007_test', 'VOC2007', 'test'),
        ('voc_2012_trainval', 'VOC2012', 'trainval'),
        ('voc_2012_train', 'VOC2012', 'train'),
        ('voc_2012_val', 'VOC2012', 'val'),
    )
    for name, dirname, split in splits:
        year = 2007 if '2007' in name else 2012
        register_pascal_voc(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = 'pascal_voc'
def create_reconstruction_model(energy_mdl):
    """Build a denoising/reconstruction model from an energy model.

    Gaussian noise is added to the input and the reconstruction is one
    gradient step of the energy w.r.t. the noisy input (x - dE/dx), computed
    inside a Lambda layer so it stays part of the Keras graph.
    """
    x_in = Input(batch_shape=energy_mdl.input_shape)
    x = GaussianNoise(stddev=0.5)(x_in)
    energy = energy_mdl(x)
    # NOTE(review): K.gradients returns a LIST of tensors; `args[1] - [...]`
    # relies on backend broadcasting over that list — confirm it is intended.
    rec = Lambda((lambda args: (args[1] - K.gradients(args[0], args[1]))), output_shape=energy_mdl.input_shape[1:])([energy, x])
    return Model(x_in, rec)
def generate_paper_results(configurations, mode='experiment', save_dir=None, determinize=False):
    """Run every (mangle_method, article_section_set, config) triple and return
    the list of (val_metrics, attn_metrics) pairs.

    Data is cached per `article_sections` value so repeated configurations do
    not reload/vectorise the corpus. When `save_dir` is given, the best model,
    decoded test predictions, and a results CSV are written per configuration.
    """
    results_list = []
    data_dict = {}
    for (mangle_method, article_section_set, current_config) in configurations:
        if (current_config.article_sections not in data_dict):
            data_dict[current_config.article_sections] = get_data(article_section_set, mode=mode, include_sentence_span_splits=((current_config.data_config == 'scan_net') or (current_config.data_config == 'scan_net_ICO')))
        (real_train_Xy, real_val_Xy, real_test_Xy, inference_vectorizer) = data_dict[current_config.article_sections]
        print('Current configuration: ', current_config)
        (best_model, val_metrics, attn_metrics, final_test_preds) = run(real_train_Xy, real_val_Xy, real_test_Xy, inference_vectorizer, mangle_method, current_config, determinize=determinize)
        results_list.append((val_metrics, attn_metrics))
        if (save_dir is not None):
            # Abbreviate the config's repr into a filesystem-safe run name.
            config_name = str(current_config).replace(' ', '').replace('(', '').replace(')', '').replace('Config', '').replace('article_sections', 'as').replace('ico_encoder', 'icoe').replace('tokenwise_attention', 'twattn').replace('cond_attn', 'ca').replace('batch_size', 'bs').replace('data_config', 'dc').replace('pretrain_attention', 'pta').replace('tune_embeddings', 'te').replace('no_pretrained_word_embeddings', 'nptwe').replace('False', 'F').replace('True', 'T').replace("'", '').replace('"', '')
            # dill handles lambdas/closures the default pickler cannot.
            torch.save(best_model, os.path.join(save_dir, (config_name + '.pkl')), pickle_module=dill)
            with open(os.path.join(save_dir, (config_name + '.decoded.csv')), 'w') as of:
                for (_, p_id, pred) in final_test_preds:
                    # Predictions are shifted back by 1 to the original label range.
                    of.write((','.join([str(p_id), str((pred - 1))]) + '\n'))
            results_to_csv(current_config, val_metrics, attn_metrics, os.path.join(save_dir, (config_name + '.results.csv')))
    return results_list
def bilinear_form_Potts_C(X1, X2, couplings):
    """Batched bilinear form x1' J x2 for Potts/categorical variables.

    For each batch b: sum over (n1, n2) of couplings[n1, n2, X1[b, n1], X2[b, n2]].
    `X1` (B, N1) and `X2` (B, N2) hold integer category indices; `couplings`
    has shape (N1, N2, q1, q2). Structured (per-(b, n1) buffer + prange loops)
    for numba parallel compilation: each prange iteration writes a distinct
    buffer cell, so the loops are race-free.
    """
    B = X1.shape[0]
    N1 = couplings.shape[0]
    N2 = couplings.shape[1]
    # BUGFIX: removed a dead `out = np.zeros(B, ...)` allocation that was
    # immediately overwritten by the reduction below.
    out_buffer = np.zeros([B, N1], dtype=curr_float)
    for b in prange(B):
        for n1 in prange(N1):
            for n2 in range(N2):
                out_buffer[(b, n1)] += couplings[(n1, n2, X1[(b, n1)], X2[(b, n2)])]
    out = np.sum(out_buffer, 1)
    return out
def train_model(model, dataset, evaluator, early_stop, logger, config):
    """Reset early stopping, then delegate training to the model itself.

    Returns the (test_score, train_time) pair produced by `model.train_model`.
    """
    logger.info('train start ... !')
    early_stop.initialize()
    result = model.train_model(dataset, evaluator, early_stop, logger, config)
    test_score, train_time = result
    return (test_score, train_time)
def parse_multisite(line: str) -> "Optional[Multisite]":
    """Parse one multisite definition line.

    Expected format: `source_site dx1 dy1 dx2 dy2 ...` (a trailing comment is
    stripped by `drop_comment`). Returns None for blank/comment-only lines.

    BUGFIX (annotation only): the blank-line path returns None, so the return
    type is Optional[Multisite], not Multisite.
    """
    line = drop_comment(line)
    if (not line):
        return None
    words = line.split()
    source_site = int(words[0])
    # Displacements alternate dx, dy starting at words[1].
    dx = [int(x) for x in words[1::2]]
    dy = [int(x) for x in words[2::2]]
    return Multisite(source_site, dx, dy)
def build_matcap_nodes(node_tree: bpy.types.NodeTree, image_path: str) -> None:
    """Wire up a MatCap material in `node_tree`: camera-space normals drive
    the UVs of `image_path`'s texture, which is emitted directly (unlit)."""
    tex_coord_node = node_tree.nodes.new(type='ShaderNodeTexCoord')
    vector_transform_node = node_tree.nodes.new(type='ShaderNodeVectorTransform')
    mapping_node = node_tree.nodes.new(type='ShaderNodeMapping')
    texture_image_node = create_texture_node(node_tree, image_path, True)
    emmission_node = node_tree.nodes.new(type='ShaderNodeEmission')
    output_node = node_tree.nodes.new(type='ShaderNodeOutputMaterial')
    # Group the UV-generating nodes visually under one frame.
    create_frame_node(node_tree, (tex_coord_node, vector_transform_node, mapping_node), name='MatCap UV', label='MatCap UV')
    # Object-space normal -> camera space, so the texture follows the view.
    vector_transform_node.vector_type = 'VECTOR'
    vector_transform_node.convert_from = 'OBJECT'
    vector_transform_node.convert_to = 'CAMERA'
    mapping_node.vector_type = 'TEXTURE'
    # Blender 2.81 moved the Mapping node's location/scale from node
    # properties to input sockets; support both APIs.
    if (bpy.app.version >= (2, 81, 0)):
        mapping_node.inputs['Location'].default_value = (1.0, 1.0, 0.0)
        mapping_node.inputs['Scale'].default_value = (2.0, 2.0, 1.0)
    else:
        mapping_node.translation = (1.0, 1.0, 0.0)
        mapping_node.scale = (2.0, 2.0, 1.0)
    # Chain: normal -> transform -> mapping -> texture -> emission -> output.
    node_tree.links.new(tex_coord_node.outputs['Normal'], vector_transform_node.inputs['Vector'])
    node_tree.links.new(vector_transform_node.outputs['Vector'], mapping_node.inputs['Vector'])
    node_tree.links.new(mapping_node.outputs['Vector'], texture_image_node.inputs['Vector'])
    node_tree.links.new(texture_image_node.outputs['Color'], emmission_node.inputs['Color'])
    node_tree.links.new(emmission_node.outputs['Emission'], output_node.inputs['Surface'])
    arrange_nodes(node_tree)
class FusedMBConv(nn.Module):
    """EfficientNetV2 fused MBConv block: the expand + depthwise convs of
    MBConv are fused into a single regular conv (plus a 1x1 projection when
    expanding). Uses an explicit fixed-padding layer ahead of the conv."""

    def __init__(self, cnf: FusedMBConvConfig, stochastic_depth_prob: float, norm_layer: Callable[(..., nn.Module)]) -> None:
        super().__init__()
        if (not (1 <= cnf.stride <= 2)):
            raise ValueError('illegal stride value')
        # Residual connection only when the output shape is preserved.
        self.use_res_connect = ((cnf.stride == 1) and (cnf.input_channels == cnf.out_channels))
        # Keys come out as 'padding', '0', '1', ... since str(len(layers) - 1)
        # is evaluated after the 'padding' entry is already inserted.
        layers: collections.OrderedDict[(str, nn.Module)] = collections.OrderedDict()
        activation_layer = nn.SiLU
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        # Optional bottom/right shift mirrors TF-style asymmetric 'SAME' padding.
        shifts = ((1, 1) if cnf.bottomright_stride else (0, 0))
        layers['padding'] = fixed_padding_layer(cnf.kernel, shifts=shifts)
        if (expanded_channels != cnf.input_channels):
            # Fused expand conv followed by a linear 1x1 projection.
            layers[str((len(layers) - 1))] = Conv2dNormActivation(cnf.input_channels, expanded_channels, kernel_size=cnf.kernel, stride=cnf.stride, norm_layer=norm_layer, activation_layer=activation_layer, padding=0)
            layers[str((len(layers) - 1))] = Conv2dNormActivation(expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None)
        else:
            # No expansion: a single conv does all the work.
            layers[str((len(layers) - 1))] = Conv2dNormActivation(cnf.input_channels, cnf.out_channels, kernel_size=cnf.kernel, stride=cnf.stride, norm_layer=norm_layer, activation_layer=activation_layer, padding=0)
        self.block = nn.Sequential(layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, 'row')
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            # Stochastic depth randomly zeroes the block's contribution per row.
            result = self.stochastic_depth(result)
            result += input
        return result
# NOTE(review): a decorator was mangled here during extraction — the original
# line read `(before=[init], after=[post])`, which is not valid Python on its
# own. It was presumably `@<runner>(before=[init], after=[post])` from the
# experiment framework; restore the decorator name from the upstream source.
def con_train_wbglobal():
    """Experiment definition: train the CRF-decoder model on `wb_aligned` data
    with global posterior regularisation, sweeping `pr_coef` via grid_search."""
    USR.set('dataset', 'data/wb_aligned/')
    USR.set('decoder', 'crf')
    USR.set('L', '8')
    USR.set('layers', '2')
    USR.set('min_epochs', '8')
    USR.set('weight_decay', '0.0')
    USR.set('posterior_reg', '1')
    # %(...)s placeholders are filled from the ALL() environment mapping;
    # {config} is substituted later, per grid-search point.
    command = ('%(S_python_itrptr)s %(S_python_dir)s/train.py --data %(U_dataset)s --save %(S_model)s/{config} --save_out %(S_output)s/{config} --epoch 55 --data_mode real --optim_algo 1 --L %(U_L)s --decoder %(U_decoder)s --cuda --one_rnn --sep_attn --option train' % ALL())
    command += ' --posterior_reg 1 --layers 2 --train_q_epoch 5 --weight_decay 0.0 --full_independence 3'
    # '|' denotes alternative values swept by grid_search (e.g. pr_coef 25|15|5).
    search_list = [('pr_reg_style', 'wb:global'), ('bsz', '10'), ('pr_coef', '25|15|5'), ('hard_code', 'no'), ('decoder_constraint', 'no'), ('encoder_constraint', 'yes'), ('tagset_size', '70'), ('max_mbs_per_epoch', '25000'), ('use_elmo', 'no'), ('elmo_style', '1'), ('seed', '0'), ('thresh', '1000'), ('hidden_dim', '500'), ('embedding_dim', '400'), ('lr_p', '0.0005'), ('lr_q', '0.001'), ('sample_size', '3'), ('dual_attn', 'yes'), ('trans_unif', 'yes')]
    grid_search((lambda map: basic_func(command, map)), search_list, seed=1)
    return
def get_detection_weight(n):
    """Per-class detection weights from per-class counts `n` (1-D tensor).

    With a = (total - n) / n (the other-to-this class ratio), the weight is
    a * log((1 + a) / a), which shrinks for frequent classes.
    Returns shape (1, len(n)).
    """
    ratio = (n.sum() - n) / n
    weight = ratio * ((1 + ratio) / ratio).log()
    return weight[None]
def lowercase_and_remove_accent(text):
    """Lowercase a token list and strip combining accents.

    Tokens are joined on single spaces, lowercased, NFD-normalised, and every
    combining mark (Unicode category 'Mn') is dropped; the result is split
    back on single spaces so output tokens align with the input tokens.
    """
    joined = unicodedata.normalize('NFD', ' '.join(text).lower())
    kept = [ch for ch in joined if unicodedata.category(ch) != 'Mn']
    return ''.join(kept).lower().split(' ')
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
    """Dropout applied only when `keep_prob < 1`, gated at run time by `is_train`.

    `is_train` is a boolean tensor: tf.cond selects the dropped tensor during
    training and the identity otherwise. With keep_prob >= 1 the input is
    returned unchanged.
    """
    with tf.name_scope(name or 'dropout'):
        if keep_prob >= 1.0:
            return x
        dropped = tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
        return tf.cond(is_train, lambda: dropped, lambda: x)
class _FP16OptimizerMixin(object):
    """Mixin maintaining an FP32 master copy of FP16 parameters.

    Gradients are computed in FP16, synced into the FP32 copy (optionally a
    single flat buffer per device), scaled/clipped there, stepped by the
    wrapped `fp32_optimizer`, and the updated values copied back to FP16.

    BUGFIX: `has_flat_params` is restored as a @property and
    `build_fp32_params` as a @classmethod — the decorators had been stripped,
    which made `if self.has_flat_params:` test the bound method object
    (always truthy) and therefore always take the flat-params code path.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Deferred multiplier folded into the next unscale/step: accumulates
        # loss-scale inverse, multiply_grads factors and the clip coefficient.
        self._multiply_factor = 1.0

    @property
    def has_flat_params(self):
        """True when fp32_params is a flat tensor (or per-device dict of tensors)."""
        return (torch.is_tensor(self.fp32_params) or (isinstance(self.fp32_params, dict) and all((torch.is_tensor(t) for t in self.fp32_params.values()))))

    @classmethod
    def build_fp32_params(cls, args, params, flatten=True):
        """Create the FP32 master parameters.

        With `flatten`, one flat FP32 buffer per device (keyed by device
        index) packs all params; otherwise a list with one FP32 Parameter per
        FP16 param, preserving any `param_group` attribute.
        """
        if flatten:
            is_pipeline_parallel = (getattr(args, 'pipeline_model_parallel', False) and getattr(args, 'distributed_no_spawn', False))
            total_param_size = sum((p.data.numel() for p in params))
            devices = [torch.cuda.current_device()]
            if is_pipeline_parallel:
                devices = list(set(args.pipeline_devices))
            fp32_params = {}
            for device in devices:
                if is_pipeline_parallel:
                    device_param_size = sum((p.data.numel() for p in params if (p.device.index == device)))
                    device_params = [p for p in params if (p.device.index == device)]
                else:
                    device_param_size = total_param_size
                    device_params = params
                # Allocate the flat FP32 buffer and pack every param into it.
                fp32_params[device] = device_params[0].new(0).float().new(device_param_size)
                offset = 0
                for p in device_params:
                    numel = p.data.numel()
                    fp32_params[device][offset:(offset + numel)].copy_(p.data.view((- 1)))
                    offset += numel
                fp32_params[device] = torch.nn.Parameter(fp32_params[device])
                fp32_params[device].grad = fp32_params[device].data.new(device_param_size)
            return fp32_params
        else:
            fp32_params = []
            for p in params:
                p32 = torch.nn.Parameter(p.data.float())
                p32.grad = torch.zeros_like(p32.data)
                if hasattr(p, 'param_group'):
                    p32.param_group = p.param_group
                fp32_params.append(p32)
            return fp32_params

    def state_dict(self):
        """FP32 optimizer state, plus the current loss scale when scaling."""
        state_dict = self.fp32_optimizer.state_dict()
        if (self.scaler is not None):
            state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Restore optimizer state (and loss scale when present and scaling)."""
        if (('loss_scale' in state_dict) and (self.scaler is not None)):
            self.scaler.loss_scale = state_dict['loss_scale']
        self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)

    def backward(self, loss):
        """Scale the loss (when scaling) and backpropagate; marks grads dirty."""
        if (self.scaler is not None):
            loss = self.scaler.scale(loss)
        loss.backward()
        self._needs_sync = True

    def _sync_fp16_grads_to_fp32(self):
        # Copy FP16 grads into the FP32 master grads (no-op when already synced).
        if self._needs_sync:
            if self.has_flat_params:
                # Pack per-device grads contiguously into each flat grad buffer;
                # params without a grad contribute zeros.
                devices = list(self.fp32_params.keys())
                device_params_dict = defaultdict(list)
                for p in self.fp16_params:
                    if p.requires_grad:
                        device_params_dict[p.device.index].append(p)
                for device in devices:
                    device_params = device_params_dict[device]
                    offset = 0
                    for p in device_params:
                        grad_data = (p.grad.data if (p.grad is not None) else p.data.new_zeros(p.data.shape))
                        numel = grad_data.numel()
                        self.fp32_params[device].grad.data[offset:(offset + numel)].copy_(grad_data.view((- 1)))
                        offset += numel
            else:
                for (p, p32) in zip(self.fp16_params, self.fp32_params):
                    if (not p.requires_grad):
                        continue
                    if (p.grad is not None):
                        if (p32.grad is None):
                            p32.grad = p.grad.data.float()
                        else:
                            p32.grad.data.copy_(p.grad.data)
                    else:
                        p32.grad = torch.zeros_like(p.data, dtype=torch.float)
            self._needs_sync = False

    def _sync_fp32_params_to_fp16(self):
        # Copy updated FP32 master values back into the FP16 params.
        if self.has_flat_params:
            devices = list(self.fp32_params.keys())
            device_params_dict = defaultdict(list)
            for p in self.fp16_params:
                device_params_dict[p.device.index].append(p)
            for device in devices:
                device_params = device_params_dict[device]
                offset = 0
                for p in device_params:
                    numel = p.data.numel()
                    p.data.copy_(self.fp32_params[device].data[offset:(offset + numel)].view_as(p.data))
                    offset += numel
        else:
            for (p, p32) in zip(self.fp16_params, self.fp32_params):
                if (not p.requires_grad):
                    continue
                p.data.copy_(p32.data)

    def _unscale_grads(self):
        self._sync_fp16_grads_to_fp32()
        # Apply (and reset) the accumulated deferred multiplier to the grads.
        if (torch.is_tensor(self._multiply_factor) or (self._multiply_factor != 1.0)):
            self.fp32_optimizer.multiply_grads(self._multiply_factor)
            self._multiply_factor = 1.0

    def multiply_grads(self, c):
        """Defer a grad multiplication by `c` to the next unscale/step."""
        self._multiply_factor *= c

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clip grad norm to `max_norm` (0 disables) and return the unclipped norm.

        The FP32 optimizer reports the raw norm (called with max_norm=0);
        clipping is folded into the deferred multiplier instead of rescaling
        the grads immediately.
        """
        self._sync_fp16_grads_to_fp32()
        grad_norm = (self._multiply_factor * self.fp32_optimizer.clip_grad_norm(0, aggregate_norm_fn))
        if (self.scaler is not None):
            if (grad_norm > max_norm > 0.0):
                self._multiply_factor *= (max_norm / grad_norm)
            # Lets the dynamic loss scaler react to inf/nan norms.
            self.scaler.check_overflow(grad_norm)
        elif (max_norm > 0.0):
            clip_coef = (max_norm / (grad_norm + 1e-06)).clamp_(max=1)
            self._multiply_factor *= clip_coef
        return grad_norm

    def step(self, closure=None, groups=None):
        """Sync grads, apply deferred scaling, step the FP32 optimizer, and copy
        the results back to FP16."""
        self._sync_fp16_grads_to_fp32()
        if getattr(self, 'supports_step_with_scale', False):
            # The optimizer applies the inverse scale itself during the step.
            self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
        else:
            self._unscale_grads()
            self.fp32_optimizer.step(closure, groups=groups)
        if (self.scaler is not None):
            self.scaler.update()
        self._sync_fp32_params_to_fp16()

    def zero_grad(self):
        """Clear FP16 and FP32 grads and reset the deferred loss-scale factor."""
        for p in self.fp16_params:
            p.grad = None
        if self.has_flat_params:
            if torch.is_tensor(self.fp32_params):
                self.fp32_params.grad.zero_()
            elif isinstance(self.fp32_params, dict):
                for fp32_params in self.fp32_params.values():
                    fp32_params.grad.zero_()
            else:
                raise RuntimeError('self.fp32_params must be a tensor or dict')
        else:
            for p32 in self.fp32_params:
                if (p32.grad is not None):
                    p32.grad.zero_()
        self._needs_sync = False
        if (self.scaler is not None):
            # Pre-seed the next backward's unscale with the inverse loss scale.
            self._multiply_factor = (1.0 / float(self.scaler.loss_scale))
def extract_frames_method2(video_path):
    """Extract frames from `video_path` (relative to VIDEOS_DIR) by seeking one
    second at a time, saving every RATE-th sampled frame as a JPEG under
    FRAMES_SAVE_DIR/<video name>/frame_NNNNNN.jpg. Returns True when done."""
    video_path = video_path.replace('\n', '')
    video_fname = Path(video_path).name
    x = str((VIDEOS_DIR / video_path))
    vidcap = cv2.VideoCapture(x)
    frame = 0
    success = True
    while success:
        curr_frame_str = str(frame).zfill(6)
        # Seek by timestamp: `frame` counts seconds (frame * 1000 ms), not raw
        # frame indices, so one sample is taken per second of video.
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (frame * 1000))
        (success, image) = vidcap.read()
        if (not success):
            break
        if ((frame % RATE) == 0):
            frame_save_path = ((FRAMES_SAVE_DIR / video_fname) / f'frame_{curr_frame_str}.jpg')
            if (not os.path.exists(frame_save_path.parent)):
                os.makedirs(frame_save_path.parent)
            frame_save_path = str(frame_save_path)
            cv2.imwrite(frame_save_path, image)
        frame += 1
    return True
class DummyDataset(data.Dataset):
    """Tiny fixed dataset (10 copies of one tokenized prompt) for smoke tests."""

    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        # Models without a pad token (e.g. GPT-style) reuse EOS for padding.
        if (getattr(self.tokenizer, 'pad_token', None) is None):
            self.tokenizer.pad_token = self.tokenizer.eos_token
        self.max_prompt_length = 128
        self.max_length = 256
        question = 'you are a AI assistant that created by Intel.'
        # `chosen` is currently unused — presumably kept for parity with a
        # paired chosen/rejected response format; confirm before removing.
        chosen = 'intel-extension-for-transformers is based in SH'
        self.encoded_dict = {}
        query = (('Question: ' + question) + '\n\nAnswer: ')
        tokenized_question = self.tokenizer(query, truncation=True)
        self.encoded_dict['query'] = query
        self.encoded_dict['input_ids'] = torch.tensor(tokenized_question['input_ids'])

    def __len__(self):
        return 10

    def __getitem__(self, index):
        # NOTE(review): implicitly returns None for index >= 10; __len__ keeps
        # well-behaved loaders in range, but an explicit IndexError would be
        # safer — confirm callers before changing.
        if (index < 10):
            return self.encoded_dict
def get_model_parallel_world_size():
    """Return the number of ranks in the model-parallel process group."""
    return torch.distributed.get_world_size(group=get_model_parallel_group())
class EfficientNet(nn.Module):
    """EfficientNet/EfficientNetV2 backbone built from MBConv / FusedMBConv
    stage configs, with explicit fixed-padding layers ahead of strided convs."""

    def __init__(self, inverted_residual_setting: Sequence[Union[(MBConvConfig, FusedMBConvConfig)]], dropout: float, stochastic_depth_prob: float=0.2, num_classes: int=1000, norm_layer: Optional[Callable[(..., nn.Module)]]=None, last_channel: Optional[int]=None, **kwargs: Any) -> None:
        """
        Args:
            inverted_residual_setting: per-stage block configurations.
            dropout: dropout probability before the final classifier.
            stochastic_depth_prob: max drop prob, scaled linearly per block.
            num_classes: classifier output size.
            norm_layer: normalization layer factory (default nn.BatchNorm2d).
            last_channel: channels of the final 1x1 conv (default 4x last stage).
        """
        super().__init__()
        _log_api_usage_once(self)
        if (not inverted_residual_setting):
            raise ValueError('The inverted_residual_setting should not be empty')
        elif (not (isinstance(inverted_residual_setting, Sequence) and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting]))):
            raise TypeError('The inverted_residual_setting should be List[MBConvConfig]')
        if ('block' in kwargs):
            # Legacy escape hatch kept for backward compatibility.
            warnings.warn("The parameter 'block' is deprecated since 0.13 and will be removed 0.15. Please pass this information on 'MBConvConfig.block' instead.")
            if (kwargs['block'] is not None):
                for s in inverted_residual_setting:
                    if isinstance(s, MBConvConfig):
                        s.block = kwargs['block']
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        # Keys come out as 'padding', '0', '1', ... since str(len(layers) - 1)
        # is evaluated after the 'padding' entry is already inserted.
        layers: collections.OrderedDict[(str, nn.Module)] = collections.OrderedDict()
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers['padding'] = fixed_padding_layer(3)
        layers[str((len(layers) - 1))] = Conv2dNormActivation(3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU, padding=0)
        total_stage_blocks = sum((cnf.num_layers for cnf in inverted_residual_setting))
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                block_cnf = copy.copy(cnf)
                # Only the first block of a stage downsamples / changes width.
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1
                    block_cnf.bottomright_stride = False
                # Stochastic-depth probability grows linearly with block depth.
                sd_prob = ((stochastic_depth_prob * float(stage_block_id)) / total_stage_blocks)
                stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1
            layers[str((len(layers) - 1))] = nn.Sequential(*stage)
        lastconv_input_channels = inverted_residual_setting[(- 1)].out_channels
        lastconv_output_channels = (last_channel if (last_channel is not None) else (4 * lastconv_input_channels))
        layers[str((len(layers) - 1))] = Conv2dNormActivation(lastconv_input_channels, lastconv_output_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.SiLU)
        self.features = nn.Sequential(layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), nn.Linear(lastconv_output_channels, num_classes))
        # Standard EfficientNet weight initialisation.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = (1.0 / math.sqrt(m.out_features))
                nn.init.uniform_(m.weight, (- init_range), init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
class ResNet18_128(ResNetBase):
    """ResNet-18 layout (2-2-2-2 BasicBlocks) with the first two stages
    widened to 128 channels."""
    BLOCK = BasicBlock
    PLANES = (128, 128, 256, 512)
    LAYERS = (2, 2, 2, 2)
class CaffeSoftmaxLayer(CaffeLayerGenerator):
    """Emits a Caffe `Softmax` layer definition into a prototxt stream."""

    def __init__(self, name):
        super(CaffeSoftmaxLayer, self).__init__(name, 'Softmax')

    def write(self, f):
        # Softmax takes no extra parameters, so the template slot is filled with ''.
        f.write(self.get_template().format(''))
def configurable(init_func):
    """Decorator for `__init__` so the class can be constructed either from
    explicit arguments or from a config object via the class's `from_config`
    classmethod (detected with `_called_with_cfg`).

    BUGFIX: restores the `functools.wraps(init_func)` application — it had
    been reduced to a bare `(init_func)` expression (decorator stripped
    during extraction), so `wrapped` lost `__init__`'s name/signature/doc.
    """
    from functools import wraps

    assert (init_func.__name__ == '__init__'), ' should only be used for __init__!'
    if init_func.__module__.startswith('detectron2.'):
        assert ((init_func.__doc__ is not None) and ('experimental' in init_func.__doc__)), f'configurable {init_func} should be marked experimental'

    @wraps(init_func)
    def wrapped(self, *args, **kwargs):
        try:
            from_config_func = type(self).from_config
        except AttributeError:
            raise AttributeError("Class with must have a 'from_config' classmethod.")
        if (not inspect.ismethod(from_config_func)):
            raise TypeError("Class with must have a 'from_config' classmethod.")
        if _called_with_cfg(*args, **kwargs):
            # Config path: expand the cfg into explicit __init__ arguments.
            explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
            init_func(self, **explicit_args)
        else:
            init_func(self, *args, **kwargs)
    return wrapped
def test_python_inherit_from_mi():
    """A Python subclass of the multiple-inheritance binding `m.MVF` keeps the
    bound base attributes (b..f) alongside a Python-added attribute g."""
    class PyMVF(m.MVF):
        g = 7
        def get_g_g(self):
            return self.g
    o = PyMVF()
    # Attributes b..f come from the (multiple) base classes of MVF.
    assert (o.b == 1)
    assert (o.c == 2)
    assert (o.d0 == 3)
    assert (o.d1 == 4)
    assert (o.e == 5)
    assert (o.f == 6)
    # g is resolved on the Python subclass, both directly and via a method.
    assert (o.g == 7)
    assert (o.get_g_g() == 7)
class SegnetEncoder(nn.Module):
    """SegNet-style encoder/decoder on a VGG16 topology; forward returns the
    decoder's `up2` features plus the first stage's unpooling bookkeeping."""

    def __init__(self, in_channels=3, is_unpooling=True):
        super(SegnetEncoder, self).__init__()
        self.in_channels = in_channels
        self.is_unpooling = is_unpooling
        # Downsampling path mirrors VGG16's five conv stages (2-2-3-3-3 convs).
        self.down1 = segnetDown2(self.in_channels, 64)
        self.down2 = segnetDown2(64, 128)
        self.down3 = segnetDown3(128, 256)
        self.down4 = segnetDown3(256, 512)
        self.down5 = segnetDown3(512, 512)
        # Upsampling path stops at up2; up1 is left to a downstream module.
        self.up5 = segnetUp3(512, 512)
        self.up4 = segnetUp3(512, 256)
        self.up3 = segnetUp3(256, 128)
        self.up2 = segnetUp2(128, 64)

    def forward(self, inputs, masks):
        """Run the down/up path; `masks` is passed through untouched."""
        (down1, indices_1, unpool_shape1) = self.down1(inputs)
        (down2, indices_2, unpool_shape2) = self.down2(down1)
        (down3, indices_3, unpool_shape3) = self.down3(down2)
        (down4, indices_4, unpool_shape4) = self.down4(down3)
        (down5, indices_5, unpool_shape5) = self.down5(down4)
        # Each up stage unpools with the matching stage's indices and shape.
        up5 = self.up5(down5, indices_5, unpool_shape5)
        up4 = self.up4(up5, indices_4, unpool_shape4)
        up3 = self.up3(up4, indices_3, unpool_shape3)
        up2 = self.up2(up3, indices_2, unpool_shape2)
        # Stage-1 indices/shape are returned so a caller can finish unpooling.
        return ([up2, indices_1, unpool_shape1], masks)

    def init_vgg16_params(self, vgg16):
        """Copy pretrained VGG16 conv weights into the down blocks, matching
        conv layers in order and asserting identical shapes."""
        blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]
        # `ranges` is kept for reference but unused by the copy loop below.
        ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
        features = list(vgg16.features.children())
        vgg_layers = []
        for _layer in features:
            if isinstance(_layer, nn.Conv2d):
                vgg_layers.append(_layer)
        merged_layers = []
        for (idx, conv_block) in enumerate(blocks):
            # First two stages have 2 convs, the rest have 3 — same as VGG16.
            if (idx < 2):
                units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]
            else:
                units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit, conv_block.conv3.cbr_unit]
            for _unit in units:
                for _layer in _unit:
                    if isinstance(_layer, nn.Conv2d):
                        merged_layers.append(_layer)
        assert (len(vgg_layers) == len(merged_layers))
        for (l1, l2) in zip(vgg_layers, merged_layers):
            if (isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d)):
                assert (l1.weight.size() == l2.weight.size())
                assert (l1.bias.size() == l2.bias.size())
                l2.weight.data = l1.weight.data
                l2.bias.data = l1.bias.data
def test_env_render_result_is_immutable():
    """Rendering in 'ansi' mode must yield a plain string for each environment."""
    from six import string_types
    env_list = [envs.make(name) for name in ('Taxi-v2', 'FrozenLake-v0', 'Reverse-v0')]
    for env in env_list:
        env.reset()
        rendered = env.render(mode='ansi')
        assert isinstance(rendered, string_types)
        env.close()
def allocate_buffers(engine):
    """Allocate pinned host + device buffers for every binding of a TensorRT engine.

    Returns (inputs, outputs, bindings, stream) where inputs/outputs are
    HostDeviceMem pairs and bindings is the list of device pointers.
    """
    inputs, outputs, bindings = [], [], []
    stream = cuda.Stream()
    for binding in engine:
        # Element count is per-binding shape times the max batch size.
        n_elems = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        np_dtype = trt.nptype(engine.get_binding_dtype(binding))
        host_buf = cuda.pagelocked_empty(n_elems, np_dtype)
        dev_buf = cuda.mem_alloc(host_buf.nbytes)
        bindings.append(int(dev_buf))
        bucket = inputs if engine.binding_is_input(binding) else outputs
        bucket.append(HostDeviceMem(host_buf, dev_buf))
    return (inputs, outputs, bindings, stream)
def get_neighbor_index(i, j):
    """Return the six neighbor [row, col] pairs of cell (i, j) in an offset grid.

    The neighbor pattern depends on the column parity (presumably a hex-grid
    offset layout -- even and odd columns are shifted relative to each other).
    Returns an empty list if j % 2 is neither 0 nor 1 (non-integer j).
    """
    even_col_offsets = ((-1, 0), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1))
    odd_col_offsets = ((-1, 0), (-1, 1), (0, 1), (1, 0), (0, -1), (-1, -1))
    parity = j % 2
    if parity == 0:
        offsets = even_col_offsets
    elif parity == 1:
        offsets = odd_col_offsets
    else:
        return []
    return [[i + di, j + dj] for (di, dj) in offsets]
class Storage(abc.ABC):
    """Abstract base class for buffer-backed storage.

    Subclasses are expected to define ``_capacity``, ``_size``, ``_starts``,
    ``_ends``, ``_lengths`` and ``_buffers`` (a dict-like collection of
    per-key arrays), and to implement `add` / `extend`.
    """

    def capacity(self):
        # Maximum number of items the storage can hold.
        return self._capacity

    def size(self):
        # Number of items currently stored.
        return self._size

    def starts(self):
        # NOTE(review): presumably episode/segment start indices -- confirm in subclasses.
        return self._starts

    def ends(self):
        # NOTE(review): presumably episode/segment end indices -- confirm in subclasses.
        return self._ends

    def lengths(self):
        return self._lengths

    def bytes(self):
        # Total memory footprint of the underlying buffers.
        return get_bytes(self._buffers)

    def save(self, path):
        """Save the first `_size` entries of the buffers to a ``.npz`` file at `path`."""
        assert (self._size != 0), 'Trying to save Storage with no data.'
        assert path.endswith('.npz'), 'Path given to `save` was bad. Must save in .npz format.'
        data = utils.get_from_batch(self._buffers, 0, self._size)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        return save_data(data, path)

    def __getitem__(self, key):
        # Dict-style access to a named buffer.
        return self._buffers[key]

    def __getattr__(self, name):
        # Unknown attributes are delegated to the buffer collection.
        # NOTE(review): if `_buffers` has not been assigned yet this recurses
        # infinitely; confirm subclasses set it first in __init__.
        return getattr(self._buffers, name)

    def __contains__(self, key):
        return (key in self._buffers)

    def add(self, data):
        # Subclasses must implement single-item insertion.
        raise NotImplementedError

    def extend(self, data):
        # Subclasses must implement batched insertion.
        raise NotImplementedError
class ImagesViewer(object):
    """Collects image files in a private temp directory and displays them.

    Images can be added either as in-memory arrays (written out via cv2) or
    as pre-existing file paths, then viewed as a slider, a video, or a list.
    """

    def __init__(self, temp_dir=None):
        base_dir = temp_dir if temp_dir is not None else tempfile.gettempdir()
        self.temp_dir = tempfile.mkdtemp(dir=base_dir)
        # Recreate the directory so it is guaranteed empty.
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)
        os.mkdir(self.temp_dir)
        self.count = 0
        self.filenames = []

    def add_image(self, image):
        """Write `image` into the temp dir under a zero-padded sequential name."""
        target = os.path.join(self.temp_dir, f'{self.count:09d}.png')
        self.filenames.append(target)
        cv2.imwrite(target, image)
        self.count += 1

    def add_image_filename(self, image_filename):
        """Register an already-existing image file without copying it."""
        self.filenames.append(image_filename)
        self.count += 1

    def view_as_slider(self):
        display_as_slider(self)

    def view_as_video(self, delay_secs):
        display_as_video(self, delay_secs=delay_secs)

    def view_as_list(self, legends):
        """Print each legend followed by its inline image."""
        for fname, caption in zip(self.filenames, legends):
            print(caption)
            with open(fname, 'rb') as fh:
                display(Image(fh.read()))
class ParserManager(object):
    """Hands out grammar parsers, either file-cached (debug) or pre-generated.

    In DEBUG_PARSER mode all requests are delegated to a ParserFileManager;
    otherwise the two pre-generated parsers (`init` and default) are reused
    and the default one is reconfigured per request.
    """

    def __init__(self, grammar_dir):
        if DEBUG_PARSER:
            # Debug mode: parsers are compiled from grammar files and cached on disk.
            self.parser_file_manager = ParserFileManager(grammar_dir)
            self.cache_dir = self.parser_file_manager.cache_dir
            self.grammar_dir = self.parser_file_manager.grammar_dir
            self.save_threads = self.parser_file_manager.save_threads
        else:
            # Release mode: instantiate the two generated parsers once.
            self.init_parser = grammarinitParser(semantics=grammarinitModelBuilderSemantics())
            self.default_parser = grammardefaultParser(semantics=grammardefaultModelBuilderSemantics())

    def get_parser(self, key, grammar, extra_dict=None):
        """Return a parser for `key`, reconfiguring the shared default parser.

        Fixed: `extra_dict` previously defaulted to a mutable `{}` shared
        across calls; it now defaults to None and is normalized here.
        """
        if extra_dict is None:
            extra_dict = {}
        if DEBUG_PARSER:
            return self.parser_file_manager.get_parser(key, grammar, extra_dict)
        if (key == 'init'):
            return self.init_parser
        self.modify_default_parser(extra_dict)
        return self.default_parser

    def set_test_mode(self):
        if DEBUG_PARSER:
            self.parser_file_manager.set_test_mode()

    def reload(self):
        if DEBUG_PARSER:
            self.parser_file_manager.reload()

    def modify_default_parser(self, extra_dict):
        """Reset the default parser's dynamic lists, then apply `extra_dict`.

        Recognized keys: 'ids' (identifiers), 'funcs' (user functions),
        'pkg' (builtin names; a literal 'e' enables the Euler constant and
        is excluded from the builtin list).
        """
        self.default_parser.new_id_list = []
        self.default_parser.new_func_list = []
        self.default_parser.builtin_list = []
        self.default_parser.const_e = False
        if ('ids' in extra_dict):
            self.default_parser.new_id_list = extra_dict['ids']
        if ('funcs' in extra_dict):
            self.default_parser.new_func_list = extra_dict['funcs']
        if ('pkg' in extra_dict):
            # Fixed: copy before removing 'e' so the caller's list is not
            # mutated as a side effect (the original called .remove in place).
            funcs_list = list(extra_dict['pkg'])
            if ('e' in funcs_list):
                self.default_parser.const_e = True
                funcs_list.remove('e')
            self.default_parser.builtin_list = funcs_list
def main():
    """Resume-aware training loop: train/test each epoch, log metrics, checkpoint."""
    (train_loader, test_loader, criterion, model, optimizer, scheduler, starting_epoch, logfilename, model_path, device, writer) = prologue(args)
    for epoch in range(starting_epoch, args.epochs):
        epoch_start = time.time()
        train_loss = train(train_loader, model, optimizer, epoch, args.noise_sd, device, writer)
        (test_loss, test_acc) = test(test_loader, model, criterion, epoch, args.noise_sd, device, writer, args.print_freq)
        elapsed = time.time() - epoch_start
        # Third column (0.0) keeps the log format aligned with runs that report train accuracy.
        log(logfilename, '{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(epoch, elapsed, scheduler.get_lr()[0], train_loss, 0.0, test_loss, test_acc))
        scheduler.step(epoch)
        checkpoint = {'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
        torch.save(checkpoint, model_path)
class FactorizedConv2DTucker(Layer):
    """2D convolution whose kernel is Tucker-factorized.

    The full (kh, kw, in, out) kernel is decomposed into an optional 1x1
    input projection (`input_components`), a spatial core kernel, and an
    optional 1x1 output projection (`output_components`).  Setting either
    component count to None disables the corresponding projection, so with
    both None this behaves like a standard Conv2D.

    Fixes vs. previous revision:
      * `get_config` serialized `kernel_initializer` under the
        'bias_initializer' key, so deserialized layers silently lost their
        bias initializer; it now serializes `bias_initializer`.
      * removed a duplicated `self.output_components` assignment.
    """

    def __init__(self, filters, kernel_size, input_components=None, output_components=None, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, pre_kernel_initializer='glorot_uniform', kernel_initializer='glorot_uniform', post_kernel_initializer='glorot_uniform', bias_initializer='zeros', pre_kernel_regularizer=None, kernel_regularizer=None, post_kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, pre_kernel_constraint=None, kernel_constraint=None, post_kernel_constraint=None, bias_constraint=None, **kwargs):
        super(FactorizedConv2DTucker, self).__init__(**kwargs)
        rank = 2  # spatial rank; this layer is 2D only
        self.rank = rank
        self.input_components = input_components
        self.output_components = output_components
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = K.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.pre_kernel_initializer = initializers.get(pre_kernel_initializer)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.post_kernel_initializer = initializers.get(post_kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.pre_kernel_regularizer = regularizers.get(pre_kernel_regularizer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.post_kernel_regularizer = regularizers.get(post_kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.pre_kernel_constraint = constraints.get(pre_kernel_constraint)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.post_kernel_constraint = constraints.get(post_kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=(rank + 2))

    def build(self, input_shape):
        """Create the (optional) projection kernels, the core kernel, and the bias."""
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape[channel_axis] is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        # When a component count is None the corresponding projection is skipped
        # and the core kernel works at full width.
        if (self.input_components is None):
            input_components = input_dim
        else:
            input_components = self.input_components
        if (self.output_components is None):
            output_components = self.filters
        else:
            output_components = self.output_components
        kernel_shape = (self.kernel_size + (input_components, output_components))
        if (self.input_components is None):
            self.pre_kernel = None
        else:
            pre_kernel_shape = ((1, 1) + (input_dim, self.input_components))
            self.pre_kernel = self.add_weight(pre_kernel_shape, initializer=self.pre_kernel_initializer, name='pre_kernel', regularizer=self.pre_kernel_regularizer, constraint=self.pre_kernel_constraint)
        self.kernel = self.add_weight(kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        if (self.output_components is None):
            self.post_kernel = None
        else:
            post_kernel_shape = ((1, 1) + (self.output_components, self.filters))
            self.post_kernel = self.add_weight(post_kernel_shape, initializer=self.post_kernel_initializer, name='post_kernel', regularizer=self.post_kernel_regularizer, constraint=self.post_kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight((self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=(self.rank + 2), axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        """Apply pre-projection -> core conv -> post-projection -> bias -> activation."""
        h = inputs
        if (self.pre_kernel is not None):
            h = K.conv2d(h, self.pre_kernel, strides=(1, 1), padding='valid', data_format=self.data_format, dilation_rate=(1, 1))
        h = K.conv2d(h, self.kernel, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
        if (self.post_kernel is not None):
            h = K.conv2d(h, self.post_kernel, strides=(1, 1), padding='valid', data_format=self.data_format, dilation_rate=(1, 1))
        outputs = h
        if self.use_bias:
            outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
        if (self.activation is not None):
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Same spatial arithmetic as a plain Conv2D with this layer's core kernel."""
        if (self.data_format == 'channels_last'):
            space = input_shape[1:(- 1)]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return (((input_shape[0],) + tuple(new_space)) + (self.filters,))
        if (self.data_format == 'channels_first'):
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return ((input_shape[0], self.filters) + tuple(new_space))

    def get_config(self):
        """Serialize all constructor arguments so the layer round-trips through from_config."""
        # Fixed: 'bias_initializer' previously serialized self.kernel_initializer.
        config = {'input_components': self.input_components, 'output_components': self.output_components, 'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'pre_kernel_initializer': initializers.serialize(self.pre_kernel_initializer), 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'post_kernel_initializer': initializers.serialize(self.post_kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'pre_kernel_regularizer': regularizers.serialize(self.pre_kernel_regularizer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'post_kernel_regularizer': regularizers.serialize(self.post_kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'pre_kernel_constraint': constraints.serialize(self.pre_kernel_constraint), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'post_kernel_constraint': constraints.serialize(self.post_kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint)}
        base_config = super(FactorizedConv2DTucker, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class ImageSurrogate(ImageImputer):
    """Surrogate model over superpixel-masked images.

    Wraps a PyTorch model that consumes an ``(image, mask)`` pair, where the
    mask `S` selects which superpixels are visible.  Provides two training
    loops: `train` fits the surrogate to supplied labels, and
    `train_original_model` fits it to the outputs of an original model.
    Both early-stop on validation loss and restore the best parameters.
    """

    def __init__(self, surrogate, width, height, superpixel_size):
        # surrogate: torch.nn.Module taking a (x, S) tuple -- see __call__.
        super().__init__(width, height, superpixel_size)
        self.surrogate = surrogate

    def train(self, train_data, val_data, batch_size, max_epochs, loss_fn, validation_samples=1, validation_batch_size=None, lr=0.001, min_lr=1e-05, lr_factor=0.5, lookback=5, training_seed=None, validation_seed=None, num_workers=0, bar=False, verbose=False):
        """Train the surrogate on (x, y) data with random superpixel masks.

        Early-stops when validation loss has not improved for `lookback`
        epochs; the best parameters seen are copied back at the end.
        """
        # --- Training data: accept (x, y) arrays/tensors or a ready Dataset. ---
        if isinstance(train_data, tuple):
            (x_train, y_train) = train_data
            if isinstance(x_train, np.ndarray):
                x_train = torch.tensor(x_train, dtype=torch.float32)
                y_train = torch.tensor(y_train, dtype=torch.float32)
            train_set = TensorDataset(x_train, y_train)
        elif isinstance(train_data, Dataset):
            train_set = train_data
        else:
            raise ValueError('train_data must be either tuple of tensors or a PyTorch Dataset')
        # Sample with replacement so each epoch is a whole number of full batches.
        random_sampler = RandomSampler(train_set, replacement=True, num_samples=(int(np.ceil((len(train_set) / batch_size))) * batch_size))
        batch_sampler = BatchSampler(random_sampler, batch_size=batch_size, drop_last=True)
        train_loader = DataLoader(train_set, batch_sampler=batch_sampler, pin_memory=True, num_workers=num_workers)
        # Mask sampler over superpixels (num_players presumably set by ImageImputer).
        sampler = UniformSampler(self.num_players)
        # --- Validation data: pre-draw fixed masks so val loss is comparable across epochs. ---
        if (validation_seed is not None):
            torch.manual_seed(validation_seed)
        S_val = sampler.sample((len(val_data) * validation_samples))
        if isinstance(val_data, tuple):
            (x_val, y_val) = val_data
            if isinstance(x_val, np.ndarray):
                x_val = torch.tensor(x_val, dtype=torch.float32)
                y_val = torch.tensor(y_val, dtype=torch.float32)
            # Each validation example is repeated once per sampled mask.
            # NOTE(review): assumes x_val is 4D (N, C, H, W) -- confirm for other shapes.
            x_val_repeat = x_val.repeat(validation_samples, 1, 1, 1)
            y_val_repeat = y_val.repeat(validation_samples, 1)
            val_set = TensorDataset(x_val_repeat, y_val_repeat, S_val)
        elif isinstance(val_data, Dataset):
            val_set = DatasetRepeat([val_data, TensorDataset(S_val)])
        else:
            raise ValueError('val_data must be either tuple of tensors or a PyTorch Dataset')
        if (validation_batch_size is None):
            validation_batch_size = batch_size
        val_loader = DataLoader(val_set, batch_size=validation_batch_size, pin_memory=True, num_workers=num_workers)
        # --- Optimizer/scheduler setup and pre-training baseline loss. ---
        surrogate = self.surrogate
        device = next(surrogate.parameters()).device
        optimizer = optim.Adam(surrogate.parameters(), lr=lr)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=lr_factor, patience=(lookback // 2), min_lr=min_lr, verbose=verbose)
        best_loss = validate(self, loss_fn, val_loader).item()
        best_epoch = 0
        best_model = deepcopy(surrogate)
        loss_list = [best_loss]
        if (training_seed is not None):
            torch.manual_seed(training_seed)
        for epoch in range(max_epochs):
            if bar:
                batch_iter = tqdm(train_loader, desc='Training epoch')
            else:
                batch_iter = train_loader
            for (x, y) in batch_iter:
                x = x.to(device)
                y = y.to(device)
                # A fresh random mask is drawn for every batch.
                S = sampler.sample(batch_size).to(device=device)
                pred = self.__call__(x, S)
                loss = loss_fn(pred, y)
                loss.backward()
                optimizer.step()
                # Gradients are cleared after the step, before the next backward.
                surrogate.zero_grad()
            # Evaluate in eval mode, then switch back for the next epoch.
            self.surrogate.eval()
            val_loss = validate(self, loss_fn, val_loader).item()
            self.surrogate.train()
            if verbose:
                print('----- Epoch = {} -----'.format((epoch + 1)))
                print('Val loss = {:.4f}'.format(val_loss))
                print('')
            scheduler.step(val_loss)
            loss_list.append(val_loss)
            if (val_loss < best_loss):
                best_loss = val_loss
                best_model = deepcopy(surrogate)
                best_epoch = epoch
                if verbose:
                    print('New best epoch, loss = {:.4f}'.format(val_loss))
                    print('')
            elif ((epoch - best_epoch) == lookback):
                # No improvement for `lookback` epochs: stop early.
                if verbose:
                    print('Stopping early')
                break
        # Restore the best parameters seen during training.
        for (param, best_param) in zip(surrogate.parameters(), best_model.parameters()):
            param.data = best_param.data
        self.loss_list = loss_list
        self.surrogate.eval()

    def train_original_model(self, train_data, val_data, original_model, batch_size, max_epochs, loss_fn, validation_samples=1, validation_batch_size=None, lr=0.001, min_lr=1e-05, lr_factor=0.5, lookback=5, training_seed=None, validation_seed=None, num_workers=0, bar=False, verbose=False):
        """Train the surrogate to match `original_model` outputs on masked inputs.

        Same loop as `train`, except labels are generated on the fly by
        running `original_model` on the unmasked batch.
        """
        # --- Training data: unlabeled x only. ---
        if isinstance(train_data, np.ndarray):
            train_data = torch.tensor(train_data, dtype=torch.float32)
        if isinstance(train_data, torch.Tensor):
            train_set = TensorDataset(train_data)
        elif isinstance(train_data, Dataset):
            train_set = train_data
        else:
            raise ValueError('train_data must be either tensor or a PyTorch Dataset')
        # Sample with replacement so each epoch is a whole number of full batches.
        random_sampler = RandomSampler(train_set, replacement=True, num_samples=(int(np.ceil((len(train_set) / batch_size))) * batch_size))
        batch_sampler = BatchSampler(random_sampler, batch_size=batch_size, drop_last=True)
        train_loader = DataLoader(train_set, batch_sampler=batch_sampler, pin_memory=True, num_workers=num_workers)
        sampler = UniformSampler(self.num_players)
        # --- Validation: fixed masks plus labels produced by the original model. ---
        if (validation_seed is not None):
            torch.manual_seed(validation_seed)
        S_val = sampler.sample((len(val_data) * validation_samples))
        if (validation_batch_size is None):
            validation_batch_size = batch_size
        if isinstance(val_data, np.ndarray):
            val_data = torch.tensor(val_data, dtype=torch.float32)
        if isinstance(val_data, torch.Tensor):
            y_val = generate_labels(TensorDataset(val_data), original_model, validation_batch_size, num_workers)
            # Repeat labels along dim 0, keeping all trailing dims unchanged.
            y_val_repeat = y_val.repeat(validation_samples, *[1 for _ in y_val.shape[1:]])
            val_data_repeat = val_data.repeat(validation_samples, 1, 1, 1)
            val_set = TensorDataset(val_data_repeat, y_val_repeat, S_val)
        elif isinstance(val_data, Dataset):
            y_val = generate_labels(val_data, original_model, validation_batch_size, num_workers)
            y_val_repeat = y_val.repeat(validation_samples, *[1 for _ in y_val.shape[1:]])
            val_set = DatasetRepeat([val_data, TensorDataset(y_val_repeat, S_val)])
        else:
            raise ValueError('val_data must be either tuple of tensors or a PyTorch Dataset')
        val_loader = DataLoader(val_set, batch_size=validation_batch_size, pin_memory=True, num_workers=num_workers)
        # --- Optimizer/scheduler setup and pre-training baseline loss. ---
        surrogate = self.surrogate
        device = next(surrogate.parameters()).device
        optimizer = optim.Adam(surrogate.parameters(), lr=lr)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=lr_factor, patience=(lookback // 2), min_lr=min_lr, verbose=verbose)
        best_loss = validate(self, loss_fn, val_loader).item()
        best_epoch = 0
        best_model = deepcopy(surrogate)
        loss_list = [best_loss]
        if (training_seed is not None):
            torch.manual_seed(training_seed)
        for epoch in range(max_epochs):
            if bar:
                batch_iter = tqdm(train_loader, desc='Training epoch')
            else:
                batch_iter = train_loader
            for (x,) in batch_iter:
                x = x.to(device)
                # Teacher labels come from the original model, no grad needed.
                with torch.no_grad():
                    y = original_model(x)
                S = sampler.sample(batch_size).to(device=device)
                pred = self.__call__(x, S)
                loss = loss_fn(pred, y)
                loss.backward()
                optimizer.step()
                # Gradients are cleared after the step, before the next backward.
                surrogate.zero_grad()
            self.surrogate.eval()
            val_loss = validate(self, loss_fn, val_loader).item()
            self.surrogate.train()
            if verbose:
                print('----- Epoch = {} -----'.format((epoch + 1)))
                print('Val loss = {:.4f}'.format(val_loss))
                print('')
            scheduler.step(val_loss)
            loss_list.append(val_loss)
            if (val_loss < best_loss):
                best_loss = val_loss
                best_model = deepcopy(surrogate)
                best_epoch = epoch
                if verbose:
                    print('New best epoch, loss = {:.4f}'.format(val_loss))
                    print('')
            elif ((epoch - best_epoch) == lookback):
                # No improvement for `lookback` epochs: stop early.
                if verbose:
                    print('Stopping early')
                break
        # Restore the best parameters seen during training.
        for (param, best_param) in zip(surrogate.parameters(), best_model.parameters()):
            param.data = best_param.data
        self.loss_list = loss_list
        self.surrogate.eval()

    def __call__(self, x, S):
        """Evaluate the surrogate on images `x` with superpixel mask `S`.

        `S` is upsampled from superpixel resolution to pixel resolution
        (`resize` comes from the ImageImputer base) before being passed in.
        """
        S = self.resize(S)
        return self.surrogate((x, S))
def imagenet1k(args, distributed=False):
    """Build ImageNet-1k train/val loaders, with DistributedSampler when requested.

    Returns (train_loader, train_sampler, val_loader, val_sampler); the
    samplers are None in the non-distributed case.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # Standard ImageNet augmentation; color jitter is opt-in.
    train_tf_list = [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip()]
    if args.color_jitter:
        train_tf_list.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1))
    train_tf_list.extend([transforms.ToTensor(), normalize])
    train_set = datasets.ImageFolder(args.train_dirs, transform=transforms.Compose(train_tf_list))
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) if distributed else None
    # Shuffle only when no sampler drives the ordering.
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=(train_sampler is None), sampler=train_sampler, num_workers=args.num_workers, pin_memory=True)
    val_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
    val_set = datasets.ImageFolder(root=args.val_dirs, transform=val_transform)
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) if distributed else None
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, sampler=val_sampler, num_workers=args.num_workers, pin_memory=True)
    return (train_loader, train_sampler, val_loader, val_sampler)
def pts_to_distogram(pts: torch.Tensor, min_bin: torch.types.Number=2.3125, max_bin: torch.types.Number=21.6875, no_bins: int=64) -> torch.Tensor:
    """Bucketize pairwise Euclidean distances between points into bin indices.

    `pts` has shape (..., N, 3); the result has shape (..., N, N) with
    integer bin indices in [0, no_bins - 1].  (no_bins - 1) evenly spaced
    boundaries between min_bin and max_bin delimit no_bins buckets.
    """
    edges = torch.linspace(min_bin, max_bin, (no_bins - 1), device=pts.device)
    pairwise_diff = pts.unsqueeze(-2) - pts.unsqueeze(-3)
    pairwise_dist = pairwise_diff.pow(2).sum(dim=-1).sqrt()
    return torch.bucketize(pairwise_dist, edges)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward network (pre-LayerNorm, residual).

    Computes ``x + dropout(W2(dropout(relu(W1(LayerNorm(x))))))``.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = onmt.modules.LayerNorm(d_model)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        normed = self.layer_norm(x)
        hidden = self.dropout_1(self.relu(self.w_1(normed)))
        projected = self.dropout_2(self.w_2(hidden))
        # Residual connection around the whole FFN.
        return projected + x
class ResNet18(ClassificationBase):
    """TF1 graph-mode ResNet-18 with per-layer channel configuration.

    Supports building the network with arbitrary per-conv channel counts,
    estimating parameter/FLOP totals for a channel configuration, and
    restoring weights from a checkpoint via an explicit weights dict.
    """

    def get_params_and_calculation_from_channel_num(self, channel_num, num_classes, ori_size):
        """Estimate parameter count and multiply-add count for `channel_num`.

        `channel_num` is the list of output channels for every conv layer in
        order; `ori_size` is the input spatial size (int or (H, W)).
        """
        def get_input_size(index):
            # Spatial size seen by conv layer `index`, assuming the feature map
            # halves at layer 11 and again at layer 21.
            size = ori_size
            if (not isinstance(size, int)):
                size = size[0]
            if ((index >= 0) and (index <= 10)):
                return size
            elif ((index >= 11) and (index <= 20)):
                return (size / 2)
            elif ((index >= 21) and (index <= 30)):
                return (size / 4)
            else:
                raise ValueError('unknown layer index')
            return size  # unreachable: every branch above returns or raises
        projection_shortcut_index = [10, 20]  # NOTE(review): unused in this method
        # Parameters: 3x3 conv weights + biases for every layer, assuming RGB input.
        params = 0
        for (i, output_channel) in enumerate(channel_num):
            input_channel = (3 if (i == 0) else channel_num[(i - 1)])
            kernel_size = 3
            params += ((((kernel_size * kernel_size) * input_channel) * output_channel) + output_channel)
        # Final dense layer weights + biases.
        params += ((channel_num[(- 1)] * num_classes) + num_classes)
        # Multiply-adds: 2 * H*W * Cin * Cout * k^2 per conv, plus the dense layer.
        calculation = 0
        for (i, output_channel) in enumerate(channel_num):
            input_channel = (3 if (i == 0) else channel_num[(i - 1)])
            input_size = get_input_size(i)
            kernel_size = 3
            calculation += (((2 * (input_size ** 2)) * input_channel) * (output_channel * (kernel_size ** 2)))
        calculation += ((2 * channel_num[(- 1)]) * num_classes)
        print('params: ', params, ' calculation: ', calculation)
        return (params, calculation)

    def get_weights_from_model(self, model_path, resnet_verison=1):
        """Read all conv/BN/dense tensors from a checkpoint into an ordered dict.

        Keys are the unprefixed layer paths used later by `restore_weights`;
        checkpoint variable names carry a 'resnet18/' prefix.
        """
        reader = tf.train.NewCheckpointReader(model_path)
        all_variables = reader.get_variable_to_shape_map()
        kernel_weights = collections.OrderedDict()
        # Stem conv + its batch-norm statistics.
        kernel_weights['conv0/conv2d/kernel'] = reader.get_tensor('resnet18/conv0/conv2d/kernel')
        kernel_weights['conv0/conv2d/beta'] = reader.get_tensor('resnet18/conv0/batch_normalization/beta')
        kernel_weights['conv0/conv2d/gamma'] = reader.get_tensor('resnet18/conv0/batch_normalization/gamma')
        kernel_weights['conv0/conv2d/moving_mean'] = reader.get_tensor('resnet18/conv0/batch_normalization/moving_mean')
        kernel_weights['conv0/conv2d/moving_variance'] = reader.get_tensor('resnet18/conv0/batch_normalization/moving_variance')
        # 4 stages x 2 basic blocks x 2 convs each, plus optional projection shortcuts.
        for i in range(1, 5):
            for j in range(2):
                for k in range(1, 3):
                    kw_prefix = ('block%d/sub_block%d/m%d/conv2d' % (i, j, k))
                    t_prefix = ('resnet18/block%d/sub_block%d/m%d' % (i, j, k))
                    kernel_weights[(kw_prefix + '/kernel')] = reader.get_tensor((t_prefix + '/conv2d/kernel'))
                    kernel_weights[(kw_prefix + '/beta')] = reader.get_tensor((t_prefix + '/batch_normalization/beta'))
                    kernel_weights[(kw_prefix + '/gamma')] = reader.get_tensor((t_prefix + '/batch_normalization/gamma'))
                    kernel_weights[(kw_prefix + '/moving_mean')] = reader.get_tensor((t_prefix + '/batch_normalization/moving_mean'))
                    kernel_weights[(kw_prefix + '/moving_variance')] = reader.get_tensor((t_prefix + '/batch_normalization/moving_variance'))
                kw_prefix = ('block%d/sub_block%d/shortcut/conv2d' % (i, j))
                s_prefix = ('resnet18/block%d/sub_block%d/shortcut' % (i, j))
                # Shortcut conv only exists where the block changes shape.
                if ((s_prefix + '/conv2d/kernel') in all_variables):
                    kernel_weights[(kw_prefix + '/kernel')] = reader.get_tensor((s_prefix + '/conv2d/kernel'))
                    kernel_weights[(kw_prefix + '/beta')] = reader.get_tensor((s_prefix + '/batch_normalization/beta'))
                    kernel_weights[(kw_prefix + '/gamma')] = reader.get_tensor((s_prefix + '/batch_normalization/gamma'))
                    kernel_weights[(kw_prefix + '/moving_mean')] = reader.get_tensor((s_prefix + '/batch_normalization/moving_mean'))
                    kernel_weights[(kw_prefix + '/moving_variance')] = reader.get_tensor((s_prefix + '/batch_normalization/moving_variance'))
        kernel_weights['dense/kernel'] = reader.get_tensor('resnet18/dense/dense/kernel')
        kernel_weights['dense/bias'] = reader.get_tensor('resnet18/dense/dense/bias')
        return kernel_weights

    def restore_weights(self, scope, layer_type, weights_dict):
        """Queue tf.assign ops (collected under 'init') that load saved weights.

        `scope.name[9:]` strips the leading 9 characters of the variable scope
        -- presumably the 'resnet18/' model prefix, matching the unprefixed
        keys produced by `get_weights_from_model`; confirm if the scope name
        changes.  Missing keys are silently skipped so partial restores work.
        """
        if ((layer_type == 'conv') or (layer_type == 'dense')):
            prefix = ('/conv2d' if (layer_type == 'conv') else '/dense')
            sk_prefix = ('/conv2d' if (layer_type == 'conv') else '')
            saved_kernel = weights_dict.get(((scope.name[9:] + sk_prefix) + '/kernel'))
            if (saved_kernel is not None):
                weight = tf.get_default_graph().get_tensor_by_name(((scope.name + prefix) + '/kernel:0'))
                weight = tf.assign(weight, saved_kernel)
                tf.add_to_collection('init', weight)
            if (layer_type == 'dense'):
                saved_bias = weights_dict.get((scope.name[9:] + '/bias'))
                if (saved_bias is not None):
                    bias = tf.get_default_graph().get_tensor_by_name(((scope.name + prefix) + '/bias:0'))
                    bias = tf.assign(bias, saved_bias)
                    tf.add_to_collection('init', bias)
        elif (layer_type == 'bn'):
            saved_beta = weights_dict.get((scope.name[9:] + '/conv2d/beta'))
            saved_gamma = weights_dict.get((scope.name[9:] + '/conv2d/gamma'))
            saved_moving_mean = weights_dict.get((scope.name[9:] + '/conv2d/moving_mean'))
            saved_moving_variance = weights_dict.get((scope.name[9:] + '/conv2d/moving_variance'))
            # Presence of beta gates all four BN tensors (they are saved together).
            if (saved_beta is not None):
                beta = tf.get_default_graph().get_tensor_by_name((scope.name + '/batch_normalization/beta:0'))
                gamma = tf.get_default_graph().get_tensor_by_name((scope.name + '/batch_normalization/gamma:0'))
                moving_mean = tf.get_default_graph().get_tensor_by_name((scope.name + '/batch_normalization/moving_mean:0'))
                moving_variance = tf.get_default_graph().get_tensor_by_name((scope.name + '/batch_normalization/moving_variance:0'))
                beta = tf.assign(beta, saved_beta)
                gamma = tf.assign(gamma, saved_gamma)
                moving_mean = tf.assign(moving_mean, saved_moving_mean)
                moving_variance = tf.assign(moving_variance, saved_moving_variance)
                tf.add_to_collection('init', beta)
                tf.add_to_collection('init', gamma)
                tf.add_to_collection('init', moving_mean)
                tf.add_to_collection('init', moving_variance)
        else:
            raise ValueError('unknown layer type')
        return

    def basic_block(self, net, channels, strides, use_bias, is_training, initializer, regularizer, weights_dict, weight_decay):
        """One ResNet basic block: conv-BN-relu, conv-BN, (+projection), relu.

        A 1x1 projection shortcut is inserted when the block strides or
        changes channel count, so the residual add shapes match.
        """
        origin_input = net
        with tf.variable_scope('m1') as nsc:
            net = tf.layers.conv2d(net, channels[0], [3, 3], strides=strides, padding='same', use_bias=use_bias, kernel_initializer=initializer(), kernel_regularizer=regularizer(weight_decay))
            self.restore_weights(nsc, 'conv', weights_dict)
            net = tf.layers.batch_normalization(net, axis=(- 1), training=is_training, epsilon=1e-05, momentum=0.997)
            self.restore_weights(nsc, 'bn', weights_dict)
            net = tf.nn.relu(net)
        with tf.variable_scope('m2') as nsc:
            net = tf.layers.conv2d(net, channels[1], [3, 3], strides=1, padding='same', use_bias=use_bias, kernel_initializer=initializer(), kernel_regularizer=regularizer(weight_decay))
            self.restore_weights(nsc, 'conv', weights_dict)
            net = tf.layers.batch_normalization(net, axis=(- 1), training=is_training, epsilon=1e-05, momentum=0.997)
            self.restore_weights(nsc, 'bn', weights_dict)
        if ((strides > 1) or (origin_input.get_shape().as_list()[3] != channels[1])):
            with tf.variable_scope('shortcut') as nsc:
                origin_input = tf.layers.conv2d(origin_input, channels[1], [1, 1], strides=strides, padding='same', use_bias=use_bias, kernel_initializer=initializer(), kernel_regularizer=regularizer(weight_decay))
                self.restore_weights(nsc, 'conv', weights_dict)
                origin_input = tf.layers.batch_normalization(origin_input, axis=(- 1), training=is_training, epsilon=1e-05, momentum=0.997)
                self.restore_weights(nsc, 'bn', weights_dict)
        net += origin_input
        net = tf.nn.relu(net)
        return net

    def network(self, inputs, num_classes, scope, is_training, kargs):
        """Build the full ResNet-18 graph: stem, 4 stages of blocks, global pool, dense.

        `kargs.channels_num` supplies per-conv channel counts (dict or list);
        slices of it are handed to each basic block.
        """
        print('Use ResNet18')
        use_bias = kargs.use_bias
        block_sizes = kargs.block_sizes
        strides = kargs.strides
        initializer = kargs.initializer
        regularizer = kargs.regularizer
        weights_dict = (kargs.get('weights_dict') or {})
        weight_decay = kargs.weight_decay
        channel_num = kargs.channels_num
        if isinstance(channel_num, dict):
            # Dict configs (e.g. from pruning) are flattened to an ordered list.
            channel_num = list(channel_num.values())
            print('Use set channels', channel_num)
        else:
            print('Use ori channels', channel_num)
        # Stem: 7x7 stride-2 conv, BN, relu, 3x3 stride-2 max pool.
        with tf.variable_scope((scope + '/conv0')) as nsc:
            net = tf.layers.conv2d(inputs, channel_num[0], [7, 7], strides=[2, 2], padding='same', use_bias=use_bias, kernel_initializer=initializer(), kernel_regularizer=regularizer(weight_decay))
            self.restore_weights(nsc, 'conv', weights_dict)
            net = tf.layers.batch_normalization(net, axis=(- 1), training=is_training, epsilon=1e-05, momentum=0.997, beta_regularizer=regularizer(weight_decay), gamma_regularizer=regularizer(weight_decay))
            self.restore_weights(nsc, 'bn', weights_dict)
            net = tf.nn.relu(net)
        net = tf.layers.max_pooling2d(net, [3, 3], strides=[2, 2], padding='SAME')
        # Each stage's blocks take two consecutive entries of channel_num
        # (index arithmetic skips the stem conv at position 0).
        for (i, num_block) in enumerate(block_sizes):
            with tf.variable_scope((scope + ('/block%d' % (i + 1)))) as nsc:
                with tf.variable_scope('sub_block0') as nsc:
                    net = self.basic_block(net, channel_num[(((2 * i) * num_block) + 1):(((2 * i) * num_block) + 3)], strides[i], use_bias, is_training, initializer, regularizer, weights_dict, weight_decay)
                for j in range(1, num_block):
                    with tf.variable_scope(('sub_block%d' % j)) as nsc:
                        net = self.basic_block(net, channel_num[((2 * ((i * num_block) + j)) + 1):((2 * ((i * num_block) + j)) + 3)], 1, use_bias, is_training, initializer, regularizer, weights_dict, weight_decay)
        with tf.variable_scope((scope + '/dense')) as nsc:
            # Sanity check: 224x224 input should yield a 7x7 map here.
            assert (net.get_shape().as_list()[2] == 7)
            # Global average pool, then the classification head.
            net = tf.reduce_mean(net, [1, 2], keepdims=False)
            net = tf.layers.dense(net, num_classes, use_bias=True, kernel_regularizer=regularizer(weight_decay), kernel_initializer=initializer())
            self.restore_weights(nsc, 'dense', weights_dict)
        return net
def test_initial_solutions_are_correct(archive_fixture):
    """The emitter must first propose exactly the user-supplied initial solutions."""
    (archive, _) = archive_fixture
    seeds = [[0, 1, 2, 3], [(- 1), (- 2), (- 3), (- 4)]]
    emitter = GaussianEmitter(archive, sigma=1.0, initial_solutions=seeds)
    # First ask() returns the seeds verbatim, and they are also exposed as a property.
    assert np.all(emitter.ask() == seeds)
    assert np.all(emitter.initial_solutions == seeds)
(sigma=1000.0)
class Boundary(sc.SampleDomain):
    """Fixed boundary sample with a cosh(x) constraint on u."""

    def __init__(self):
        # Draw a single boundary point from the module-level geometry `geo`.
        self.points = geo.sample_boundary(1)
        # Dirichlet-style condition: u = cosh(x) at the sampled points.
        # NOTE(review): assumes `self.points` is a dict with an 'x' key -- confirm
        # against geo.sample_boundary's return schema.
        self.constraints = {'u': np.cosh(self.points['x'])}

    def sampling(self, *args, **kwargs):
        # Deterministic: always serves the points/constraints drawn at init.
        return (self.points, self.constraints)
def run(dataset_dir):
    """Download the Flowers dataset and convert it into train/validation TFRecords.

    Creates `dataset_dir` if needed, skips work when the converted files already
    exist, then downloads, shuffles deterministically, splits, converts, writes
    the label file and removes temporary files.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
    class_names_to_ids = {name: idx for idx, name in enumerate(class_names)}
    # Deterministic shuffle so the train/validation split is reproducible.
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    splits = {
        'train': photo_filenames[_NUM_VALIDATION:],
        'validation': photo_filenames[:_NUM_VALIDATION],
    }
    for split_name, filenames in splits.items():
        _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir)
    labels_to_class_names = {idx: name for idx, name in enumerate(class_names)}
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Flowers dataset!')
class PretrainedFSMTModel():
    """Import-time placeholder for FSMT when PyTorch is not installed.

    Any attempt to construct or load the model raises an informative
    ImportError via ``requires_pytorch``.

    NOTE(review): sibling dummy classes in this codebase use a DummyObject
    metaclass and a classmethod ``from_pretrained`` — it looks like those
    were stripped from this dump; confirm against the original source.
    """

    def __init__(self, *args, **kwargs):
        # Fails fast with an ImportError explaining how to install torch.
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def test(cfg_file, ckpt: str, combiner_cfg: dict, batch_class: Batch=Batch, output_path: str=None, save_attention: bool=False, datasets: dict=None) -> None:
    """Decode the dev and test sets with a trained model plus a combiner.

    :param cfg_file: path to the YAML configuration file
    :param ckpt: checkpoint path; when None the latest checkpoint in the
        configured model directory is used
    :param combiner_cfg: combiner configuration injected into the config
    :param batch_class: batch class used by validation
    :param output_path: when given, hypotheses are written to
        ``<output_path>.<set_name>``
    :param save_attention: whether to store attention plots (greedy only)
    :param datasets: optional pre-loaded {'dev', 'test', 'src_vocab',
        'trg_vocab'} dict; when None the data is loaded from the config
    """
    cfg = load_config(cfg_file)
    model_dir = cfg['training']['model_dir']
    # Validate and inject the combiner section into the loaded config.
    check_combiner_cfg(combiner_cfg)
    cfg['combiner'] = combiner_cfg
    # Only install a logger when none exists yet (standalone invocation).
    if (len(logger.handlers) == 0):
        _ = make_logger(model_dir, mode='test')
    if (ckpt is None):
        ckpt = get_latest_checkpoint(model_dir)
    # Derive the training step from the checkpoint filename; fall back to
    # 'best' for names that do not follow the '<model_dir>/<step>.ckpt' pattern.
    try:
        step = ckpt.split((model_dir + '/'))[1].split('.ckpt')[0]
    except IndexError:
        step = 'best'
    if (datasets is None):
        # Load dev/test data and vocabularies from the data config.
        (_, dev_data, test_data, src_vocab, trg_vocab) = load_data(data_cfg=cfg['data'], datasets=['dev', 'test'])
        data_to_predict = {'dev': dev_data, 'test': test_data}
    else:
        # Reuse datasets/vocabs supplied by the caller.
        data_to_predict = {'dev': datasets['dev'], 'test': datasets['test']}
        src_vocab = datasets['src_vocab']
        trg_vocab = datasets['trg_vocab']
    # Unpack all decoding/evaluation settings from the config.
    (batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, max_output_length, beam_size, beam_alpha, postprocess, bpe_type, sacrebleu, decoding_description, tokenizer_info) = parse_test_args(cfg, mode='test')
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    model = build_model(cfg['model'], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint['model_state'])
    combiner = build_combiner(cfg)
    # A dynamic combiner carries its own trained weights, loaded separately.
    if (combiner_cfg['type'] == 'dynamic_combiner'):
        combiner_checkpoint = load_checkpoint(combiner_cfg['combiner_path'], use_cuda=use_cuda)
        combiner.load_state_dict(combiner_checkpoint['model_state'])
    model.combiner = combiner
    if use_cuda:
        model.to(device)
    # Wrap in DataParallel for multi-GPU decoding (idempotent check).
    if ((n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = _DataParallel(model)
    for (data_set_name, data_set) in data_to_predict.items():
        if (data_set is None):
            continue
        dataset_file = ((cfg['data'][data_set_name] + '.') + cfg['data']['trg'])
        logger.info('Decoding on %s set (%s)...', data_set_name, dataset_file)
        # Run decoding (loss is not computed: compute_loss=False).
        (score, loss, ppl, sources, sources_raw, references, hypotheses, hypotheses_raw, attention_scores) = validate_on_data(model, data=data_set, batch_size=batch_size, batch_class=batch_class, batch_type=batch_type, level=level, max_output_length=max_output_length, eval_metric=eval_metric, use_cuda=use_cuda, compute_loss=False, beam_size=beam_size, beam_alpha=beam_alpha, postprocess=postprocess, bpe_type=bpe_type, sacrebleu=sacrebleu, n_gpu=n_gpu)
        # Only report a score when references are available.
        if ('trg' in data_set.fields):
            logger.info('%4s %s%s: %6.2f [%s]', data_set_name, eval_metric, tokenizer_info, score, decoding_description)
        else:
            logger.info('No references given for %s -> no evaluation.', data_set_name)
        if save_attention:
            # Attention is only produced by greedy decoding (beam_size == 1).
            if attention_scores:
                attention_name = '{}.{}.att'.format(data_set_name, step)
                attention_path = os.path.join(model_dir, attention_name)
                logger.info('Saving attention plots. This might take a while..')
                store_attention_plots(attentions=attention_scores, targets=hypotheses_raw, sources=data_set.src, indices=range(len(hypotheses)), output_prefix=attention_path)
                logger.info('Attention plots saved to: %s', attention_path)
            else:
                logger.warning('Attention scores could not be saved. Note that attention scores are not available when using beam search. Set beam_size to 1 for greedy decoding.')
        if (output_path is not None):
            # Write one hypothesis per line to '<output_path>.<set_name>'.
            output_path_set = '{}.{}'.format(output_path, data_set_name)
            with open(output_path_set, mode='w', encoding='utf-8') as out_file:
                for hyp in hypotheses:
                    out_file.write((hyp + '\n'))
            logger.info('Translations saved to: %s', output_path_set)
def _get_signature_keys(obj):
parameters = inspect.signature(obj.__init__).parameters
required_parameters = {k: v for (k, v) in parameters.items() if (v.default == inspect._empty)}
optional_parameters = set({k for (k, v) in parameters.items() if (v.default != inspect._empty)})
expected_modules = (set(required_parameters.keys()) - {'self'})
return (expected_modules, optional_parameters) |
class EvoNormSample2d(nn.Module):
    """EvoNorm-S0 style normalization for NCHW feature maps.

    Applies a sigmoid-gated activation divided by the per-group standard
    deviation, followed by a learnable per-channel affine transform. With
    ``apply_act=False`` only the affine transform is applied.
    """

    def __init__(self, num_features, apply_act=True, groups=8, eps=1e-05, drop_block=None):
        super().__init__()
        self.apply_act = apply_act
        self.groups = groups
        self.eps = eps
        affine_shape = (1, num_features, 1, 1)
        # Per-channel affine parameters (gamma/beta analogue).
        self.weight = nn.Parameter(torch.ones(affine_shape), requires_grad=True)
        self.bias = nn.Parameter(torch.zeros(affine_shape), requires_grad=True)
        if apply_act:
            # Gate scaling used by the activation branch.
            self.v = nn.Parameter(torch.ones(affine_shape), requires_grad=True)
        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize weight to ones, bias to zeros (and v to ones)."""
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)
        if self.apply_act:
            nn.init.ones_(self.v)

    def forward(self, x):
        assert x.dim() == 4, 'expected 4D input'
        batch, channels, height, width = x.shape
        assert channels % self.groups == 0
        if self.apply_act:
            # x * sigmoid(v * x), normalized by the grouped std deviation.
            gated = x * (x * self.v).sigmoid()
            grouped = x.reshape(batch, self.groups, -1)
            group_std = (grouped.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt()
            normalized = gated.reshape(batch, self.groups, -1) / group_std
            x = normalized.reshape(batch, channels, height, width)
        return x * self.weight + self.bias
class TFArgument(Argument):
    """Fuzzing argument specialized for TensorFlow APIs.

    Extends the generic ``Argument`` with TF tensor/variable/Keras-tensor and
    dtype handling: random mutation of values, types and shapes, and code
    generation (``to_code`` / ``to_diff_code``) that materializes the argument
    as executable TF statements.

    NOTE(review): several helpers here (``str_to_dtype``, ``shape_to_list``,
    ``get_type``, ``mutate_dtype``, ``low_precision_dtype``,
    ``random_tensor_value_range``, ``generate_arg_from_signature``, the
    ``if_mutate_*_random`` predicates) take no ``self``/``cls`` parameter and
    read like ``@staticmethod``s whose decorators were stripped from this
    dump — calling them through ``self.`` as written would pass the instance
    as the first argument. Confirm against the upstream source.
    """
    # Candidate pools used by random value/type mutation.
    _str_values = ['', '1', 'sum', 'same', 'valid', 'zeros']
    _float_values = [0.0, 1.0, (- 1.0), 63.0, (- 63.0)]
    # Argument types that carry a tensor payload (shape/dtype/min/max).
    _tensor_arg_dtypes = [ArgType.TF_TENSOR, ArgType.KERAS_TENSOR, ArgType.TF_VARIABLE]
    _dtypes = [tf.bfloat16, tf.bool, tf.complex128, tf.complex64, tf.double, tf.float16, tf.float32, tf.float64, tf.half, tf.int16, tf.int32, tf.int64, tf.int8, tf.uint8, tf.uint16, tf.uint32, tf.uint64]
    # TF-specific types a NULL argument may mutate into.
    _support_types = [ArgType.TF_TENSOR, ArgType.TF_VARIABLE, ArgType.KERAS_TENSOR, ArgType.TF_DTYPE, ArgType.TF_OBJECT]

    def __init__(self, value, type: ArgType, minv=0, maxv=0, shape=None, dtype=None) -> None:
        """Build an argument; ``minv``/``maxv`` bound generated tensor values,
        ``shape``/``dtype`` describe tensor-like arguments."""
        if isinstance(dtype, str):
            dtype = self.str_to_dtype(dtype)
        shape = self.shape_to_list(shape)
        super().__init__(value, type)
        self.minv = minv
        self.maxv = maxv
        self.shape = shape
        self.dtype = dtype

    def str_to_dtype(dt: str):
        """Parse a dtype string like ``'float32'``/``'tf.float32'``/``'int32_ref'``
        into a ``tf.DType``; falls back to tf.float32 on failure."""
        dt = dt.strip().replace('_ref', '')
        if (not dt.startswith('tf.')):
            dt = ('tf.' + dt)
        try:
            return eval(dt)
        except:
            return tf.float32

    def shape_to_list(shape):
        """Normalize a shape (TensorShape / tuple / list / None) to a plain
        list, replacing unknown (None) dims with 1."""
        if (shape is None):
            return None
        if (not isinstance(shape, list)):
            try:
                shape = shape.as_list()
            except:
                shape = list(shape)
        else:
            shape = list(shape)
        shape = [(1 if (x is None) else x) for x in shape]
        return shape

    def get_type(x):
        """Classify a runtime value into an ``ArgType``; returns None for
        unrecognized TF objects (implicit fall-through)."""
        res = Argument.get_type(x)
        if (res != None):
            return res
        if tf.is_tensor(x):
            if tf.keras.backend.is_keras_tensor(x):
                return ArgType.KERAS_TENSOR
            return ArgType.TF_TENSOR
        elif isinstance(x, tf.DType):
            return ArgType.TF_DTYPE

    def mutate_value_random(self) -> None:
        """Randomly perturb the current value in a type-appropriate way."""
        if (self.type == ArgType.INT):
            self.value = self.mutate_int_value(self.value)
        elif (self.type == ArgType.STR):
            self.value = self.mutate_str_value(self.value)
        elif (self.type == ArgType.FLOAT):
            self.value = self.mutate_float_value(self.value)
        elif (self.type == ArgType.BOOL):
            self.value = self.mutate_bool_value(self.value)
        elif ((self.type == ArgType.TUPLE) or (self.type == ArgType.LIST)):
            # Recurse into container elements.
            for arg in self.value:
                arg.mutate_value_random()
        elif (self.type in self._tensor_arg_dtypes):
            # For tensors, "value mutation" means re-rolling the value range.
            (self.minv, self.maxv) = self.random_tensor_value_range(self.dtype)
        elif (self.type == ArgType.TF_DTYPE):
            self.value = TFArgument.mutate_dtype()
        elif (self.type == ArgType.TF_OBJECT):
            pass
        elif (self.type == ArgType.NULL):
            pass
        else:
            raise ValueError(self.type)
            # unreachable: kept from the original source
            assert 0

    def if_mutate_shape(self):
        # 30% chance to mutate a tensor's shape.
        return (random.random() < 0.3)

    def if_mutate_shape_value(self):
        # 30% chance per dimension to perturb its extent.
        return (random.random() < 0.3)

    def if_expand_dim(self):
        return (random.random() < 0.3)

    def if_squeeze(self):
        return (random.random() < 0.3)

    def mutate_shape(self, old_shape):
        """Randomly expand/squeeze a trailing dim and jitter dim sizes.
        NOTE: mutates ``old_shape`` in place (aliased by ``new_shape``)."""
        new_shape = old_shape
        if self.if_expand_dim():
            new_shape.append(1)
        elif ((len(new_shape) > 0) and self.if_squeeze()):
            new_shape.pop()
        for i in range(len(new_shape)):
            if self.if_mutate_shape_value():
                new_shape[i] = self.mutate_int_value(new_shape[i], minv=0)
        return new_shape

    def generate_value_random(self) -> None:
        """Generate a fresh random value consistent with the current type."""
        if (self.type == ArgType.INT):
            self.value = self.mutate_int_value(0)
        elif (self.type == ArgType.STR):
            self.value = self.mutate_str_value('')
        elif (self.type == ArgType.FLOAT):
            self.value = self.mutate_float_value(0.0)
        elif (self.type == ArgType.BOOL):
            self.value = self.mutate_bool_value(True)
        elif ((self.type == ArgType.TUPLE) or (self.type == ArgType.LIST)):
            self.value = [TFArgument(1, ArgType.INT), TFArgument(1, ArgType.INT)]
        elif (self.type in self._tensor_arg_dtypes):
            # Small random 2-D tensor with a common numeric dtype.
            shape = [randint(1, 3), randint(1, 3)]
            dtype = choice([tf.int32, tf.float32, tf.float64])
            (self.shape, self.dtype) = (shape, dtype)
            (self.value, self.minv, self.maxv) = (None, 0, 1)
        elif (self.type == ArgType.TF_DTYPE):
            self.value = choice(self._dtypes)
        elif (self.type == ArgType.TF_OBJECT):
            self.value = None
            pass
        elif (self.type == ArgType.NULL):
            self.value = None
            pass
        else:
            assert 0

    def mutate_type(self) -> None:
        """Occasionally switch the argument to a different ArgType.

        Returns False when the (probabilistic) mutation was skipped,
        True otherwise.
        """
        def if_mutate_primitive():
            return (random.random() < 0.1)

        def if_mutate_null():
            return (random.random() < 0.1)
        if (self.type in [ArgType.INT, ArgType.FLOAT, ArgType.STR, ArgType.BOOL]):
            if (not if_mutate_primitive()):
                return False
            # Switch to a different primitive type and regenerate the value.
            types = [ArgType.INT, ArgType.FLOAT, ArgType.STR, ArgType.BOOL]
            types.remove(self.type)
            self.type = choice(types)
            if (self.type == ArgType.INT):
                self.value = self.mutate_int_value(0)
            elif (self.type == ArgType.FLOAT):
                self.value = self.mutate_float_value(0.0)
            elif (self.type == ArgType.STR):
                self.value = self.mutate_str_value('')
            elif (self.type == ArgType.BOOL):
                self.value = choice([True, False])
        elif (self.type in [ArgType.LIST, ArgType.TUPLE]):
            # Rarely empty the container; otherwise recurse into elements.
            if (random.random() < 0.01):
                self.value = []
            for arg in self.value:
                arg.mutate_type()
        elif (self.type == ArgType.TF_TENSOR):
            dtype = choice(self._dtypes)
            shape = self.shape
            if self.if_mutate_shape():
                shape = self.mutate_shape(shape)
            (self.shape, self.dtype) = (shape, dtype)
        elif (self.type == ArgType.TF_OBJECT):
            pass
        elif (self.type == ArgType.NULL):
            if (not if_mutate_null()):
                return False
            # Promote NULL to a random supported type with a default value.
            new_type = choice((self._support_types + super()._support_types))
            if ((new_type == ArgType.LIST) or (new_type == ArgType.TUPLE)):
                self.value = [TFArgument(2, ArgType.INT), TFArgument(3, ArgType.INT)]
            elif (new_type == ArgType.TF_TENSOR):
                self.shape = [2, 2]
                self.dtype = tf.float32
            if (new_type != ArgType.NULL):
                try:
                    self.type = new_type
                    self.generate_value_random()
                except:
                    pass
        elif (self.type == ArgType.TF_DTYPE):
            self.value = choice(TFArgument._dtypes)
        return True

    def if_mutate_int_random():
        return (random.random() < 0.2)

    def if_mutate_str_random():
        return (random.random() < 0.1)

    def if_mutate_float_random():
        return (random.random() < 0.2)

    def mutate_bool_value(self, value) -> bool:
        """Return a fresh random boolean (ignores the old value)."""
        return choice([True, False])

    def mutate_int_value(self, value, minv=None, maxv=None) -> int:
        """Either pick a pooled int value or jitter by +/-2, then clamp."""
        if TFArgument.if_mutate_int_random():
            value = choice(self._int_values)
        else:
            value += randint((- 2), 2)
        if (minv is not None):
            value = max(minv, value)
        if (maxv is not None):
            value = min(maxv, value)
        return value

    def mutate_str_value(self, value) -> str:
        """Occasionally replace the string with one from the candidate pool."""
        if TFArgument.if_mutate_str_random():
            return choice(self._str_values)
        return value

    def mutate_dtype() -> tf.dtypes.DType:
        return choice(TFArgument._dtypes)

    def low_precision_dtype(dtype):
        """Map a dtype to a lower-precision counterpart (for diff testing)."""
        if (dtype in [tf.int16, tf.int32, tf.int64]):
            return tf.int8
        elif (dtype in [tf.float32, tf.float64]):
            return tf.float16
        elif (dtype in [tf.complex128]):
            return tf.complex64
        return dtype

    def random_tensor_value_range(dtype):
        """Pick a sensible [minv, maxv] generation range for ``dtype``."""
        assert isinstance(dtype, tf.dtypes.DType)
        minv = 0
        maxv = 1
        if (dtype.is_floating or dtype.is_complex or (dtype == tf.string) or (dtype == tf.bool)):
            pass
        elif (('int64' in dtype.name) or ('int32' in dtype.name) or ('int16' in dtype.name)):
            # Wide integer types: small symmetric range (or [0, 256] unsigned).
            minv = (0 if ('uint' in dtype.name) else (- (1 << 8)))
            maxv = (1 << 8)
        else:
            # Narrow integer types: use the dtype's full range if available.
            try:
                minv = dtype.min
                maxv = dtype.max
            except Exception as e:
                (minv, maxv) = (0, 1)
        return (minv, maxv)

    def to_code_tensor(self, var_name, low_precision=False):
        """Emit code creating a random tensor ``<var_name>`` of this
        argument's shape/dtype (via an intermediate ``<var_name>_tensor``)."""
        dtype = self.dtype
        if low_precision:
            dtype = self.low_precision_dtype(dtype)
        shape = self.shape
        if (dtype is None):
            assert 0
        code = ''
        var_tensor_name = f'{var_name}_tensor'
        if dtype.is_floating:
            code += ('%s = tf.random.uniform(%s, dtype=tf.%s)\n' % (var_tensor_name, shape, dtype.name))
        elif dtype.is_complex:
            ftype = ('float64' if (dtype == tf.complex128) else 'float32')
            code += ('%s = tf.complex(tf.random.uniform(%s, dtype=tf.%s),tf.random.uniform(%s, dtype=tf.%s))\n' % (var_tensor_name, shape, ftype, shape, ftype))
        elif (dtype == tf.bool):
            code += ('%s = tf.cast(tf.random.uniform(%s, minval=0, maxval=2, dtype=tf.int32), dtype=tf.bool)\n' % (var_tensor_name, shape))
        elif (dtype == tf.string):
            code += ('%s = tf.convert_to_tensor(np.ones(%s, dtype=str))\n' % (var_tensor_name, shape))
        elif (dtype in [tf.int32, tf.int64]):
            code += ('%s = tf.random.uniform(%s, minval=%d, maxval=%d, dtype=tf.%s)\n' % (var_tensor_name, shape, self.minv, (self.maxv + 1), dtype.name))
        else:
            # Other integer dtypes: generate as int64 then saturate-cast.
            code += ('%s = tf.saturate_cast(tf.random.uniform(%s, minval=%d, maxval=%d, dtype=tf.int64), dtype=tf.%s)\n' % (var_tensor_name, shape, self.minv, (self.maxv + 1), dtype.name))
        code += f'''{var_name} = tf.identity({var_tensor_name})
'''
        return code

    def to_code_keras_tensor(self, var_name, low_precision=False):
        # Keras tensors are materialized the same way as plain tensors.
        return self.to_code_tensor(var_name, low_precision=low_precision)

    def to_code(self, var_name, low_precision=False) -> str:
        """Emit code that assigns this argument's value to ``<var_name>``."""
        if (self.type in [ArgType.LIST, ArgType.TUPLE]):
            # Emit each element as <var_name>_<i>, then assemble the container.
            code = ''
            arg_name_list = ''
            for i in range(len(self.value)):
                code += self.value[i].to_code(f'{var_name}_{i}', low_precision)
                arg_name_list += f'{var_name}_{i},'
            if (self.type == ArgType.LIST):
                code += f'''{var_name} = [{arg_name_list}]
'''
            else:
                code += f'''{var_name} = ({arg_name_list})
'''
            return code
        elif (self.type == ArgType.TF_OBJECT):
            return ('%s = None\n' % var_name)
        elif (self.type == ArgType.TF_DTYPE):
            return ('%s = tf.%s\n' % (var_name, self.value.name))
        elif (self.type in self._tensor_arg_dtypes):
            code = ''
            if (self.type == ArgType.TF_TENSOR):
                code = self.to_code_tensor(var_name, low_precision=low_precision)
            elif (self.type == ArgType.TF_VARIABLE):
                code = self.to_code_tensor(var_name, low_precision=low_precision)
                code += ('%s = tf.Variable(%s)\n' % (var_name, var_name))
            elif (self.type == ArgType.KERAS_TENSOR):
                code = self.to_code_keras_tensor(var_name, low_precision=low_precision)
            return code
        return super().to_code(var_name)

    def to_diff_code(self, var_name, low_precision=False) -> str:
        """Emit code that re-binds ``<var_name>`` from the already-created
        ``<var_name>_tensor`` (used for differential re-execution)."""
        if (self.type in [ArgType.LIST, ArgType.TUPLE]):
            code = ''
            arg_name_list = ''
            for i in range(len(self.value)):
                code += self.value[i].to_diff_code(f'{var_name}_{i}', low_precision)
                arg_name_list += f'{var_name}_{i},'
            if (self.type == ArgType.LIST):
                code += f'''{var_name} = [{arg_name_list}]
'''
            else:
                code += f'''{var_name} = ({arg_name_list})
'''
            return code
        elif (self.type == ArgType.TF_OBJECT):
            return ('%s = None\n' % var_name)
        elif (self.type == ArgType.TF_DTYPE):
            return ('%s = tf.%s\n' % (var_name, self.value.name))
        elif (self.type in self._tensor_arg_dtypes):
            code = f'''{var_name} = tf.identity({var_name}_tensor)
'''
            if (not low_precision):
                # Restore full precision when not in low-precision mode.
                code += f'''{var_name} = tf.cast({var_name}, tf.{self.dtype.name})
'''
            if (self.type == ArgType.TF_VARIABLE):
                code += ('%s = tf.Variable(%s)\n' % (var_name, var_name))
            return code
        return ''

    def mutate_value(self):
        self.mutate_value_random()

    def generate_arg_from_signature(signature):
        """Reconstruct a ``TFArgument`` from a recorded call signature.

        ``signature`` may be a raw Python value, a list/tuple of signatures,
        or a dict with a 'type'/'Label' tag describing TF objects, dtypes,
        tensors, tuples, lists or raw JSON-encoded values.
        """
        if isinstance(signature, bool):
            return TFArgument(signature, ArgType.BOOL)
        if isinstance(signature, int):
            return TFArgument(signature, ArgType.INT)
        if isinstance(signature, float):
            return TFArgument(signature, ArgType.FLOAT)
        if isinstance(signature, str):
            return TFArgument(signature, ArgType.STR)
        if isinstance(signature, list):
            value = []
            for elem in signature:
                value.append(TFArgument.generate_arg_from_signature(elem))
            return TFArgument(value, ArgType.LIST)
        if isinstance(signature, tuple):
            value = []
            for elem in signature:
                value.append(TFArgument.generate_arg_from_signature(elem))
            return TFArgument(value, ArgType.TUPLE)
        if (not isinstance(signature, dict)):
            return TFArgument(None, ArgType.NULL)
        if (('type' not in signature) and ('Label' not in signature)):
            return TFArgument(None, ArgType.NULL)
        label = (signature['type'] if ('type' in signature) else signature['Label'])
        if (label == 'tf_object'):
            if ('class_name' not in signature):
                return TFArgument(None, ArgType.TF_OBJECT)
            # Keras tensors / ref variables are treated as plain tensors.
            if ((signature['class_name'] == 'tensorflow.python.keras.engine.keras_tensor.KerasTensor') or (signature['class_name'] == 'tensorflow.python.ops.variables.RefVariable')):
                dtype = signature['dtype']
                shape = signature['shape']
                dtype = TFArgument.str_to_dtype(dtype)
                (minv, maxv) = TFArgument.random_tensor_value_range(dtype)
                return TFArgument(None, ArgType.TF_TENSOR, minv, maxv, shape, dtype)
            if (signature['class_name'] == 'tensorflow.python.framework.dtypes.DType'):
                name = signature['to_str'].replace("<dtype: '", '').replace("'>", '')
                value = eval(('tf.' + name))
                return TFArgument(value, ArgType.TF_DTYPE)
            # NOTE(review): ``signature`` is a dict, so attribute access here
            # looks wrong — presumably ``signature['class_name']`` was meant;
            # the bare except masks the resulting AttributeError. Confirm.
            try:
                value = eval(signature.class_name)
            except:
                value = None
            return TFArgument(value, ArgType.TF_OBJECT)
        if (label == 'raw'):
            # 'raw' values are JSON when possible, else the literal string.
            try:
                value = json.loads(signature['value'])
            except:
                value = signature['value']
                pass
            if isinstance(value, int):
                return TFArgument(value, ArgType.INT)
            if isinstance(value, str):
                return TFArgument(value, ArgType.STR)
            if isinstance(value, float):
                return TFArgument(value, ArgType.FLOAT)
            if isinstance(value, tuple):
                tuple_value = []
                for elem in value:
                    tuple_value.append(TFArgument.generate_arg_from_signature(elem))
                return TFArgument(tuple_value, ArgType.TUPLE)
            if isinstance(value, list):
                list_value = []
                for elem in value:
                    list_value.append(TFArgument.generate_arg_from_signature(elem))
                return TFArgument(list_value, ArgType.LIST)
        if (label == 'tuple'):
            try:
                value = json.loads(signature['value'])
                tuple_value = []
                for elem in value:
                    tuple_value.append(TFArgument.generate_arg_from_signature(elem))
                return TFArgument(tuple_value, ArgType.TUPLE)
            except:
                raise ValueError(('Wrong signature ' + str(signature)))
        if (label == 'list'):
            try:
                try:
                    value = json.loads(signature['value'])
                except:
                    value = signature['value']
                list_value = []
                for elem in value:
                    list_value.append(TFArgument.generate_arg_from_signature(elem))
                return TFArgument(list_value, ArgType.LIST)
            except:
                raise ValueError(('Wrong signature ' + str(signature)))
        if (label in ['tensor', 'KerasTensor', 'variable', 'nparray']):
            # All tensor-like labels require shape and dtype metadata.
            if (not (('shape' in signature.keys()) and ('dtype' in signature.keys()))):
                raise Exception('Wrong signature {0}'.format(signature))
            shape = signature['shape']
            dtype = signature['dtype']
            dtype = TFArgument.str_to_dtype(dtype)
            if isinstance(shape, (list, tuple)):
                (minv, maxv) = TFArgument.random_tensor_value_range(dtype)
                return TFArgument(None, ArgType.TF_TENSOR, minv, maxv, shape, dtype)
            else:
                # Malformed shape: fall back to a scalar-ish 1-element tensor.
                (minv, maxv) = (0, 1)
                shape = [1]
                return TFArgument(None, ArgType.TF_TENSOR, minv, maxv, shape, dtype)
        return TFArgument(None, ArgType.NULL)
def main(args):
    """Decode dev and test sets with a trained FlowNMT model.

    Dispatches on ``args.decode`` ('argmax', 'iw', or sampling) and writes
    translations for both splits into ``args.result_path``.
    """
    args, dataset, flownmt = setup(args)
    print(args)
    val_iter, test_iter = init_dataloader(args, dataset)
    result_path = args.result_path
    tau = args.tau
    n_tr = args.ntr
    splits = (('dev', val_iter), ('test', test_iter))
    if args.decode == 'argmax':
        for split_name, data_iter in splits:
            outfile = 'argmax.t{:.1f}.ntr{}.{}.mt'.format(tau, n_tr, split_name)
            translate_argmax(dataset, data_iter, flownmt, result_path, outfile, tau, n_tr)
    elif args.decode == 'iw':
        n_len = args.nlen
        for split_name, data_iter in splits:
            outfile = 'iw.t{:.1f}.nlen{}.ntr{}.{}.mt'.format(tau, n_len, n_tr, split_name)
            translate_iw(dataset, data_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
    else:
        # Plain sampling is incompatible with bucketed batching.
        assert not args.bucket_batch
        n_len = args.nlen
        for split_name, data_iter in splits:
            outfile = 'sample.t{:.1f}.nlen{}.ntr{}.{}.mt'.format(tau, n_len, n_tr, split_name)
            sample(dataset, data_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
# NOTE(review): the bare ``_module()`` call below looks like the remnant of a
# stripped registry decorator (presumably ``@HOOKS.register_module()``) —
# confirm against the original source; as written it raises NameError.
_module()
class SegmindLoggerHook(LoggerHook):
    """MMCV logger hook that forwards loggable training tags to Segmind."""

    def __init__(self, interval=10, ignore_last=True, reset_flag=False, by_epoch=True):
        # Standard LoggerHook knobs; see the base class for their semantics.
        super(SegmindLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch)
        self.import_segmind()

    def import_segmind(self):
        """Import segmind lazily so the dependency stays optional."""
        try:
            import segmind
        except ImportError:
            raise ImportError("Please run 'pip install segmind' to install segmind")
        # Cache the segmind entry points used from log().
        self.log_metrics = segmind.tracking.fluent.log_metrics
        self.mlflow_log = segmind.utils.logging_utils.try_mlflow_log

    # NOTE(review): the bare ``_only`` below looks like a stripped
    # ``@master_only`` decorator — confirm; as written it raises NameError
    # while the class body executes.
    _only
    def log(self, runner):
        # Push all loggable tags for the current epoch to Segmind.
        tags = self.get_loggable_tags(runner)
        if tags:
            self.mlflow_log(self.log_metrics, tags, step=runner.epoch, epoch=runner.epoch)
def get_path_bond_feature(bond):
    """Featurize a single bond for path encoding.

    Returns a zero vector of length N_BOND_FEATS when the bond is absent;
    otherwise concatenates one-hot bond type, conjugation and ring flags.
    """
    if bond is None:
        return np.zeros(N_BOND_FEATS)
    bond_type = onek_unk_encoding(bond.GetBondType(), BOND_TYPES)
    is_conjugated = [int(bond.GetIsConjugated())]
    in_ring = [int(bond.IsInRing())]
    return np.array(bond_type + is_conjugated + in_ring)
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False):
    """Convert PyTorch checkpoints of one (or all) model types to TF 2.0 .h5 files.

    :param args_model_type: a single model type key, or None for all known types
    :param tf_dump_path: existing directory the converted models are written to
    :param model_shortcut_names_or_path: checkpoint names/paths; defaults to all
        checkpoints known for each model type
    :param config_shortcut_names_or_path: config names/paths, paired with the
        checkpoints; defaults to the checkpoint names
    :param compare_with_pt_model: also run the PT model and compare outputs
    :param use_cached_models: reuse previously downloaded files
    :param remove_cached_files: delete downloaded files after conversion
    :param only_convert_finetuned_models: restrict to -squad/-mrpc/-mnli checkpoints
    """
    # BUG FIX: the original read ``args.tf_dump_path`` — ``args`` is not
    # defined in this scope, so the first line always raised NameError.
    assert os.path.isdir(tf_dump_path), '--tf_dump_path should be a directory'
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print('=' * 100)
        print(' Converting model type {}/{}: {}'.format(j, len(model_types), model_type))
        print('=' * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError('Unrecognized model type {}, should be one of {}.'.format(model_type, list(MODEL_CLASSES.keys())))
        (config_class, model_class, pt_model_class, aws_model_maps, aws_config_map) = MODEL_CLASSES[model_type]
        # NOTE(review): when None, these defaults are filled from the FIRST
        # model type's map and then reused for subsequent types — kept as-is
        # to preserve behavior; confirm whether a per-type reset was intended.
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print('-' * 100)
            # Fine-tuned checkpoints are identified by task suffixes.
            if ('-squad' in model_shortcut_name) or ('-mrpc' in model_shortcut_name) or ('-mnli' in model_shortcut_name):
                if not only_convert_finetuned_models:
                    print(' Skipping finetuned checkpoint {}'.format(model_shortcut_name))
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(' Skipping not finetuned checkpoint {}'.format(model_shortcut_name))
                continue
            print(' Converting checkpoint {}/{}: {} - model_type {}'.format(i, len(aws_config_map), model_shortcut_name, model_type))
            print('-' * 100)
            # Resolve config and model files, downloading them when needed.
            if config_shortcut_name in aws_config_map:
                config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models)
            else:
                config_file = cached_path(config_shortcut_name, force_download=not use_cached_models)
            if model_shortcut_name in aws_model_maps:
                model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=not use_cached_models)
            else:
                model_file = cached_path(model_shortcut_name, force_download=not use_cached_models)
            # Local file paths get a generic output name.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'
            convert_pt_checkpoint_to_tf(model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'), compare_with_pt_model=compare_with_pt_model)
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
def extract_ext_funcs(finit):
    """Collect the external functions registered through ``finit``.

    ``finit`` is called with the handle of a TVM-packed callback; every
    (name, func) pair it reports is gathered into a dict which is returned.
    Raises RuntimeError when ``finit`` reports a non-zero status.
    """
    registry = {}

    def _register(name, func):
        registry[name] = func

    packed = convert_to_tvm_func(_register)
    status = finit(packed.handle)
    _ = packed  # keep the packed callback alive until finit has returned
    if status != 0:
        raise RuntimeError('cannot initialize with %s' % finit)
    return registry
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block built from two 1x1 convolutions.

    Computes ``LayerNorm(dropout(W2(relu(W1(x)))) + x)`` over inputs of
    shape (batch, seq_len, d_in).
    """

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # 1x1 convolutions act as per-position linear layers.
        self.w_1 = nn.Conv1d(d_in, d_hid, 1)
        self.w_2 = nn.Conv1d(d_hid, d_in, 1)
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        skip = x
        # Conv1d expects (batch, channels, length): swap seq and feature dims.
        out = self.w_1(x.transpose(1, 2))
        out = self.w_2(F.relu(out))
        out = self.dropout(out.transpose(1, 2))
        # Residual connection followed by layer normalization.
        return self.layer_norm(out + skip)
def check_goldstein_conditions(step_size, loss, grad_norm, loss_next, c, beta_b, beta_f, bound_step_size, eta_max):
    """Check the Goldstein line-search conditions and adapt the step size.

    found codes: 1 = only sufficient decrease holds (step grown by beta_f,
    optionally capped at eta_max), 2 = only the lower Goldstein bound holds
    (step shrunk by beta_b, floored at 1e-08), 3 = both hold (step kept).
    Raises ValueError when neither condition holds.
    """
    sufficient_decrease = loss_next <= loss - step_size * c * grad_norm ** 2
    lower_bound_ok = loss_next >= loss - step_size * (1 - c) * grad_norm ** 2
    if sufficient_decrease and lower_bound_ok:
        found = 3
    elif sufficient_decrease:
        found = 1
    elif lower_bound_ok:
        found = 2
    else:
        found = 0
    if found == 0:
        raise ValueError('Error')
    if found == 1:
        # Decrease was sufficient but step may be too small: grow it.
        step_size = step_size * beta_f
        if bound_step_size:
            step_size = min(step_size, eta_max)
    elif found == 2:
        # Step overshot: shrink it, but never below the numerical floor.
        step_size = max(step_size * beta_b, 1e-08)
    return {'found': found, 'step_size': step_size}
def get_bn_params(**params):
    """Return BatchNormalization kwargs with data-format-aware defaults.

    The channel axis follows the active Keras image data format; any keyword
    arguments override the defaults.
    """
    if backend.image_data_format() == 'channels_last':
        axis = 4
    else:
        axis = 1
    bn_params = {'axis': axis, 'epsilon': 9.e-06}
    bn_params.update(params)
    return bn_params
class Dataloder():
    """Builds ImageNet-style train/test dataloaders from a config object.

    Expects ``config.dataset_path`` to contain 'train' and 'test' subfolders
    in torchvision ImageFolder layout.
    """

    def __init__(self, config):
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Training pipeline: random crop/flip/jitter/lighting augmentation.
        train_tf = transforms.Compose([transforms.Resize(256), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(0.4, 0.4, 0.4), transforms.ToTensor(), Lighting(0.1), normalize])
        # Evaluation pipeline: deterministic resize + center crop.
        test_tf = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
        trainset = datasets.ImageFolder(os.path.join(config.dataset_path, 'train'), train_tf)
        testset = datasets.ImageFolder(os.path.join(config.dataset_path, 'test'), test_tf)
        loader_kwargs = {'num_workers': 8, 'pin_memory': True} if config.cuda else {}
        self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=config.batch_size, shuffle=True, **loader_kwargs)
        self.testloader = torch.utils.data.DataLoader(testset, batch_size=config.batch_size, shuffle=False, **loader_kwargs)
        self.classes = trainset.classes

    def getloader(self):
        """Return (class names, train loader, test loader)."""
        return (self.classes, self.trainloader, self.testloader)
class TableTransformerForObjectDetection(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is not installed.

    Instantiating it raises an informative error via ``requires_backends``.
    """
    # Backends the real implementation depends on.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str):
    """Load LVIS annotations and combine them with image records.

    Registers category metadata for ``dataset_name``, pairs every image with
    its annotations (images are processed in sorted id order so the result is
    deterministic), verifies annotation id uniqueness and returns the merged
    per-image records.
    """
    lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file))
    _add_categories_metadata(dataset_name)
    sorted_img_ids = sorted(lvis_api.imgs.keys())
    images = lvis_api.load_imgs(sorted_img_ids)
    logger = logging.getLogger(__name__)
    logger.info('Loaded {} images in LVIS format from {}'.format(len(images), annotations_json_file))
    # Annotations are aligned with images via the shared sorted id order.
    annotations = [lvis_api.img_ann_map[img_id] for img_id in sorted_img_ids]
    _verify_annotations_have_unique_ids(annotations_json_file, annotations)
    return _combine_images_with_annotations(dataset_name, image_root, images, annotations)
class CAD(AbstractCAD):
    """Unconditional variant of contrastive adversarial domain adaptation.

    Thin wrapper over AbstractCAD with ``is_conditional`` fixed to False.
    """

    def __init__(self, model, dataset, optimizer, hparams):
        super().__init__(model, dataset, optimizer, hparams, is_conditional=False)
def load_pretrained(model_args, training_args) -> Tuple[(nn.Module, PREPROCESSOR)]:
    """Load a pretrained model and its preprocessor for the configured type.

    :param model_args: arguments object; ``model_args.type`` selects the model
    :param training_args: training arguments forwarded to the loader
    :raises ValueError: when ``model_args.type`` is not a supported model type
    """
    type_ = model_args.type
    if type_ == 'llava':
        return load_pretrained_llava(model_args, training_args)
    # BUG FIX: was ``assert False`` — stripped under ``python -O`` and gave no
    # diagnostic. Raise an explicit, actionable error instead.
    raise ValueError(f'Unsupported model type: {type_!r}')
class MCTCTConfig(PretrainedConfig):
    """Configuration class for the M-CTC-T speech recognition model.

    Stores the architecture hyper-parameters (transformer dimensions, the
    convolutional feature frontend and CTC loss settings) and validates that
    the per-layer convolution settings are consistent.
    """
    model_type = 'mctct'

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-05, layerdrop=0.3, hidden_act='relu', initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction='sum', ctc_zero_infinity=False, **kwargs):
        """Initialize the config; see the class docstring for the parameter
        groups. Special token ids are forwarded to ``PretrainedConfig``."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        # Regularization: layerdrop and dropout probabilities.
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Special token ids (also passed to the base class above).
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Convolutional feature-extraction frontend.
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        # CTC loss behavior.
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # Normalize to lists so they can be indexed per conv layer; one entry
        # is required for each of the ``num_conv_layers`` layers.
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if (len(self.conv_kernel) != self.num_conv_layers):
            raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.conv_kernel)` == `config.num_conv_layers` but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, `config.num_conv_layers = {self.num_conv_layers}`.')
# NOTE(review): the bare ``_module()`` call below looks like the remnant of a
# stripped registry decorator (presumably ``@DETECTORS.register_module()``) —
# confirm against the original source; as written it raises NameError.
_module()
class TensorRTDetector(TextDetectorMixin, SingleStageTextDetector):
    """Text detector that runs inference through a serialized TensorRT engine.

    Inference-only: training/augmented-test/feature-extraction entry points
    raise NotImplementedError.
    """

    def __init__(self, trt_file: str, cfg: Any, device_id: int, show_score: bool=False):
        # 'type' is consumed by the registry builder, not by the base class.
        if ('type' in cfg.model):
            cfg.model.pop('type')
        SingleStageTextDetector.__init__(self, **cfg.model)
        TextDetectorMixin.__init__(self, show_score)
        from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
        # Custom TensorRT plugins are optional; warn instead of failing.
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with TensorRT from source.')
        # Wrap the engine file; a single 'input'/'output' binding is assumed.
        model = TRTWrapper(trt_file, input_names=['input'], output_names=['output'])
        self.model = model
        self.device_id = device_id
        self.cfg = cfg

    def forward_train(self, img, img_metas, **kwargs):
        # This wrapper only supports inference.
        raise NotImplementedError('This method is not implemented.')

    def aug_test(self, imgs, img_metas, **kwargs):
        raise NotImplementedError('This method is not implemented.')

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def simple_test(self, img: torch.Tensor, img_metas: Iterable, rescale: bool=False):
        """Run the TensorRT engine on ``img`` and decode text boundaries."""
        with torch.cuda.device(self.device_id), torch.no_grad():
            trt_pred = self.model({'input': img})['output']
            # Batched inputs are decoded per sample; a single image is
            # decoded in one call.
            if (len(img_metas) > 1):
                boundaries = [self.bbox_head.get_boundary(*trt_pred[i].unsqueeze(0), [img_metas[i]], rescale) for i in range(len(img_metas))]
            else:
                boundaries = [self.bbox_head.get_boundary(*trt_pred, img_metas, rescale)]
        return boundaries
class IntegerNode(ExprNode):
    """IR expression node holding an integer literal."""

    def __init__(self, parse_info=None, raw_text=None, value=None):
        super().__init__(IRNodeType.Integer, raw_text=raw_text, parse_info=parse_info)
        # The literal integer value this node represents.
        self.value = value
class SupervisedCorrectionReader(Reader):
    """Reader pairing mutated FEVER claims with evidence for supervised correction.

    Evidence either comes pre-retrieved from a pipeline ('pipeline_text') or is
    resolved from gold (page, line) pointers via the local FEVER Wikipedia DB.
    A single reader instance must use one source consistently; this is
    enforced with the ``using_gold``/``using_pipeline`` flags.
    """

    def __init__(self, labels, test=False):
        super().__init__(labels, test)
        # Local FEVER Wikipedia dump used to resolve gold evidence lines.
        self.db = FEVERDocumentDatabase('resources/wikipedia/fever.db')
        self.using_gold = False
        self.using_pipeline = False

    def generate_instances(self, instance):
        """Yield one source/target/evidence record for ``instance``.

        Returns None (empty generator) when the instance has no usable
        verdict (unless running in test mode).
        """
        if ((instance['verdict'] is None) or ((instance['verdict'] not in self.labels) and (not self.test))):
            return None
        collected_evidence = []
        if ('pipeline_text' in instance):
            # Retrieved evidence: (page, text) pairs straight from the pipeline.
            assert (not self.using_gold)
            self.using_pipeline = True
            for (page, evidence) in instance['pipeline_text']:
                collected_evidence.append(self.maybe_format(page, evidence))
        else:
            # Gold evidence: resolve (page, line-number) pointers via the DB.
            assert (not self.using_pipeline)
            self.using_gold = True
            for (page, line) in self.deduplicate(instance['evidence']):
                if ((page is None) or (line is None)):
                    continue
                # Page ids may carry '#section' suffixes; look up the page only.
                found_page = self.db.get_doc_lines(page.split('#')[0])
                if (found_page is None):
                    print('Page {} not found'.format(page))
                    continue
                found_page = found_page.split('\n')
                assert (line < len(found_page))
                # Each stored line is tab-separated; field 1 is the sentence.
                ev_splits = found_page[line].split('\t')
                assert (len(ev_splits) > 0)
                evidence = found_page[line].split('\t')[1].strip()
                if (len(evidence) == 0):
                    print('Zero evidence for: {} {}'.format(page, line))
                    continue
                assert (len(evidence) > 0)
                collected_evidence.append(self.maybe_format(page, evidence))
        # Join evidence snippets with a separator the model can split on.
        evidence = ' ### '.join(collected_evidence)
        a = {'source': instance['mutated'], 'target': instance['original'], 'evidence': evidence, 'mutation_type': instance['mutation'], 'veracity': instance['verdict']}
        (yield a)
# NOTE(review): the three lines below are web-page boilerplate accidentally
# captured along with the source (dataset-hosting UI text, not code). They are
# preserved verbatim as comments so the file remains importable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.