code stringlengths 17 6.64M |
|---|
def train_avmnist_track_acc(model, criteria, optimizer, scheduler, dataloaders, dataset_sizes, device=None, num_epochs=200, verbose=False, multitask=False):
    """Train an AV-MNIST (image + audio) model and track the best dev accuracy.

    Args:
        model: network called as model((image_batch, audio_batch)); returns
            logits, or a list of per-head logits when multitask=True.
        criteria: list of loss callables; only criteria[0] is used unless
            multitask=True, in which case criteria[0..2] score three heads.
        optimizer: torch optimizer, stepped once per training batch.
        scheduler: an sc.LRCosineAnnealingScheduler steps per batch and
            rewrites optimizer LRs via update_optimizer; any other scheduler
            steps once per epoch at the start of the train phase.
        dataloaders: dict with 'train' and 'dev' loaders yielding dicts with
            'image', 'audio' and 'label' tensors.
        dataset_sizes: dict phase -> sample count, used for accuracy.
        device: target device for batch tensors.
        num_epochs: number of training epochs.
        verbose: unused here; kept for a uniform train-function signature.
        multitask: enables the three-head loss/prediction path.

    Returns:
        Best dev accuracy (0-dim tensor). The model is left loaded with the
        best-dev weights and in eval mode.
    """
    best_model_sd = copy.deepcopy(model.state_dict())
    best_acc = 0
    for epoch in range(num_epochs):
        for phase in ['train', 'dev']:
            if (phase == 'train'):
                # Non-cosine schedulers advance once per epoch (legacy
                # pre-1.1 PyTorch ordering: step before the epoch's updates).
                if (not isinstance(scheduler, sc.LRCosineAnnealingScheduler)):
                    scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0  # accumulated but not reported in this variant
            running_corrects = 0
            for data in dataloaders[phase]:
                (rgb, snd, label) = (data['image'], data['audio'], data['label'])
                rgb = rgb.to(device)
                snd = snd.to(device)
                label = label.to(device)
                optimizer.zero_grad()
                # Gradients are only tracked during the train phase.
                with torch.set_grad_enabled((phase == 'train')):
                    output = model((rgb, snd))
                    if (not multitask):
                        (_, preds) = torch.max(output, 1)
                        loss = criteria[0](output, label)
                    else:
                        # Multitask: predictions come from the summed head
                        # logits; the loss sums the three per-head losses.
                        (_, preds) = torch.max(sum(output), 1)
                        loss = ((criteria[0](output[0], label) + criteria[1](output[1], label)) + criteria[2](output[2], label))
                    if (phase == 'train'):
                        # Cosine-annealing scheduler advances per batch and
                        # pushes its learning rate into the optimizer.
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.step()
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += (loss.item() * rgb.size(0))
                running_corrects += torch.sum((preds == label.data))
            epoch_acc = (running_corrects.double() / dataset_sizes[phase])
            print('{} Acc: {:.4f}'.format(phase, epoch_acc))
            # Snapshot the weights whenever dev accuracy improves.
            if ((phase == 'dev') and (epoch_acc > best_acc)):
                best_acc = epoch_acc
                best_model_sd = copy.deepcopy(model.state_dict())
    model.load_state_dict(best_model_sd)
    model.train(False)
    return best_acc
|
def test_avmnist_track_acc(model, dataloaders, dataset_sizes, device=None, multitask=False):
model.train(False)
phase = 'test'
running_corrects = 0
for data in dataloaders[phase]:
(rgb, snd, label) = (data['image'], data['audio'], data['label'])
rgb = rgb.to(device)
snd = snd.to(device)
label = label.to(device)
output = model((rgb, snd))
if (not multitask):
(_, preds) = torch.max(output, 1)
else:
(_, preds) = torch.max(sum(output), 1)
running_corrects += torch.sum((preds == label.data))
acc = (running_corrects.double() / dataset_sizes[phase])
return acc
|
def train_cifar_track_acc(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device, num_epochs=200, verbose=False, use_intermediate=False):
    """Train a CIFAR model and track the lowest dev error.

    model(rgb) must return (final_logits, intermediate_logits). When
    use_intermediate is True the intermediate head is supervised with an
    auxiliary cross-entropy weighted 0.4 (Inception-style auxiliary loss).

    Returns the best dev accuracy (1.0 - best error). The model is left
    loaded with the best-dev weights and in eval mode.
    """
    best_model_sd = copy.deepcopy(model.state_dict())
    best_error = 1e+100
    criterion2 = torch.nn.CrossEntropyLoss()  # auxiliary loss for the intermediate head
    for epoch in range(num_epochs):
        if verbose:
            print()
        for phase in ['train', 'dev']:
            if (phase == 'train'):
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0  # accumulated but not reported in this variant
            running_corrects = 0
            for data in dataloaders[phase]:
                (rgb, gt_label) = (data[0], data[1])
                rgb = rgb.to(device)
                gt_label = gt_label.to(device)
                optimizer.zero_grad()
                # Gradients only tracked while training.
                with torch.set_grad_enabled((phase == 'train')):
                    (output, output_i) = model(rgb)
                    if (not use_intermediate):
                        loss = criterion(output, gt_label)
                    else:
                        loss = (criterion(output, gt_label) + (0.4 * criterion2(output_i, gt_label)))
                    (_, preds) = torch.max(output, 1)
                    if (phase == 'train'):
                        # NOTE(review): scheduler.step() runs once per BATCH
                        # here, unlike the per-epoch stepping in the sibling
                        # train functions — confirm this is intended for the
                        # scheduler type used.
                        scheduler.step()
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += (loss.item() * rgb.size(0))
                running_corrects += torch.sum((preds == gt_label.data))
            epoch_error = (1.0 - (running_corrects.double() / dataset_sizes[phase]))
            if (phase == 'dev'):
                # Snapshot weights whenever the dev error improves.
                if (epoch_error < best_error):
                    best_error = epoch_error
                    best_model_sd = copy.deepcopy(model.state_dict())
                if verbose:
                    print('Epoch #{} val error: {}'.format(epoch, epoch_error))
    model.load_state_dict(best_model_sd)
    model.train(False)
    if verbose:
        print('Best val error: {}'.format(best_error))
    return (1.0 - best_error)
|
def test_cifar_track_acc(model, dataloaders, dataset_sizes, device):
phase = 'test'
model.train(False)
running_corrects = 0
for data in dataloaders[phase]:
(rgb, gt_label) = (data[0], data[1])
rgb = rgb.to(device)
gt_label = gt_label.to(device)
(output, _) = model(rgb)
(_, preds) = torch.max(output, 1)
running_corrects += torch.sum((preds == gt_label.data))
acc = (running_corrects.double() / dataset_sizes[phase])
return acc
|
def train_mmimdb_track_f1(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, device=None, num_epochs=200, verbose=False, init_f1=0.0, th_fscore=0.3):
    """Train a multi-label MM-IMDb (text + image) model, tracking dev sample-F1.

    Dev predictions are produced by thresholding sigmoid(logits) at th_fscore
    and scored with f1_score(average='samples'). Two NaN guards exist (NaN is
    detected via `x != x`): a NaN training loss aborts immediately with the
    best F1 so far; a NaN best F1 after a single-epoch run triggers exactly
    one extra pass (the `failsafe` loop) before falling back to 0.0.

    Returns the best dev F1; the model ends loaded with the best weights and
    in eval mode.
    """
    best_model_sd = copy.deepcopy(model.state_dict())
    best_f1 = init_f1
    failsafe = True
    cont_overloop = 0
    while failsafe:
        for epoch in range(num_epochs):
            for phase in ['train', 'dev']:
                if (phase == 'train'):
                    # Non-cosine schedulers step once per epoch.
                    if (not isinstance(scheduler, sc.LRCosineAnnealingScheduler)):
                        scheduler.step()
                    model.train(True)
                else:
                    model.train(False)
                list_preds = []
                list_label = []
                running_loss = 0.0
                for data in dataloaders[phase]:
                    (image, text, label) = (data['image'], data['text'], data['label'])
                    image = image.to(device)
                    text = text.to(device)
                    label = label.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled((phase == 'train')):
                        output = model(text, image)
                        # Some models return a tuple; the logits are last.
                        if isinstance(output, tuple):
                            output = output[(- 1)]
                        # preds is unused (multi-label task); kept from the
                        # shared training-loop template.
                        (_, preds) = torch.max(output, 1)
                        loss = criterion(output, label)
                        if (phase == 'train'):
                            # Cosine-annealing scheduler steps per batch.
                            if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                                scheduler.step()
                                scheduler.update_optimizer(optimizer)
                            loss.backward()
                            optimizer.step()
                        if (phase == 'dev'):
                            # Multi-label prediction: sigmoid + fixed threshold.
                            preds_th = (torch.nn.functional.sigmoid(output) > th_fscore)
                            list_preds.append(preds_th.cpu())
                            list_label.append(label.cpu())
                    running_loss += (loss.item() * image.size(0))
                epoch_loss = (running_loss / dataset_sizes[phase])
                if (phase == 'dev'):
                    y_pred = torch.cat(list_preds, dim=0).numpy()
                    y_true = torch.cat(list_label, dim=0).numpy()
                    curr_f1 = f1_score(y_true, y_pred, average='samples')
                    if verbose:
                        print('epoch #{} {} F1: {:.4f} '.format(epoch, phase, curr_f1))
                # NaN loss guard: bail out with the best weights so far.
                if ((phase == 'train') and (epoch_loss != epoch_loss)):
                    print('Nan loss during training, escaping')
                    model.load_state_dict(best_model_sd)
                    model.train(False)
                    return best_f1
                if (phase == 'dev'):
                    if (curr_f1 > best_f1):
                        best_f1 = curr_f1
                        best_model_sd = copy.deepcopy(model.state_dict())
        # Retry once when a single-epoch run recorded a NaN F1.
        if ((best_f1 != best_f1) and (num_epochs == 1) and (cont_overloop < 1)):
            failsafe = True
            print('Recording a NaN F1, training for one more epoch.')
        else:
            failsafe = False
        cont_overloop += 1
    model.load_state_dict(best_model_sd)
    model.train(False)
    if (best_f1 != best_f1):
        best_f1 = 0.0  # normalize a NaN result to 0
    return best_f1
|
def train_ntu_track_acc(model, criteria, optimizer, scheduler, dataloaders, dataset_sizes, device=None, num_epochs=200, verbose=False, multitask=False):
    """Train an NTU RGB+skeleton model and track the best dev accuracy.

    criteria may be a single loss callable or a list; with multitask=True it
    must be a list of three losses, one per output head. Scheduler handling
    mirrors train_avmnist_track_acc: cosine-annealing schedulers step per
    batch and rewrite optimizer LRs, others step once per epoch.

    Returns the best dev accuracy; the model ends loaded with the best-dev
    weights and in eval mode.
    """
    best_model_sd = copy.deepcopy(model.state_dict())
    best_acc = 0
    for epoch in range(num_epochs):
        for phase in ['train', 'dev']:
            if (phase == 'train'):
                # Non-cosine schedulers step once per epoch.
                if (not isinstance(scheduler, sc.LRCosineAnnealingScheduler)):
                    scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                (rgb, ske, label) = (data['rgb'], data['ske'], data['label'])
                rgb = rgb.to(device)
                ske = ske.to(device)
                label = label.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled((phase == 'train')):
                    output = model((rgb, ske))
                    if (not multitask):
                        (_, preds) = torch.max(output, 1)
                        # Accept either a list of criteria or a bare callable.
                        if isinstance(criteria, list):
                            loss = criteria[0](output, label)
                        else:
                            loss = criteria(output, label)
                    else:
                        # Multitask: predict from summed head logits; sum the
                        # three per-head losses.
                        (_, preds) = torch.max(sum(output), 1)
                        loss = ((criteria[0](output[0], label) + criteria[1](output[1], label)) + criteria[2](output[2], label))
                    if (phase == 'train'):
                        # Cosine-annealing scheduler steps per batch.
                        if isinstance(scheduler, sc.LRCosineAnnealingScheduler):
                            scheduler.step()
                            scheduler.update_optimizer(optimizer)
                        loss.backward()
                        optimizer.step()
                running_loss += (loss.item() * rgb.size(0))
                running_corrects += torch.sum((preds == label.data))
            epoch_loss = (running_loss / dataset_sizes[phase])
            epoch_acc = (running_corrects.double() / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # Snapshot weights whenever dev accuracy improves.
            if ((phase == 'dev') and (epoch_acc > best_acc)):
                best_acc = epoch_acc
                best_model_sd = copy.deepcopy(model.state_dict())
    model.load_state_dict(best_model_sd)
    model.train(False)
    return best_acc
|
def test_ntu_track_acc(model, dataloaders, dataset_sizes, device=None, multitask=False):
model.train(False)
phase = 'test'
running_corrects = 0
for data in dataloaders[phase]:
(rgb, ske, label) = (data['rgb'], data['ske'], data['label'])
rgb = rgb.to(device)
ske = ske.to(device)
label = label.to(device)
output = model((rgb, ske))
if (not multitask):
(_, preds) = torch.max(output, 1)
else:
(_, preds) = torch.max(sum(output), 1)
running_corrects += torch.sum((preds == label.data))
acc = (running_corrects.double() / dataset_sizes[phase])
return acc
|
class ModelSearcher():
    """Base driver for neural-architecture search over fusion models.

    Subclasses build dataloaders and invoke either the surrogate-guided
    progressive search (_epnas) or plain random search (_randsearch) with
    dataset-specific callbacks.
    """

    def __init__(self, args):
        # args: experiment configuration namespace (search_iterations,
        # num_samples, max_progression_levels, temperatures, verbosity,
        # lr_surrogate, ...).
        self.args = args

    def search(self):
        """Entry point; overridden by dataset-specific subclasses."""
        pass

    def _epnas(self, model_type, surrogate_dict, dataloaders, dataset_searchmethods, device):
        """Progressive search guided by an accuracy-predicting surrogate.

        surrogate_dict: {'model': surrogate network, 'criterion': its loss}.
        dataset_searchmethods: {'train_sampled_fun': trains a list of sampled
        configurations and returns their accuracies, 'get_layer_confs':
        enumerates candidate layer configurations for a progression level}.
        Returns the surrogate's accumulated training data (s_data).
        """
        surrogate = surrogate_dict['model']
        s_crite = surrogate_dict['criterion']
        s_data = surr.SurrogateDataloader()
        s_optim = op.Adam(surrogate.parameters(), lr=self.args.lr_surrogate)
        train_sampled_models = dataset_searchmethods['train_sampled_fun']
        get_possible_layer_configurations = dataset_searchmethods['get_layer_confs']
        temperature = self.args.initial_temperature
        sampled_k_confs = []
        shared_weights = dict()  # weight-sharing cache across sampled models
        for si in range(self.args.search_iterations):
            if self.args.verbose:
                print((50 * '='))
                print('Search iteration {}/{} '.format(si, self.args.search_iterations))
            for progression_index in range(self.args.max_progression_levels):
                if self.args.verbose:
                    print((25 * '-'))
                    print('Progressive step {}/{} '.format(progression_index, self.args.max_progression_levels))
                # Expand previously sampled configurations with every
                # candidate layer for this progression level.
                list_possible_layer_confs = get_possible_layer_configurations(progression_index)
                all_configurations = tools.merge_unfolded_with_sampled(sampled_k_confs, list_possible_layer_confs, progression_index)
                # Very first step: no trained surrogate yet, so train every
                # candidate and use the results to fit the surrogate.
                if ((si + progression_index) == 0):
                    all_accuracies = train_sampled_models(all_configurations, model_type, dataloaders, self.args, device, state_dict=shared_weights)
                    tools.update_surrogate_dataloader(s_data, all_configurations, all_accuracies)
                    tools.train_surrogate(surrogate, s_data, s_optim, s_crite, self.args, device)
                    if self.args.verbose:
                        print('Trained architectures: ')
                        print(list(zip(all_configurations, all_accuracies)))
                else:
                    # Later steps: rank candidates by predicted accuracy.
                    all_accuracies = tools.predict_accuracies_with_surrogate(all_configurations, surrogate, device)
                    if self.args.verbose:
                        print('Predicted accuracies: ')
                        print(list(zip(all_configurations, all_accuracies)))
                # Temperature-controlled sampling of k configurations.
                if ((si + progression_index) == 0):
                    sampled_k_confs = tools.sample_k_configurations(all_configurations, all_accuracies, self.args.num_samples, temperature)
                    if self.args.verbose:
                        estimated_accuracies = tools.predict_accuracies_with_surrogate(all_configurations, surrogate, device)
                        diff = np.abs((np.array(estimated_accuracies) - np.array(all_accuracies)))
                        print('Error on accuracies = {}'.format(diff))
                else:
                    sampled_k_confs = tools.sample_k_configurations(all_configurations, all_accuracies, self.args.num_samples, temperature)
                    # Train only the sampled k and refit the surrogate on
                    # their measured accuracies.
                    sampled_k_accs = train_sampled_models(sampled_k_confs, model_type, dataloaders, self.args, device, state_dict=shared_weights)
                    tools.update_surrogate_dataloader(s_data, sampled_k_confs, sampled_k_accs)
                    err = tools.train_surrogate(surrogate, s_data, s_optim, s_crite, self.args, device)
                    if self.args.verbose:
                        print('Trained architectures: ')
                        print(list(zip(sampled_k_confs, sampled_k_accs)))
                        print('with surrogate error: {}'.format(err))
                # NOTE(review): the stride multiplies by search_iterations,
                # not max_progression_levels — confirm the intended global
                # step index for the temperature schedule.
                iteration = ((si * self.args.search_iterations) + progression_index)
                temperature = tools.compute_temperature(iteration, self.args)
                if self.args.verbose:
                    print('Temperature is being set to {}'.format(temperature))
        return s_data

    def _randsearch(self, model_type, dataloaders, dataset_searchmethods, device):
        """Random-search baseline: repeatedly sample, train, and log models.

        Runs search_iterations * max_progression_levels rounds so the total
        budget matches _epnas. Returns the accumulated results (s_data).
        """
        s_data = surr.SurrogateDataloader()
        train_sampled_models = dataset_searchmethods['train_sampled_fun']
        get_possible_layer_configurations = dataset_searchmethods['get_layer_confs']
        sampled_k_confs = []
        shared_weights = dict()  # weight-sharing cache across sampled models
        for si in range((self.args.search_iterations * self.args.max_progression_levels)):
            if self.args.verbose:
                print((50 * '='))
                print('Random Search iteration {}/{} '.format(si, (self.args.search_iterations * self.args.max_progression_levels)))
            sampled_k_confs = tools.sample_k_configurations_directly(self.args.num_samples, self.args.max_progression_levels, get_possible_layer_configurations)
            sampled_k_accs = train_sampled_models(sampled_k_confs, model_type, dataloaders, self.args, device, state_dict=shared_weights)
            tools.update_surrogate_dataloader(s_data, sampled_k_confs, sampled_k_accs)
            if self.args.verbose:
                print('Trained architectures: ')
                print(list(zip(sampled_k_confs, sampled_k_accs)))
        return s_data
|
class AVMNISTSearcher(ModelSearcher):
    """Architecture searcher for AV-MNIST audio+image fusion networks."""

    def __init__(self, args, device):
        super(AVMNISTSearcher, self).__init__(args)
        self.device = device
        transformer = transforms.Compose([avmnist_data.ToTensor(), avmnist_data.Normalize((0.1307,), (0.3081,))])
        # Two handles on the same 'train' stage; the 55k samples are split
        # by index into 50k train / 5k dev.
        dataset_training = avmnist_data.AVMnist(args.datadir, transform=transformer, stage='train')
        dataset_validate = avmnist_data.AVMnist(args.datadir, transform=transformer, stage='train')
        train_subset = Subset(dataset_training, list(range(0, 50000)))
        valid_subset = Subset(dataset_validate, list(range(50000, 55000)))
        self.dataloaders = {
            'train': torch.utils.data.DataLoader(train_subset, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers),
            'dev': torch.utils.data.DataLoader(valid_subset, batch_size=args.batchsize, shuffle=False, num_workers=args.num_workers),
        }

    def search(self):
        """Run random search or surrogate-guided EPNAS per args.randsearch."""
        methods = {'train_sampled_fun': avmnist.train_sampled_models, 'get_layer_confs': avmnist.get_possible_layer_configurations}
        if self.args.randsearch:
            return self._randsearch(avmnist.Searchable_Audio_Image_Net, self.dataloaders, methods, self.device)
        surrogate = surr.SimpleRecurrentSurrogate(100, 3, 100)
        surrogate.to(self.device)
        surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
        return self._epnas(avmnist.Searchable_Audio_Image_Net, surrogate_dict, self.dataloaders, methods, self.device)
|
class NTUSearcher(ModelSearcher):
    """EPNAS architecture searcher for NTU skeleton+video fusion networks."""

    def __init__(self, args, device):
        super(NTUSearcher, self).__init__(args)
        self.device = device
        val_transform = transforms.Compose([ntu_data.NormalizeLen(args.vid_len), ntu_data.ToTensor()])
        train_transform = transforms.Compose([ntu_data.AugCrop(), ntu_data.NormalizeLen(args.vid_len), ntu_data.ToTensor()])
        datasets = {
            'train': ntu_data.NTU(args.datadir, transform=train_transform, stage='trainexp', args=args),
            'dev': ntu_data.NTU(args.datadir, transform=val_transform, stage='dev', args=args),
        }
        self.dataloaders = {
            split: DataLoader(datasets[split], batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, drop_last=False)
            for split in ['train', 'dev']
        }

    def search(self):
        """Run surrogate-guided EPNAS over searchable NTU fusion models."""
        surrogate = surr.SimpleRecurrentSurrogate(100, 3, 100)
        surrogate.to(self.device)
        surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
        methods = {'train_sampled_fun': ntu.train_sampled_models, 'get_layer_confs': ntu.get_possible_layer_configurations}
        return self._epnas(ntu.Searchable_Skeleton_Image_Net, surrogate_dict, self.dataloaders, methods, self.device)
|
class CifarSearcher(ModelSearcher):
    """EPNAS architecture searcher for CIFAR-10 micro-CNN cells.

    The 50k CIFAR-10 training set is split by index into 45k train / 5k dev.
    """

    def __init__(self, args, device):
        super(CifarSearcher, self).__init__(args)
        self.device = device
        train_indices = list(range(0, 45000))
        valid_indices = list(range(45000, 50000))
        transformer_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
        transformer_val = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
        transformers = {'train': transformer_train, 'test': transformer_val}
        dataset_training = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=transformers['train'])
        # BUGFIX: the dev split previously reused the *train* transform, so
        # validation accuracy was measured under random crop/flip
        # augmentation; evaluate the dev split deterministically instead.
        dataset_validate = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=transformers['test'])
        train_subset = Subset(dataset_training, train_indices)
        valid_subset = Subset(dataset_validate, valid_indices)
        trainloader = torch.utils.data.DataLoader(train_subset, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers)
        devloader = torch.utils.data.DataLoader(valid_subset, batch_size=args.batchsize, shuffle=False, num_workers=args.num_workers)
        self.dataloaders = {'train': trainloader, 'dev': devloader}

    def search(self):
        """Run surrogate-guided EPNAS over searchable micro-CNN cells."""
        surrogate = surr.SimpleRecurrentSurrogate(100, 4, 100)
        surrogate.to(self.device)
        surrogate_dict = {'model': surrogate, 'criterion': torch.nn.MSELoss()}
        cifar_searchmethods = {'train_sampled_fun': cifar.train_sampled_models, 'get_layer_confs': cifar.get_possible_layer_configurations}
        return self._epnas(cifar.Searchable_MicroCNN, surrogate_dict, self.dataloaders, cifar_searchmethods, self.device)
|
def list_pmhc_types():
    """Return the 50 pMHC binder column names used in the 10x donor matrices.

    Entries follow '<HLA allele>_<peptide>_<antigen source>_binder'; the six
    '_NC_' entries are negative controls. These names must match the column
    headers of the '*_binarized_matrix.csv' files read by load_receptors.
    """
    return ['A0101_VTEHDTLLY_IE-1_CMV_binder', 'A0201_KTWGQYWQV_gp100_Cancer_binder', 'A0201_ELAGIGILTV_MART-1_Cancer_binder', 'A0201_CLLWSFQTSA_Tyrosinase_Cancer_binder', 'A0201_IMDQVPFSV_gp100_Cancer_binder', 'A0201_SLLMWITQV_NY-ESO-1_Cancer_binder', 'A0201_KVAELVHFL_MAGE-A3_Cancer_binder', 'A0201_KVLEYVIKV_MAGE-A1_Cancer_binder', 'A0201_CLLGTYTQDV_Kanamycin-B-dioxygenase_binder', 'A0201_LLDFVRFMGV_EBNA-3B_EBV_binder', 'A0201_LLMGTLGIVC_HPV-16E7_82-91_binder', 'A0201_CLGGLLTMV_LMP-2A_EBV_binder', 'A0201_YLLEMLWRL_LMP1_EBV_binder', 'A0201_FLYALALLL_LMP2A_EBV_binder', 'A0201_GILGFVFTL_Flu-MP_Influenza_binder', 'A0201_GLCTLVAML_BMLF1_EBV_binder', 'A0201_NLVPMVATV_pp65_CMV_binder', 'A0201_ILKEPVHGV_RT_HIV_binder', 'A0201_FLASKIGRLV_Ca2-indepen-Plip-A2_binder', 'A2402_CYTWNQMNL_WT1-(235-243)236M_Y_binder', 'A0201_RTLNAWVKV_Gag-protein_HIV_binder', 'A0201_KLQCVDLHV_PSA146-154_binder', 'A0201_LLFGYPVYV_HTLV-1_binder', 'A0201_SLFNTVATL_Gag-protein_HIV_binder', 'A0201_SLYNTVATLY_Gag-protein_HIV_binder', 'A0201_SLFNTVATLY_Gag-protein_HIV_binder', 'A0201_RMFPNAPYL_WT-1_binder', 'A0201_YLNDHLEPWI_BCL-X_Cancer_binder', 'A0201_MLDLQPETT_16E7_HPV_binder', 'A0301_KLGGALQAK_IE-1_CMV_binder', 'A0301_RLRAEAQVK_EMNA-3A_EBV_binder', 'A0301_RIAAWMATY_BCL-2L1_Cancer_binder', 'A1101_IVTDFSVIK_EBNA-3B_EBV_binder', 'A1101_AVFDRKSDAK_EBNA-3B_EBV_binder', 'B3501_IPSINVHHY_pp65_CMV_binder', 'A2402_AYAQKIFKI_IE-1_CMV_binder', 'A2402_QYDPVAALF_pp65_CMV_binder', 'B0702_QPRAPIRPI_EBNA-6_EBV_binder', 'B0702_TPRVTGGGAM_pp65_CMV_binder', 'B0702_RPPIFIRRL_EBNA-3A_EBV_binder', 'B0702_RPHERNGFTVL_pp65_CMV_binder', 'B0801_RAKFKQLL_BZLF1_EBV_binder', 'B0801_ELRRKMMYM_IE-1_CMV_binder', 'B0801_FLRGRAYGL_EBNA-3A_EBV_binder', 'A0101_SLEGGGLGY_NC_binder', 'A0101_STEGGGLAY_NC_binder', 'A0201_ALIAPVHAV_NC_binder', 'A2402_AYSSAGASI_NC_binder', 'B0702_GPAESAAGL_NC_binder', 'NR(B0801)_AAKGRGAAL_NC_binder']
|
def load_receptors(base_dir, pmhc):
    """Collect paired TRA/TRB clonotypes binding `pmhc` across four donors.

    For each donor, contig annotations are grouped by cell barcode, then the
    binarized binding matrix selects cells marked True for the given pMHC
    column. Every TRA x TRB combination within a selected cell is recorded.

    Returns a dict mapping 'vA:cdr3A:jA:vB:cdr3B:jB' -> occurrence count.
    """
    receptors = {}
    for donor in ('1', '2', '3', '4'):
        barcodes = {}
        contig_path = base_dir + '/' + 'vdj_v1_hs_aggregated_donor' + donor + '_all_contig_annotations.csv'
        with open(contig_path, 'r') as handle:
            for row in csv.DictReader(handle, delimiter=','):
                cell = row['barcode']
                if cell not in barcodes:
                    barcodes[cell] = []
                cdr3 = row['cdr3']
                vgene = row['v_gene']
                jgene = row['j_gene']
                # Drop chains with missing annotations or non-productive CDR3s.
                usable = ('None' not in cdr3) and ('*' not in cdr3) and ('None' not in vgene) and ('None' not in jgene)
                if usable:
                    barcodes[cell].append({
                        'chain': row['chain'],
                        'cdr3': cdr3,
                        'vgene': vgene,
                        'jgene': jgene,
                        'full': 'TRUE' in row['full_length'],
                    })
        matrix_path = base_dir + '/' + 'vdj_v1_hs_aggregated_donor' + donor + '_binarized_matrix.csv'
        with open(matrix_path, 'r') as handle:
            for row in csv.DictReader(handle, delimiter=','):
                if 'True' not in row[pmhc]:
                    continue
                cell = row['barcode']
                pairings = []
                for alpha in barcodes[cell]:
                    if 'TRA' not in alpha['chain']:
                        continue
                    for beta in barcodes[cell]:
                        if 'TRB' in beta['chain']:
                            pairings.append(':'.join([alpha['vgene'], alpha['cdr3'], alpha['jgene'], beta['vgene'], beta['cdr3'], beta['jgene']]))
                for pairing in pairings:
                    receptors[pairing] = receptors.get(pairing, 0.0) + 1.0
    return receptors
|
def normalize_sample(receptors):
    """Normalize receptor counts in place so the values sum to 1.

    Mutates and returns the same dict; values become relative frequencies.
    """
    total = np.float64(0.0)
    for count in receptors.values():
        total += count
    for key in receptors:
        receptors[key] /= total
    return receptors
|
def collapse_samples(samples, labels):
    """Merge per-sample receptor frequency dicts by receptor and label.

    samples: iterable of {receptor_key: quantity} dicts.
    labels: one label per sample, aligned with `samples`.

    Returns {receptor_key: {label: total_quantity}}. If the same receptor
    appears under the same label more than once (two samples sharing a
    label), quantities are accumulated and a warning is printed, since
    downstream code assumes one sample per label.
    """
    receptors_collapse = {}
    # Fix: the original looped with enumerate() but never used the index.
    for receptors, label in zip(samples, labels):
        for receptor, quantity in receptors.items():
            bucket = receptors_collapse.setdefault(receptor, {})
            if label not in bucket:
                bucket[label] = quantity
            else:
                bucket[label] += quantity
                print('WARNING: Duplicate label for the same receptor')
    return receptors_collapse
|
def split_dataset(receptors, ratios):
    """Randomly partition a receptor dict into len(ratios) disjoint dicts.

    ratios are normalized to fractions of the key count; keys are shuffled
    with np.random.shuffle before slicing, so results depend on the global
    NumPy RNG state. Returns a list of dicts covering all keys.
    """
    weights = np.array(ratios, dtype=np.float64)
    fractions = weights / np.sum(weights)
    # Cumulative boundaries in [0, 1], prefixed with 0.
    boundaries = np.pad(np.cumsum(fractions), [1, 0], 'constant', constant_values=0)
    keys = list(receptors.keys())
    np.random.shuffle(keys)
    receptors_split = []
    for i in range(len(ratios)):
        j1, j2 = (len(keys) * boundaries[i:(i + 2)]).astype(int)
        receptors_split.append({key: receptors[key] for key in keys[j1:j2]})
    return receptors_split
|
def insert_receptors(path_db, name, receptors, max_cdr3_length=32):
    """Store receptors as a structured-array dataset `name` in an HDF5 file.

    receptors maps 'vA:cdr3A:jA:vB:cdr3B:jB' -> {label: frequency, ...}.
    One 'frequency_<label>' float64 column is created for every label seen
    across all receptors; labels absent for a receptor remain 0.
    """
    # Union of all labels, sorted for a deterministic column order.
    labels = set()
    for quantities in receptors.values():
        labels.update(quantities.keys())
    labels = sorted(list(labels))
    # Fixed-width byte-string fields; CDR3 width bounded by max_cdr3_length.
    dtype_receptor = ([('tra_vgene', 'S16'), ('tra_cdr3', ('S' + str(max_cdr3_length))), ('tra_jgene', 'S16'), ('trb_vgene', 'S16'), ('trb_cdr3', ('S' + str(max_cdr3_length))), ('trb_jgene', 'S16')] + [(('frequency_' + label), 'f8') for label in labels])
    rs = np.zeros(len(receptors), dtype=dtype_receptor)
    for (i, (receptor, quantities)) in enumerate(receptors.items()):
        # Keys encode the six fields colon-separated (see load_receptors).
        (tra_vgene, tra_cdr3, tra_jgene, trb_vgene, trb_cdr3, trb_jgene) = receptor.split(':')
        rs[i]['tra_vgene'] = tra_vgene
        rs[i]['tra_cdr3'] = tra_cdr3
        rs[i]['tra_jgene'] = tra_jgene
        rs[i]['trb_vgene'] = trb_vgene
        rs[i]['trb_cdr3'] = trb_cdr3
        rs[i]['trb_jgene'] = trb_jgene
        for label in quantities.keys():
            rs[i][('frequency_' + label)] = quantities[label]
    # Append to an existing database file, otherwise create a new one.
    flag = ('r+' if os.path.isfile(path_db) else 'w')
    with h5py.File(path_db, flag) as db:
        rs_db = db.create_dataset(name, (rs.size,), dtype_receptor)
        rs_db[:] = rs
|
class Alignment(Layer):
    """Keras layer scoring masked sequences against learned positional motifs.

    Each of `filters` motifs spans `weight_steps` positions. An external
    `alignment_score` routine aligns the masked input against the kernel and
    returns one score per filter; a bias is added, and scores can optionally
    be normalized by sqrt of the effective alignment length.
    """

    def __init__(self, filters, weight_steps, penalties_feature=0.0, penalties_filter=0.0, length_normalize=False, kernel_initializer='uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        # filters: number of motifs (output units).
        # weight_steps: positions per motif (kernel rows).
        # penalties_feature / penalties_filter: gap penalties forwarded to
        #     alignment_score (as penalties_feature / penalties_weight).
        # length_normalize: divide scores by sqrt(min(seq_len, weight_steps)).
        # Remaining arguments mirror the standard Keras weight options.
        self.filters = filters
        self.weight_steps = weight_steps
        self.penalties_feature = penalties_feature
        self.penalties_filter = penalties_filter
        self.length_normalize = length_normalize
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        super(__class__, self).__init__(**kwargs)

    def build(self, input_shape):
        # kernel: (weight_steps, feature_dim, filters); bias: (filters,).
        self.kernel = self.add_weight(name='kernel', shape=[self.weight_steps, int(input_shape[2]), self.filters], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
        self.bias = self.add_weight(name='bias', shape=[self.filters], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True)
        super(__class__, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        # Collapse the per-timestep mask to a per-sample validity flag.
        if (mask is None):
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        scores = alignment_score(inputs, mask, self.kernel, penalties_feature=self.penalties_feature, penalties_weight=self.penalties_filter)
        if self.length_normalize:
            # Effective length = min(valid timesteps, motif length).
            lengths_feature = K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
            lengths_weight = K.cast(self.weight_steps, inputs.dtype)
            lengths = K.minimum(lengths_feature, lengths_weight)
            scores = (scores / K.sqrt(lengths))
        logits = (scores + self.bias)
        return logits
|
class Length(Layer):
    """Keras layer emitting the number of unmasked timesteps per sample."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # Collapse the timestep mask: a sample is kept if any step is valid.
        if mask is None:
            return mask
        return K.any(mask, axis=1)

    def call(self, inputs, mask=None):
        # Count valid timesteps; keepdims yields shape (batch, 1).
        return K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
|
class NormalizeInitialization(Layer):
    """Layer standardizing inputs with moments captured on the FIRST batch.

    On the first call the weighted mean/variance of that batch are stored in
    non-trainable weights; subsequent calls reuse the stored moments. This
    acts as a data-dependent initialization rather than batch normalization.
    """

    def __init__(self, epsilon=1e-05, **kwargs):
        # epsilon: numerical floor added to the variance before the sqrt.
        self.epsilon = epsilon
        super(__class__, self).__init__(**kwargs)

    def build(self, input_shape):
        # Inputs arrive as [features, sample_weights]; moments are shaped
        # like one feature row.
        (input_shape, _) = input_shape
        self.counter = self.add_weight(name='counter', shape=[1], initializer=Zeros(), trainable=False)
        self.mean = self.add_weight(name='mean', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.variance = self.add_weight(name='variance', shape=input_shape[1:], initializer=Ones(), trainable=False)
        super(__class__, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        # Mask does not propagate past normalization.
        return None

    def call(self, inputs):
        (inputs, weights) = inputs
        # Normalize sample weights to sum to 1 for the weighted moments.
        weights = (weights / tf.reduce_sum(weights))
        weights_expand = tf.expand_dims(weights, axis=1)
        (mean, variance) = tf.nn.weighted_moments(inputs, [0], weights_expand)
        # `init` is 0 exactly on the first call (counter just became 1) and 1
        # afterwards, so the stored moments are written only once.
        counter = K.update_add(self.counter, K.ones_like(self.counter))
        init = K.sign((counter - K.ones_like(counter)))
        mean = K.update(self.mean, ((init * self.mean) + ((1.0 - init) * mean)))
        variance = K.update(self.variance, ((init * self.variance) + ((1.0 - init) * variance)))
        mean_expand = tf.expand_dims(mean, axis=0)
        variance_expand = tf.expand_dims(variance, axis=0)
        outputs = ((inputs - mean_expand) / tf.sqrt((variance_expand + self.epsilon)))
        return outputs
|
def load_similarity_matrix(filename):
    """Load a CSV of per-position scores into {column_name: [float, ...]}.

    The first (unnamed) column — typically a row index — is skipped.
    Fix: the file handle was previously opened inline and never closed;
    it is now managed with a `with` block.
    """
    with open(filename, 'r') as handle:
        reader = csv.DictReader(handle)
        entries = list(reader)
        fieldnames = reader.fieldnames
    similarity_matrix = {}
    for name in fieldnames:
        if len(name) < 1:
            continue  # skip the unnamed index column
        similarity_matrix[name] = [float(entry[name]) for entry in entries]
    return similarity_matrix
|
def print_matrix(m, cdr3):
    """Pretty-print a 33-row alignment score matrix.

    Column headers are the CDR3 letters; each cell uses an 11.4f field.
    Prints 33 rows over len(cdr3)+1 columns (column 0 is the gap column).
    """
    ncols = len(cdr3)
    header = ' %11s' % ''
    for letter in cdr3:
        header += ' %11s' % letter
    print(header)
    for r in range(33):
        rendered = ''.join((' %11.4f' % m[r][c]) for c in range(ncols + 1))
        print(rendered)
|
def print_bp(bp, cdr3):
    """Pretty-print a 33-row backpointer matrix.

    Column headers are the CDR3 letters; each cell uses an 11s field.
    Prints 33 rows over len(cdr3)+1 columns (column 0 is the gap column).
    """
    ncols = len(cdr3)
    header = ' %11s' % ''
    for letter in cdr3:
        header += ' %11s' % letter
    print(header)
    for r in range(33):
        rendered = ''.join((' %11s' % bp[r][c]) for c in range(ncols + 1))
        print(rendered)
|
def print_alignment(bp, cdr3):
    """Backtrack the backpointer matrix and return the aligned CDR3 string.

    Starts at (row=32, col=len(cdr3)) and follows 'diag'/'up'/'left' moves;
    gaps in the CDR3 direction are rendered as '.'. Remaining unmatched
    rows are padded with '.' on the left.

    NOTE(review): an unrecognized backpointer prints 'ERROR' without
    advancing, which can loop forever — preserved from the original.
    """
    cdr3_align = []
    theta_align = []  # traversal record; not used in the returned string
    row = 32
    col = len(cdr3)
    while True:
        move = bp[row][col]
        if move == 'diag':
            theta_align.append(row)
            cdr3_align.append(cdr3[col - 1])
            row -= 1
            col -= 1
        elif move == 'up':
            theta_align.append(row)
            cdr3_align.append('.')
            row -= 1
        elif move == 'left':
            theta_align.append('.')
            cdr3_align.append(cdr3[col - 1])
            col -= 1
        else:
            print('ERROR')
        if row <= 0 or col <= 0:
            break
    # Pad any remaining theta rows with gap characters.
    if row != 0:
        for i in range(row, 0, -1):
            theta_align.append(i)
            cdr3_align.append('.')
    return ''.join(reversed(cdr3_align))
|
def do_alignment(sm, cdr3):
    """Global DP alignment of cdr3 (columns) against 32 theta rows.

    sm maps each residue to a list of per-position scores (indexed 0..31).
    Gaps along the theta axis are free (penalty 0); gaps along the CDR3
    axis cost 1000, so every CDR3 letter is effectively forced to align.

    Returns [score_matrix, backpointer_matrix], both 33 x 33; backpointers
    are 'up', 'diag' or 'left'. Ties prefer 'diag' over 'up', 'left' last.
    """
    theta_gap = 0
    cdr3_gap = -1000
    am = [[0.0] * 33 for _ in range(33)]
    bp = [[None] * 33 for _ in range(33)]
    max_col = len(cdr3) + 1
    # First column: free theta gaps.
    running = 0
    for r in range(33):
        am[r][0] = running
        running += theta_gap
    # First row: heavily penalized CDR3 gaps.
    running = 0
    for c in range(max_col):
        am[0][c] = running
        running += cdr3_gap
    for c in range(1, max_col):
        residue = cdr3[c - 1]
        for r in range(1, 33):
            up = am[r - 1][c] + theta_gap
            diag = am[r - 1][c - 1] + sm[residue][r - 1]
            left = am[r][c - 1] + cdr3_gap
            if up > diag:
                if up > left:
                    am[r][c], bp[r][c] = up, 'up'
                else:
                    am[r][c], bp[r][c] = left, 'left'
            elif diag > left:
                am[r][c], bp[r][c] = diag, 'diag'
            else:
                am[r][c], bp[r][c] = left, 'left'
    return [am, bp]
|
def do_file_alignment(input, output, sm_tra, sm_trb, tag):
    """Align each row's TRA/TRB CDR3 against similarity matrices; write a CSV.

    Reads `input`, appends four columns suffixed with `tag` (an alignment
    string and a length-normalized score per chain), and writes `output`.
    Scores are the final DP cell am[32][len(cdr3)] divided by sqrt(len).

    Fixes: both files were previously opened inline and never closed; they
    are now managed with a `with` block, and newline='' is used as the csv
    module requires. The loop-invariant final row index is hoisted.
    NOTE: the `input` parameter name shadows the builtin but is kept for
    call compatibility.
    """
    last_row = 32  # final theta row of the 33-row DP matrix
    with open(input, 'r', newline='') as in_stream, open(output, 'w', newline='') as out_stream:
        reader = csv.DictReader(in_stream)
        fieldnames = reader.fieldnames.copy()
        fieldnames.append('tra_alignment_' + tag)
        fieldnames.append('tra_score_' + tag)
        fieldnames.append('trb_alignment_' + tag)
        fieldnames.append('trb_score_' + tag)
        writer = csv.DictWriter(out_stream, fieldnames=fieldnames)
        writer.writeheader()
        for row in reader:
            col = len(row['tra_cdr3'])
            tra_align = do_alignment(sm_tra, row['tra_cdr3'])
            row['tra_alignment_' + tag] = print_alignment(tra_align[1], row['tra_cdr3'])
            row['tra_score_' + tag] = tra_align[0][last_row][col] / math.sqrt(float(col))
            col = len(row['trb_cdr3'])
            trb_align = do_alignment(sm_trb, row['trb_cdr3'])
            row['trb_alignment_' + tag] = print_alignment(trb_align[1], row['trb_cdr3'])
            row['trb_score_' + tag] = trb_align[0][last_row][col] / math.sqrt(float(col))
            writer.writerow(row)
|
def test_alignment(sm, cdr3):
    """Run one alignment and dump its score matrix, backpointers and string."""
    score_matrix, backpointers = do_alignment(sm, cdr3)
    print_matrix(score_matrix, cdr3)
    print_bp(backpointers, cdr3)
    print(print_alignment(backpointers, cdr3))
|
class GlobalPoolWithMask(Layer):
    """Masked global max-pooling over the time axis (axis 1)."""

    def __init__(self, **kwargs):
        super(__class__, self).__init__(**kwargs)

    def compute_mask(self, inputs, mask=None):
        # A sample stays valid downstream if any timestep was valid.
        return tf.reduce_any(mask, axis=1)

    def call(self, inputs, mask=None):
        # Push masked-out timesteps down by 1e16 so the max ignores them.
        valid = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        return tf.reduce_max(inputs + ((- 1e+16) * (1.0 - valid)), axis=1)
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build a Keras model scoring paired TRA/TRB receptors on num_outputs targets.

    Eight additive logit branches — per chain: CDR3 k-mer convolution, CDR3
    length, V gene, J gene — are each standardized by NormalizeInitialization
    (driven by the per-sample `weights` input), summed, and standardized once
    more. Model inputs are the six feature tensors plus the scalar weights.
    """
    kmer_size = 4  # convolution width over CDR3 positions
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])  # per-sample weights for the normalization layers
    # --- TRA branches ---
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(features_tra_cdr3)
    # The un-padded Conv1D drops kmer_size-1 timesteps; MaskCopy re-attaches
    # a correspondingly trimmed mask before the masked max-pool.
    logits_tra_cdr3_mask = MaskCopy(trim_front=(kmer_size - 1))([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # --- TRB branches (mirror of TRA) ---
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3_mask = MaskCopy(trim_front=(kmer_size - 1))([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # Sum all branch logits and standardize the total once more.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
|
class GlobalPoolWithMask(Layer):
    """Masked global max-pooling over the time axis.

    Masked steps are pushed to a very large negative value before the max,
    so only valid positions can win the pooling.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def compute_mask(self, inputs, mask=None):
        # The pooled vector is valid if at least one step was unmasked.
        return tf.reduce_any(mask, axis=1)
    def call(self, inputs, mask=None):
        # Broadcast the boolean mask to (batch, steps, 1) as 0/1 indicators.
        valid = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        # Large negative penalty removes masked steps from the max.
        penalized = inputs + ((- 1e+16) * (1.0 - valid))
        return tf.reduce_max(penalized, axis=1)
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build the stacked-CNN model over paired TCR alpha/beta chains.

    Per chain, four logit terms are computed (two stacked k-mer convolutions
    over the CDR3 with masked max-pooling, CDR3 length, V gene, J gene), each
    normalized against the per-sample weights; the eight terms are summed and
    the sum is normalized once more to produce the output logits.
    """
    kmer_size = 4
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Conv1D(8, kmer_size)(features_tra_cdr3)
    logits_tra_cdr3 = Conv1D(num_outputs, kmer_size)(logits_tra_cdr3)
    # Two stacked valid convolutions shorten the sequence by 2*(kmer_size-1).
    logits_tra_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_tra_cdr3, features_tra_mask])
    logits_tra_cdr3_pool = GlobalPoolWithMask()(logits_tra_cdr3_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3_pool, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Conv1D(8, kmer_size)(features_trb_cdr3)
    logits_trb_cdr3 = Conv1D(num_outputs, kmer_size)(logits_trb_cdr3)
    # BUG FIX: the beta-chain logits were previously masked with the
    # alpha-chain mask (features_tra_mask); use the beta-chain mask,
    # matching the tra branch above.
    logits_trb_cdr3_mask = MaskCopy(trim_front=((2 * kmer_size) - 2))([logits_trb_cdr3, features_trb_mask])
    logits_trb_cdr3_pool = GlobalPoolWithMask()(logits_trb_cdr3_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3_pool, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs, num_steps):
    """Build the k-mer-alignment model over paired TCR alpha/beta chains.

    Per chain, four logit terms are computed (CDR3 k-mer alignment, CDR3
    length, V gene, J gene), each normalized against the per-sample weights;
    the eight terms are summed and the sum is normalized once more.
    """
    kmer_size = 5
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])

    def _normalized(tensor):
        # Rescale a logit tensor against the per-sample weights.
        return NormalizeInitialization(epsilon=0.0)([tensor, weights])

    chain_inputs = [(features_tra_cdr3, features_tra_vgene, features_tra_jgene), (features_trb_cdr3, features_trb_vgene, features_trb_jgene)]
    terms = []
    for (cdr3, vgene, jgene) in chain_inputs:
        masked = Masking(mask_value=0.0)(cdr3)
        length = Length()(masked)
        kmers = KMer(kmer_size)(masked)
        aligned = Alignment(num_outputs, ((num_steps - kmer_size) + 1), penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(kmers)
        terms.append(_normalized(aligned))
        terms.append(_normalized(Dense(num_outputs)(length)))
        terms.append(_normalized(Dense(num_outputs)(vgene)))
        terms.append(_normalized(Dense(num_outputs)(jgene)))
    logits_norm = _normalized(Add()(terms))
    return Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
|
def handcrafted_features(data, tags):
    """Expand receptor records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') the following features are derived from
    the V/J gene calls and the CDR3 amino-acid sequence: one-hot gene
    encodings, sequence length, amino-acid counts, sequence-averaged
    physicochemical scores, isoelectric point, peptide mass, and
    per-position one-hot / physicochemical features indexed from the
    sequence center.  The bookkeeping columns 'weights', 'labels_<tag>'
    (one per entry of `tags`), and 'split' are carried through unchanged.
    """
    # Per-residue physicochemical lookup tables (including the ambiguity
    # codes B/X/Z where values are available).
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode the V/J gene calls for this chain.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.
        # NOTE(review): 'length' and the average/pI/mass columns below are
        # not prefixed with the chain name, so the tra and trb iterations
        # produce duplicate column names in the concatenated frame —
        # confirm this is intended.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts, chain-prefixed.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Isoelectric point of the whole sequence (pyteomics electrochem).
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass via pyteomics fast_mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features, indexed relative to the sequence center:
        # negative positions before the center, positive after; even-length
        # sequences skip position 0.
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Missing positions (shorter sequences) become 0 via fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Carry through sample weights, per-tag labels, and the split marker.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
|
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and build feature arrays.

    Parameters
    ----------
    path_db : str
        Path to the HDF5 file holding one structured dataset per split.
    splits : sequence of str
        Names of the HDF5 datasets (e.g. train/dev/test) to load, in order.
    tags : sequence of str
        Category tags; each receptor record carries a 'frequency_<tag>' field.
    uniform : bool
        If True, replace frequencies with 0/1 indicators before normalizing,
        so every observed receptor counts equally.
    permute : bool
        If True, shuffle the feature rows independently of labels/weights
        (deliberately breaks the feature-label pairing, e.g. for a null model).

    Returns
    -------
    list
        [xs, ys, ws] triples concatenated in the order of `splits`.
    """
    num_categories = len(tags)
    receptors_dict = {}
    decoded_keys = ['tra_vgene', 'tra_cdr3', 'tra_jgene', 'trb_vgene', 'trb_cdr3', 'trb_jgene']
    for split in splits:
        # Materialize the split into memory so the file can be closed early.
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        # Keep only receptors observed under at least one tag.
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        receptors = receptors[np.argwhere((weights > 0.0)).flatten()]
        if uniform:
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        # Normalize each tag's frequencies into a probability distribution.
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        # Decode byte strings; per-receptor weight is the tag-averaged frequency.
        decoded = {key: np.char.decode(receptors[key]) for key in decoded_keys}
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        weights /= num_categories
        labels = {('labels_' + tag): (receptors[('frequency_' + tag)] / (num_categories * weights)) for tag in tags}
        split_column = [split] * receptors.size
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize every column.
            receptors_dict.update(decoded)
            receptors_dict['weights'] = weights
            receptors_dict.update(labels)
            receptors_dict['split'] = split_column
        else:
            # Subsequent splits: append to the existing columns.
            for (key, values) in decoded.items():
                receptors_dict[key] = np.concatenate([receptors_dict[key], values], axis=0)
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for (key, values) in labels.items():
                receptors_dict[key] = np.concatenate([receptors_dict[key], values], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], split_column], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        data_split = data_processed[(data_processed['split'] == split)]
        # Feature matrix excludes the bookkeeping columns.
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        xs_split = features_split.to_numpy()
        ys_split = data_split[[('labels_' + tag) for tag in tags]].to_numpy()
        ws_split = data_split['weights'].to_numpy()
        if permute:
            # Shuffle features only — labels/weights keep their order on purpose.
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
|
def balanced_sampling(xs, ys, ws, batch_size):
    """Yield endless (features, labels) batches sampled in proportion to ws."""
    indices = np.arange(xs.shape[0])
    probabilities = (ws / np.sum(ws))
    while True:
        chosen = np.random.choice(indices, size=batch_size, p=probabilities)
        (yield (xs[chosen], ys[chosen]))
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build the GRU-based model over paired TCR alpha/beta chains.

    Per chain, four logit terms are computed (GRU over the masked CDR3,
    CDR3 length, V gene, J gene), each normalized against the per-sample
    weights; the eight terms are summed and the sum is normalized again.
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])

    def _normalized(tensor):
        # Rescale a logit tensor against the per-sample weights.
        return NormalizeInitialization(epsilon=0.0)([tensor, weights])

    chain_inputs = [(features_tra_cdr3, features_tra_vgene, features_tra_jgene), (features_trb_cdr3, features_trb_vgene, features_trb_jgene)]
    terms = []
    for (cdr3, vgene, jgene) in chain_inputs:
        masked = Masking(mask_value=0.0)(cdr3)
        length = Length()(masked)
        terms.append(_normalized(GRU(num_outputs)(masked)))
        terms.append(_normalized(Dense(num_outputs)(length)))
        terms.append(_normalized(Dense(num_outputs)(vgene)))
        terms.append(_normalized(Dense(num_outputs)(jgene)))
    logits_norm = _normalized(Add()(terms))
    return Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs):
    """Build the LSTM-based model over paired TCR alpha/beta chains.

    Per chain, four logit terms are computed (LSTM over the masked CDR3,
    CDR3 length, V gene, J gene), each normalized against the per-sample
    weights; the eight terms are summed and the sum is normalized again.
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    weights = Input(shape=[])

    def _normalized(tensor):
        # Rescale a logit tensor against the per-sample weights.
        return NormalizeInitialization(epsilon=0.0)([tensor, weights])

    chain_inputs = [(features_tra_cdr3, features_tra_vgene, features_tra_jgene), (features_trb_cdr3, features_trb_vgene, features_trb_jgene)]
    terms = []
    for (cdr3, vgene, jgene) in chain_inputs:
        masked = Masking(mask_value=0.0)(cdr3)
        length = Length()(masked)
        terms.append(_normalized(LSTM(num_outputs)(masked)))
        terms.append(_normalized(Dense(num_outputs)(length)))
        terms.append(_normalized(Dense(num_outputs)(vgene)))
        terms.append(_normalized(Dense(num_outputs)(jgene)))
    logits_norm = _normalized(Add()(terms))
    return Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
|
def handcrafted_features(data, tags):
    """Expand receptor records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') the following features are derived from
    the V/J gene calls and the CDR3 amino-acid sequence: one-hot gene
    encodings, sequence length, amino-acid counts, sequence-averaged
    physicochemical scores, isoelectric point, peptide mass, and
    per-position one-hot / physicochemical features indexed from the
    sequence center.  The bookkeeping columns 'weights', 'labels_<tag>'
    (one per entry of `tags`), and 'split' are carried through unchanged.
    """
    # Per-residue physicochemical lookup tables (including the ambiguity
    # codes B/X/Z where values are available).
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode the V/J gene calls for this chain.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.
        # NOTE(review): 'length' and the average/pI/mass columns below are
        # not prefixed with the chain name, so the tra and trb iterations
        # produce duplicate column names in the concatenated frame —
        # confirm this is intended.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts, chain-prefixed.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Isoelectric point of the whole sequence (pyteomics electrochem).
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass via pyteomics fast_mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features, indexed relative to the sequence center:
        # negative positions before the center, positive after; even-length
        # sequences skip position 0.
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Missing positions (shorter sequences) become 0 via fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Carry through sample weights, per-tag labels, and the split marker.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
|
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and build feature arrays.

    Parameters
    ----------
    path_db : str
        Path to the HDF5 file holding one structured dataset per split.
    splits : sequence of str
        Names of the HDF5 datasets (e.g. train/dev/test) to load, in order.
    tags : sequence of str
        Category tags; each receptor record carries a 'frequency_<tag>' field.
    uniform : bool
        If True, replace frequencies with 0/1 indicators before normalizing,
        so every observed receptor counts equally.
    permute : bool
        If True, shuffle the feature rows independently of labels/weights
        (deliberately breaks the feature-label pairing, e.g. for a null model).

    Returns
    -------
    list
        [xs, ys, ws] triples concatenated in the order of `splits`.
    """
    num_categories = len(tags)
    receptors_dict = {}
    decoded_keys = ['tra_vgene', 'tra_cdr3', 'tra_jgene', 'trb_vgene', 'trb_cdr3', 'trb_jgene']
    for split in splits:
        # Materialize the split into memory so the file can be closed early.
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        # Keep only receptors observed under at least one tag.
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        receptors = receptors[np.argwhere((weights > 0.0)).flatten()]
        if uniform:
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        # Normalize each tag's frequencies into a probability distribution.
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        # Decode byte strings; per-receptor weight is the tag-averaged frequency.
        decoded = {key: np.char.decode(receptors[key]) for key in decoded_keys}
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        weights /= num_categories
        labels = {('labels_' + tag): (receptors[('frequency_' + tag)] / (num_categories * weights)) for tag in tags}
        split_column = [split] * receptors.size
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize every column.
            receptors_dict.update(decoded)
            receptors_dict['weights'] = weights
            receptors_dict.update(labels)
            receptors_dict['split'] = split_column
        else:
            # Subsequent splits: append to the existing columns.
            for (key, values) in decoded.items():
                receptors_dict[key] = np.concatenate([receptors_dict[key], values], axis=0)
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for (key, values) in labels.items():
                receptors_dict[key] = np.concatenate([receptors_dict[key], values], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], split_column], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        data_split = data_processed[(data_processed['split'] == split)]
        # Feature matrix excludes the bookkeeping columns.
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        xs_split = features_split.to_numpy()
        ys_split = data_split[[('labels_' + tag) for tag in tags]].to_numpy()
        ws_split = data_split['weights'].to_numpy()
        if permute:
            # Shuffle features only — labels/weights keep their order on purpose.
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
|
def balanced_sampling(xs, ys, ws, batch_size):
    """Yield endless (features, labels) batches sampled in proportion to ws."""
    indices = np.arange(xs.shape[0])
    probabilities = (ws / np.sum(ws))
    while True:
        chosen = np.random.choice(indices, size=batch_size, p=probabilities)
        (yield (xs[chosen], ys[chosen]))
|
def handcrafted_features(data, tags):
    """Expand receptor records into a flat table of handcrafted features.

    For each chain ('tra', 'trb') the following features are derived from
    the V/J gene calls and the CDR3 amino-acid sequence: one-hot gene
    encodings, sequence length, amino-acid counts, sequence-averaged
    physicochemical scores, isoelectric point, peptide mass, and
    per-position one-hot / physicochemical features indexed from the
    sequence center.  The bookkeeping columns 'weights', 'labels_<tag>'
    (one per entry of `tags`), and 'split' are carried through unchanged.
    """
    # Per-residue physicochemical lookup tables (including the ambiguity
    # codes B/X/Z where values are available).
    basicity = {'A': 206.4, 'B': 210.7, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1, 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6, 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0, 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'X': 210.2, 'Y': 213.1, 'Z': 214.9}
    hydrophobicity = {'A': 0.16, 'B': (- 3.14), 'C': 2.5, 'D': (- 2.49), 'E': (- 1.5), 'F': 5.0, 'G': (- 3.31), 'H': (- 4.63), 'I': 4.41, 'K': (- 5.0), 'L': 4.76, 'M': 3.23, 'N': (- 3.79), 'P': (- 4.92), 'Q': (- 2.76), 'R': (- 2.77), 'S': (- 2.85), 'T': (- 1.08), 'V': 3.02, 'W': 4.88, 'X': 4.59, 'Y': 2.0, 'Z': (- 2.13)}
    helicity = {'A': 1.24, 'B': 0.92, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97, 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96, 'R': 0.95, 'S': 1.0, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'X': 1.29, 'Y': 1.11, 'Z': 0.91}
    mutation_stability = {'A': 13, 'C': 52, 'D': 11, 'E': 12, 'F': 32, 'G': 27, 'H': 15, 'I': 10, 'K': 24, 'L': 34, 'M': 6, 'N': 6, 'P': 20, 'Q': 10, 'R': 17, 'S': 10, 'T': 11, 'V': 17, 'W': 55, 'Y': 31}
    features_list = []
    for chain in ['tra', 'trb']:
        # One-hot encode the V/J gene calls for this chain.
        onehot_encoder = feature_extraction.DictVectorizer(sparse=False)
        features_list.append(pd.DataFrame(onehot_encoder.fit_transform(data[[(chain + '_vgene'), (chain + '_jgene')]].to_dict(orient='records')), columns=onehot_encoder.feature_names_))
        # CDR3 length.
        # NOTE(review): 'length' and the average/pI/mass columns below are
        # not prefixed with the chain name, so the tra and trb iterations
        # produce duplicate column names in the concatenated frame —
        # confirm this is intended.
        features_list.append(data[(chain + '_cdr3')].apply((lambda sequence: parser.length(sequence))).to_frame().rename(columns={(chain + '_cdr3'): 'length'}))
        # Amino-acid composition counts, chain-prefixed.
        aa_counts = pd.DataFrame.from_records([parser.amino_acid_composition(sequence) for sequence in data[(chain + '_cdr3')]]).fillna(0)
        aa_counts.columns = [(chain + '_count_{}'.format(column)) for column in aa_counts.columns]
        features_list.append(aa_counts)
        # Sequence-averaged physicochemical properties.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([basicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_basicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([hydrophobicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_hydrophobicity'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([helicity[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_helicity'}))
        # Isoelectric point of the whole sequence (pyteomics electrochem).
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: electrochem.pI(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'pI'}))
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: (sum([mutation_stability[aa] for aa in seq]) / parser.length(seq)))).to_frame().rename(columns={(chain + '_cdr3'): 'avg_mutation_stability'}))
        # Peptide mass via pyteomics fast_mass.
        features_list.append(data[(chain + '_cdr3')].apply((lambda seq: mass.fast_mass(seq))).to_frame().rename(columns={(chain + '_cdr3'): 'mass'}))
        # Per-position features, indexed relative to the sequence center:
        # negative positions before the center, positive after; even-length
        # sequences skip position 0.
        (pos_aa, pos_basicity, pos_hydro, pos_helicity, pos_pI, pos_mutation) = [[] for _ in range(6)]
        for sequence in data[(chain + '_cdr3')]:
            length = parser.length(sequence)
            start_pos = ((- 1) * (length // 2))
            pos_range = (list(range(start_pos, (start_pos + length))) if ((length % 2) == 1) else (list(range(start_pos, 0)) + list(range(1, ((start_pos + length) + 1)))))
            pos_aa.append({(chain + '_pos_{}_{}'.format(pos, aa)): 1 for (pos, aa) in zip(pos_range, sequence)})
            pos_basicity.append({(chain + '_pos_{}_basicity'.format(pos)): basicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_hydro.append({(chain + '_pos_{}_hydrophobicity'.format(pos)): hydrophobicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_helicity.append({(chain + '_pos_{}_helicity'.format(pos)): helicity[aa] for (pos, aa) in zip(pos_range, sequence)})
            pos_pI.append({(chain + '_pos_{}_pI'.format(pos)): electrochem.pI(aa) for (pos, aa) in zip(pos_range, sequence)})
            pos_mutation.append({(chain + '_pos_{}_mutation_stability'.format(pos)): mutation_stability[aa] for (pos, aa) in zip(pos_range, sequence)})
        # Missing positions (shorter sequences) become 0 via fillna.
        features_list.append(pd.DataFrame.from_records(pos_aa).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_basicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_hydro).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_helicity).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_pI).fillna(0))
        features_list.append(pd.DataFrame.from_records(pos_mutation).fillna(0))
    # Carry through sample weights, per-tag labels, and the split marker.
    features_list.append(data['weights'])
    for tag in tags:
        features_list.append(data[('labels_' + tag)])
    features_list.append(data['split'])
    data_processed = pd.concat(features_list, axis=1)
    return data_processed
|
def load_datasets(path_db, splits, tags, uniform=False, permute=False):
    """Load receptor splits from an HDF5 database and build model-ready arrays.

    For each split: read the records, drop receptors whose total frequency
    across ``tags`` is zero, optionally flatten frequencies to 0/1 indicators
    (``uniform=True``), renormalize each tag's frequencies to sum to 1, and
    accumulate decoded string fields, per-receptor weights, per-tag soft
    labels, and split markers. The combined table is featurized by
    ``handcrafted_features`` and cut back into per-split arrays.

    Returns a flat list [xs_0, ys_0, ws_0, xs_1, ys_1, ws_1, ...] in
    ``splits`` order.
    """
    num_categories = len(tags)
    receptors_dict = {}
    for split in splits:
        with h5py.File(path_db, 'r') as db:
            receptors = db[split][...]
        weights = 0.0
        for tag in tags:
            weights += receptors[('frequency_' + tag)]
        # Keep only receptors observed under at least one tag.
        indices = np.argwhere((weights > 0.0)).flatten()
        receptors = receptors[indices]
        if uniform:
            # 0/1 indicators: every observed receptor counts equally.
            for tag in tags:
                receptors[('frequency_' + tag)] = np.sign(receptors[('frequency_' + tag)])
        for tag in tags:
            receptors[('frequency_' + tag)] /= np.sum(receptors[('frequency_' + tag)])
        # Per-receptor weight is the mean frequency across tags; labels are the
        # tag-conditional shares so each label row sums to 1.
        if ('tra_vgene' not in receptors_dict):
            # First split: initialize the accumulators.
            receptors_dict['tra_vgene'] = np.char.decode(receptors['tra_vgene'])
            receptors_dict['tra_cdr3'] = np.char.decode(receptors['tra_cdr3'])
            receptors_dict['tra_jgene'] = np.char.decode(receptors['tra_jgene'])
            receptors_dict['trb_vgene'] = np.char.decode(receptors['trb_vgene'])
            receptors_dict['trb_cdr3'] = np.char.decode(receptors['trb_cdr3'])
            receptors_dict['trb_jgene'] = np.char.decode(receptors['trb_jgene'])
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = weights
            for tag in tags:
                receptors_dict[('labels_' + tag)] = (receptors[('frequency_' + tag)] / (num_categories * weights))
            receptors_dict['split'] = [split for i in range(receptors.size)]
        else:
            # Subsequent splits: append to the accumulators.
            receptors_dict['tra_vgene'] = np.concatenate([receptors_dict['tra_vgene'], np.char.decode(receptors['tra_vgene'])], axis=0)
            receptors_dict['tra_cdr3'] = np.concatenate([receptors_dict['tra_cdr3'], np.char.decode(receptors['tra_cdr3'])], axis=0)
            receptors_dict['tra_jgene'] = np.concatenate([receptors_dict['tra_jgene'], np.char.decode(receptors['tra_jgene'])], axis=0)
            receptors_dict['trb_vgene'] = np.concatenate([receptors_dict['trb_vgene'], np.char.decode(receptors['trb_vgene'])], axis=0)
            receptors_dict['trb_cdr3'] = np.concatenate([receptors_dict['trb_cdr3'], np.char.decode(receptors['trb_cdr3'])], axis=0)
            receptors_dict['trb_jgene'] = np.concatenate([receptors_dict['trb_jgene'], np.char.decode(receptors['trb_jgene'])], axis=0)
            weights = 0.0
            for tag in tags:
                weights += receptors[('frequency_' + tag)]
            weights /= num_categories
            receptors_dict['weights'] = np.concatenate([receptors_dict['weights'], weights], axis=0)
            for tag in tags:
                receptors_dict[('labels_' + tag)] = np.concatenate([receptors_dict[('labels_' + tag)], (receptors[('frequency_' + tag)] / (num_categories * weights))], axis=0)
            receptors_dict['split'] = np.concatenate([receptors_dict['split'], [split for i in range(receptors.size)]], axis=0)
    data = pd.DataFrame(receptors_dict)
    data_processed = handcrafted_features(data, tags)
    outputs_list = []
    for split in splits:
        conditions = (data_processed['split'] == split)
        data_split = data_processed[conditions]
        # (Removed a no-op `data_split.drop('split', axis=1)` whose result was
        # discarded; 'split' is dropped for real in the line below.)
        features_split = data_split.drop((['weights', 'split'] + [('labels_' + tag) for tag in tags]), axis=1)
        labels_split = data_split[[('labels_' + tag) for tag in tags]]
        weights_split = data_split['weights']
        xs_split = features_split.to_numpy()
        ys_split = labels_split.to_numpy()
        ws_split = weights_split.to_numpy()
        if permute:
            # Shuffles features relative to labels — presumably a permutation-test
            # control (null model); TODO confirm intent with callers.
            indices = np.arange(xs_split.shape[0])
            np.random.shuffle(indices)
            xs_split = xs_split[indices]
        outputs_list.append(xs_split)
        outputs_list.append(ys_split)
        outputs_list.append(ws_split)
    return outputs_list
|
def label_float2int(ys, num_classes):
    """Convert soft label rows to hard one-hot int64 labels via per-row argmax."""
    winners = np.argmax(ys, axis=1).reshape(-1)
    onehot = np.eye(num_classes)[winners]
    return np.squeeze(onehot).astype(np.int64)
|
def crossentropy(labels, logits, weights):
    """Weighted softmax cross-entropy computed stably from raw logits."""
    normalized = (weights / tf.reduce_sum(weights))
    # Per-example loss: logsumexp(logits) - <labels, logits>.
    per_example = (tf.reduce_logsumexp(logits, axis=1) - tf.reduce_sum((labels * logits), axis=1))
    return tf.reduce_sum((normalized * per_example))
|
def accuracy(labels, logits, weights):
    """Weighted top-1 accuracy of logits against (soft) one-hot labels."""
    probabilities = tf.math.softmax(logits)
    normalized = (weights / tf.reduce_sum(weights))
    hits = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probabilities, axis=1)), probabilities.dtype)
    return tf.reduce_sum((normalized * hits))
|
def find_threshold(labels, logits, weights, target_accuracy):
    """Largest prediction-entropy threshold whose accepted subset still meets
    ``target_accuracy`` (weighted).

    Sorts examples by ascending predictive entropy, computes the cumulative
    weighted accuracy of each entropy prefix, and returns the entropy at the
    last prefix whose accuracy exceeds the target.
    """
    probabilities = tf.math.softmax(logits)
    weights = (weights / tf.reduce_sum(weights))
    # Predictive entropy: logsumexp(logits) - sum(p * logits).
    entropies = ((- tf.reduce_sum((probabilities * logits), axis=1)) + tf.reduce_logsumexp(logits, axis=1))
    corrects = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probabilities, axis=1)), probabilities.dtype)
    indices_sorted = tf.argsort(entropies, axis=0)
    entropies_sorted = tf.gather(entropies, indices_sorted)
    corrects_sorted = tf.gather(corrects, indices_sorted)
    weights_sorted = tf.gather(weights, indices_sorted)
    # Cumulative weighted accuracy of each entropy prefix.
    numerators_sorted = tf.math.cumsum((weights_sorted * corrects_sorted), axis=0)
    denominators_sorted = tf.math.cumsum(weights_sorted, axis=0)
    accuracies_sorted = (numerators_sorted / denominators_sorted)
    # Positions 0..n-1 (renamed from `range`, which shadowed the builtin).
    positions = (tf.math.cumsum(tf.ones_like(accuracies_sorted, dtype=tf.int64), axis=0) - 1)
    indices_threshold = tf.where((accuracies_sorted > tf.constant(target_accuracy, accuracies_sorted.dtype)), positions, tf.zeros_like(positions))
    index_threshold = tf.reduce_max(indices_threshold)
    entropy_threshold = tf.gather(entropies_sorted, index_threshold)
    return entropy_threshold
|
def accuracy_with_threshold(labels, logits, weights, threshold):
    """Weighted accuracy restricted to predictions with entropy <= ``threshold``."""
    probabilities = tf.math.softmax(logits)
    normalized = (weights / tf.reduce_sum(weights))
    entropies = (tf.reduce_logsumexp(logits, axis=1) - tf.reduce_sum((probabilities * logits), axis=1))
    hits = tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(probabilities, axis=1)), probabilities.dtype)
    keep = tf.where((entropies <= threshold), tf.ones_like(entropies), tf.zeros_like(entropies))
    kept_weights = (normalized * keep)
    return tf.math.divide(tf.reduce_sum((kept_weights * hits)), tf.reduce_sum(kept_weights))
|
def crossentropy_with_threshold(labels, logits, weights, threshold):
    """Weighted cross-entropy restricted to predictions with entropy <= ``threshold``."""
    probabilities = tf.math.softmax(logits)
    normalized = (weights / tf.reduce_sum(weights))
    entropies = (tf.reduce_logsumexp(logits, axis=1) - tf.reduce_sum((probabilities * logits), axis=1))
    per_example = (tf.reduce_logsumexp(logits, axis=1) - tf.reduce_sum((labels * logits), axis=1))
    keep = tf.where((entropies <= threshold), tf.ones_like(entropies), tf.zeros_like(entropies))
    kept_weights = (normalized * keep)
    return tf.math.divide(tf.reduce_sum((kept_weights * per_example)), tf.reduce_sum(kept_weights))
|
def fraction_with_threshold(logits, weights, threshold):
    """Weighted fraction of examples whose prediction entropy is <= ``threshold``."""
    probabilities = tf.math.softmax(logits)
    normalized = (weights / tf.reduce_sum(weights))
    entropies = (tf.reduce_logsumexp(logits, axis=1) - tf.reduce_sum((probabilities * logits), axis=1))
    keep = tf.where((entropies <= threshold), tf.ones_like(entropies), tf.zeros_like(entropies))
    return tf.reduce_sum((normalized * keep))
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs, num_steps):
    """Build the paired-chain (TRA+TRB) receptor classifier, NormalizeInitialization variant.

    Eight logit streams (CDR3 alignment, CDR3 length, V gene, J gene per
    chain) are each normalized against the per-example weights and summed
    into one raw logit tensor.
    NOTE(review): a later `generate_model` in this module shadows this
    definition; it appears to be kept as an alternative variant.
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    # Per-example weights feed the weighted normalization layers.
    weights = Input(shape=[])
    # TRA chain: mask zero-padded CDR3 positions, then derive length and alignment logits.
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_tra_mask)
    logits_tra_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_cdr3, weights])
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_length, weights])
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_vgene, weights])
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_tra_jgene, weights])
    # TRB chain: same feature streams as TRA.
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_trb_mask)
    logits_trb_cdr3_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_cdr3, weights])
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_length, weights])
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_vgene, weights])
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = NormalizeInitialization(epsilon=0.0)([logits_trb_jgene, weights])
    # Combine all normalized streams additively; output raw (normalized) logits.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = NormalizeInitialization(epsilon=0.0)([logits, weights])
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene, weights], outputs=logits_norm)
    return model
|
def generate_model(input_shape_tra_cdr3, input_shape_tra_vgene, input_shape_tra_jgene, input_shape_trb_cdr3, input_shape_trb_vgene, input_shape_trb_jgene, num_outputs, num_steps):
    """Build the paired-chain (TRA+TRB) receptor classifier, BatchNormalization variant.

    Same eight logit streams as the weighted-normalization variant above, but
    normalized with standard BatchNormalization (no weights input) and ending
    in an explicit softmax, so the model outputs probabilities, not logits.
    """
    features_tra_cdr3 = Input(shape=input_shape_tra_cdr3)
    features_tra_vgene = Input(shape=input_shape_tra_vgene)
    features_tra_jgene = Input(shape=input_shape_tra_jgene)
    features_trb_cdr3 = Input(shape=input_shape_trb_cdr3)
    features_trb_vgene = Input(shape=input_shape_trb_vgene)
    features_trb_jgene = Input(shape=input_shape_trb_jgene)
    # TRA chain streams.
    features_tra_mask = Masking(mask_value=0.0)(features_tra_cdr3)
    features_tra_length = Length()(features_tra_mask)
    logits_tra_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_tra_mask)
    logits_tra_cdr3_norm = BatchNormalization(momentum=0.5)(logits_tra_cdr3)
    logits_tra_length = Dense(num_outputs)(features_tra_length)
    logits_tra_length_norm = BatchNormalization(momentum=0.5)(logits_tra_length)
    logits_tra_vgene = Dense(num_outputs)(features_tra_vgene)
    logits_tra_vgene_norm = BatchNormalization(momentum=0.5)(logits_tra_vgene)
    logits_tra_jgene = Dense(num_outputs)(features_tra_jgene)
    logits_tra_jgene_norm = BatchNormalization(momentum=0.5)(logits_tra_jgene)
    # TRB chain streams.
    features_trb_mask = Masking(mask_value=0.0)(features_trb_cdr3)
    features_trb_length = Length()(features_trb_mask)
    logits_trb_cdr3 = Alignment(num_outputs, num_steps, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_trb_mask)
    logits_trb_cdr3_norm = BatchNormalization(momentum=0.5)(logits_trb_cdr3)
    logits_trb_length = Dense(num_outputs)(features_trb_length)
    logits_trb_length_norm = BatchNormalization(momentum=0.5)(logits_trb_length)
    logits_trb_vgene = Dense(num_outputs)(features_trb_vgene)
    logits_trb_vgene_norm = BatchNormalization(momentum=0.5)(logits_trb_vgene)
    logits_trb_jgene = Dense(num_outputs)(features_trb_jgene)
    logits_trb_jgene_norm = BatchNormalization(momentum=0.5)(logits_trb_jgene)
    # Sum the streams, renormalize, and convert to class probabilities.
    logits = Add()([logits_tra_cdr3_norm, logits_tra_length_norm, logits_tra_vgene_norm, logits_tra_jgene_norm, logits_trb_cdr3_norm, logits_trb_length_norm, logits_trb_vgene_norm, logits_trb_jgene_norm])
    logits_norm = BatchNormalization(momentum=0.5)(logits)
    probabilities = Activation('softmax')(logits_norm)
    model = Model(inputs=[features_tra_cdr3, features_tra_vgene, features_tra_jgene, features_trb_cdr3, features_trb_vgene, features_trb_jgene], outputs=probabilities)
    return model
|
def balanced_sampling(xs, ys, ws, batch_size):
    """Endless generator of minibatches sampled proportionally to weights ``ws``.

    Yields ((x0, x1, x2, x3, x4, x5), y) tuples with the same random rows
    taken from each of the six feature arrays and the labels.
    """
    indices = np.arange(xs[0].shape[0])
    probabilities = (ws / np.sum(ws))
    while True:
        picks = np.random.choice(indices, size=batch_size, p=probabilities)
        (yield (tuple(xs[k][picks] for k in range(6)), ys[picks]))
|
def balanced_sampling(xs, ys, ws, batch_size):
    """Infinite weighted minibatch sampler (duplicate definition kept as in file).

    Each yielded batch pairs matching random rows from the six feature arrays
    with the corresponding labels; rows are drawn with probability ws / sum(ws).
    """
    row_ids = np.arange(xs[0].shape[0])
    row_probs = (ws / np.sum(ws))
    while True:
        chosen = np.random.choice(row_ids, size=batch_size, p=row_probs)
        batch_features = (xs[0][chosen], xs[1][chosen], xs[2][chosen], xs[3][chosen], xs[4][chosen], xs[5][chosen])
        (yield (batch_features, ys[chosen]))
|
def load_receptors(path_tsv, min_cdr3_length=8, max_cdr3_length=32):
    """Read an immunoSEQ-style TSV and aggregate frequency per CDR3 amino-acid sequence.

    Keeps rows whose sequenceStatus contains 'In' (in-frame) and whose CDR3
    length lies in [min_cdr3_length, max_cdr3_length]; frequencies of duplicate
    CDR3s are summed.

    Returns a dict mapping CDR3 string -> summed frequency (np.float64).
    """
    receptors = {}
    with open(path_tsv, 'r') as stream:
        reader = csv.DictReader(stream, delimiter='\t')
        for row in reader:
            # (Removed unused reads of nucleotide/vGeneName/dGeneName/jGeneName.)
            cdr3 = row['aminoAcid']
            quantity = np.float64(row['frequencyCount (%)'])
            status = row['sequenceStatus']
            if (('In' in status) and (min_cdr3_length <= len(cdr3)) and (len(cdr3) <= max_cdr3_length)):
                # Sum frequencies of repeated CDR3 sequences.
                receptors[cdr3] = (receptors.get(cdr3, np.float64(0.0)) + quantity)
    return receptors
|
def normalize_receptors(receptors):
    """Rescale receptor frequencies in place so they sum to 1; returns the same dict.

    The total is accumulated in ascending order of magnitude, which reduces
    floating-point error on heavily skewed repertoires.
    """
    total = sum(sorted(receptors.values()), np.float64(0.0))
    for cdr3 in receptors:
        receptors[cdr3] = (receptors[cdr3] / total)
    return receptors
|
def insert_receptors(path_db, name, receptors, max_cdr3_length=32):
    """Write receptors to dataset ``name`` of the HDF5 DB, sorted by descending frequency."""
    dtype = [('cdr3', ('S' + str(max_cdr3_length))), ('frequency', 'f8')]
    table = np.zeros(len(receptors), dtype=dtype)
    ordered = sorted(receptors, key=receptors.get, reverse=True)
    for (i, cdr3) in enumerate(ordered):
        table[i]['cdr3'] = cdr3
        table[i]['frequency'] = receptors[cdr3]
    # Append to an existing DB file, otherwise create a fresh one.
    mode = ('r+' if os.path.isfile(path_db) else 'w')
    with h5py.File(path_db, mode) as db:
        dataset = db.create_dataset(name, (table.size,), dtype)
        dataset[:] = table
|
def insert_samples(path_db, name, samples):
    """Write per-sample metadata to dataset ``name`` of the HDF5 DB.

    Each record carries the sample id, age, binary diagnosis label, and a
    weight that balances the positive and negative classes (each class's
    weights sum to 0.5).
    """
    dtype = [('sample', 'S32'), ('age', 'f8'), ('label', 'f8'), ('weight', 'f8')]
    ss = np.zeros(len(samples), dtype=dtype)
    # Count positives directly (the original enumerate loop discarded its index).
    num_pos = float(sum(1 for s in samples.values() if (s['diagnosis'] > 0.5)))
    num_neg = (len(samples) - num_pos)
    # NOTE(review): counting uses `> 0.5` but labeling/weighting use truthiness;
    # a diagnosis in (0, 0.5] would be labeled positive yet counted negative —
    # confirm whether diagnoses are strictly 0/1.
    for (i, sample) in enumerate(sorted(samples.keys())):
        ss[i]['sample'] = sample
        ss[i]['age'] = samples[sample]['age']
        ss[i]['label'] = (1.0 if samples[sample]['diagnosis'] else 0.0)
        ss[i]['weight'] = ((0.5 / num_pos) if samples[sample]['diagnosis'] else (0.5 / num_neg))
    flag = ('r+' if os.path.isfile(path_db) else 'w')
    with h5py.File(path_db, flag) as db:
        ss_db = db.create_dataset(name, (ss.size,), dtype)
        ss_db[:] = ss
|
class Abundance(Layer):
    """Keras layer mapping a scalar clone quantity to its log, adding a feature axis.

    Input shape (batch,) -> output shape (batch, 1) via expand_dims then log.
    """
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def compute_mask(self, inputs, mask=None):
        # Pass any incoming mask through unchanged.
        return mask
    def call(self, inputs, mask=None):
        inputs_expand = K.expand_dims(inputs, axis=1)
        return K.log(inputs_expand)
|
class Alignment(Layer):
    """Keras layer scoring masked sequences against learned positional filters.

    Produces one logit per filter via `alignment_score` (defined elsewhere in
    this project), optionally normalized by sqrt of the effective alignment
    length, plus a learned bias.
    """
    def __init__(self, filters, weight_steps, penalties_feature=0.0, penalties_filter=0.0, length_normalize=False, kernel_initializer='uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
        # filters: number of output logits; weight_steps: positional length of each filter.
        # penalties_feature / penalties_filter: gap penalties passed to alignment_score.
        self.filters = filters
        self.weight_steps = weight_steps
        self.penalties_feature = penalties_feature
        self.penalties_filter = penalties_filter
        self.length_normalize = length_normalize
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        super(__class__, self).__init__(**kwargs)
    def build(self, input_shape):
        # Kernel shape: (steps, feature_dim, filters); one bias per filter.
        self.kernel = self.add_weight(name='kernel', shape=[self.weight_steps, int(input_shape[2]), self.filters], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True)
        self.bias = self.add_weight(name='bias', shape=[self.filters], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True)
        super(__class__, self).build(input_shape)
    def compute_mask(self, inputs, mask=None):
        # The time axis is reduced away; keep a per-example validity flag.
        if (mask is None):
            return mask
        return K.any(mask, axis=1)
    def call(self, inputs, mask=None):
        scores = alignment_score(inputs, mask, self.kernel, penalties_feature=self.penalties_feature, penalties_weight=self.penalties_filter)
        if self.length_normalize:
            # Normalize by sqrt(min(sequence length, filter length)).
            lengths_feature = K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
            lengths_weight = K.cast(self.weight_steps, inputs.dtype)
            lengths = K.minimum(lengths_feature, lengths_weight)
            scores = (scores / K.sqrt(lengths))
        logits = (scores + self.bias)
        return logits
|
class BatchExpand(Layer):
    """Keras layer broadcasting tensor x across the batch shape of tensor y."""
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def call(self, inputs, mask=None):
        (x, y) = inputs
        # Multiplying by ones_like(y) tiles x to y's shape via broadcasting.
        return (x * K.ones_like(y, dtype=x.dtype))
|
class FullFlatten(Layer):
    """Keras layer collapsing its entire input to a rank-1 tensor."""
    def compute_mask(self, inputs, mask=None):
        # Flattening destroys the structure the mask refers to, so drop it.
        return None
    def call(self, inputs, mask=None):
        flat = tf.reshape(inputs, [-1])
        return flat
|
class Length(Layer):
    """Keras layer returning each sequence's unmasked-timestep count, shape (batch, 1)."""
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def compute_mask(self, inputs, mask=None):
        # Output is per-example; keep a per-example validity flag.
        if (mask is None):
            return mask
        return K.any(mask, axis=1)
    def call(self, inputs, mask=None):
        # Sum the boolean mask over the time axis to count valid positions.
        return K.sum(K.cast(mask, dtype=inputs.dtype), axis=1, keepdims=True)
|
class NormalizeInitializationByAggregation(Layer):
    """Keras layer standardizing inputs by weighted running moments, gated by level.

    Maintains non-trainable running sums (numerator, squared numerator,
    denominator) that are updated only on batches whose `level_` input equals
    this layer's `level`; inputs are then standardized with the mean/variance
    implied by the accumulated sums.
    """
    def __init__(self, level, epsilon=1e-05, **kwargs):
        # level: which pipeline stage this instance accumulates statistics for.
        # epsilon: variance floor added before the square root.
        self.level = level
        self.epsilon = epsilon
        super(__class__, self).__init__(**kwargs)
    def build(self, input_shape):
        # input_shape is [inputs, weights, level]; only the first matters here.
        (input_shape, _, _) = input_shape
        self.numerator = self.add_weight(name='mean', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.numerator_sq = self.add_weight(name='numerator_sq', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        # Small positive init avoids division by zero before any update.
        self.denominator = self.add_weight(name='denominator', shape=[1], initializer=Constant(1e-05), trainable=False)
        super(__class__, self).build(input_shape)
    def compute_mask(self, inputs, mask=None):
        return None
    def call(self, inputs):
        (inputs, weights, level_) = inputs
        level = tf.reshape(tf.cast(self.level, level_.dtype), [1])
        weights_expand = tf.expand_dims(weights, axis=1)
        # Weighted batch statistics for this call.
        numerator_block = tf.reduce_sum((weights_expand * inputs), axis=0)
        numerator_sq_block = tf.reduce_sum((weights_expand * (inputs ** 2)), axis=0)
        denominator_block = tf.reduce_sum(weights_expand, axis=0)
        # Accumulate only when the runtime level matches this layer's level.
        indicator = tf.cast(tf.equal(level, level_), numerator_block.dtype)
        numerator = K.update_add(self.numerator, (indicator * numerator_block))
        numerator_sq = K.update_add(self.numerator_sq, (indicator * numerator_sq_block))
        denominator = K.update_add(self.denominator, (indicator * denominator_block))
        # Standardize with the running weighted mean and variance.
        mean = (numerator / denominator)
        variance = ((numerator_sq / denominator) - (mean ** 2))
        mean_expand = tf.expand_dims(mean, axis=0)
        variance_expand = tf.expand_dims(variance, axis=0)
        outputs = ((inputs - mean_expand) / tf.sqrt((variance_expand + self.epsilon)))
        return outputs
|
def load_similarity_matrix(filename):
    """Load a CSV similarity matrix as {column_name: [float, ...] per row}.

    Empty column names (e.g. an unnamed index column) are skipped. The file
    is now opened with a context manager so the handle is always closed
    (the original leaked the open file object).
    """
    with open(filename, 'r') as stream:
        reader = csv.DictReader(stream)
        entries = list(reader)
        fieldnames = reader.fieldnames
    similarity_matrix = {}
    for k in fieldnames:
        if (len(k) < 1):
            continue
        similarity_matrix[k] = [float(obj[k]) for obj in entries]
    return similarity_matrix
|
def print_matrix(m, cdr3):
    """Pretty-print a 9-row alignment score matrix with CDR3 letters as column headers."""
    num_cols = (len(cdr3) + 1)
    header_cells = [(' %11s' % '')] + [(' %11s' % aa) for aa in cdr3]
    print(''.join(header_cells))
    for row in range(9):
        print(''.join([(' %11.4f' % m[row][col]) for col in range(num_cols)]))
|
def print_bp(bp, cdr3):
    """Pretty-print the 9-row backpointer matrix with CDR3 letters as column headers."""
    num_cols = (len(cdr3) + 1)
    header_cells = [(' %11s' % '')] + [(' %11s' % aa) for aa in cdr3]
    print(''.join(header_cells))
    for row in range(9):
        print(''.join([(' %11s' % bp[row][col]) for col in range(num_cols)]))
|
def print_alignment(bp, cdr3):
    """Reconstruct the alignment string by backtracking through backpointers.

    Walks ``bp`` from (row=8, col=len(cdr3)) toward the origin, recording for
    each step the profile row index ('diag'/'up') or '.' for a CDR3-consuming
    gap ('left'); any remaining rows/columns are gap-filled. Returns the
    recorded marks in left-to-right order as one string.
    """
    cdr3_align = []
    theta_align = []
    max_col = len(cdr3)
    col = max_col
    row = 8
    done = False
    while (not done):
        if (bp[row][col] == 'diag'):
            theta_align.append(row)
            cdr3_align.append(cdr3[(col - 1)])
            row -= 1
            col -= 1
        elif (bp[row][col] == 'up'):
            theta_align.append(row)
            cdr3_align.append('.')
            row -= 1
        elif (bp[row][col] == 'left'):
            theta_align.append('.')
            cdr3_align.append(cdr3[(col - 1)])
            col -= 1
        else:
            # Fixed: a malformed backpointer previously printed 'ERROR' forever
            # without advancing row/col (infinite loop); now bail out.
            print('ERROR')
            break
        if ((row <= 0) or (col <= 0)):
            done = True
    if (row != 0):
        # Remaining profile rows are gaps in the CDR3.
        for i in range(row, 0, (- 1)):
            theta_align.append(i)
            cdr3_align.append('.')
    if (col != 0):
        # Remaining CDR3 residues are gaps in the profile.
        for i in range(col, 0, (- 1)):
            theta_align.append('.')
            # Fixed index: was cdr3[col - 1], which repeated one residue.
            cdr3_align.append(cdr3[(i - 1)])
    return ''.join(str(c) for c in reversed(theta_align))
|
def do_alignment(sm, cdr3):
    """Global alignment of a CDR3 sequence against an 8-position profile.

    ``sm`` maps each amino acid to a list of per-position scores (indexed
    0..7). Gaps in the profile cost -1000; gaps in the CDR3 are free.
    Returns [score_matrix, backpointer_matrix], each 9 x (len(cdr3)+1).
    """
    profile_gap = (- 1000)
    sequence_gap = 0
    num_cols = (len(cdr3) + 1)
    scores = [[0.0] * num_cols for _ in range(9)]
    back = [[None] * num_cols for _ in range(9)]
    # Boundary conditions: skipping profile rows is penalized, skipping CDR3 is free.
    for row in range(9):
        scores[row][0] = (row * profile_gap)
    for col in range(num_cols):
        scores[0][col] = (col * sequence_gap)
    for col in range(1, num_cols):
        residue = cdr3[(col - 1)]
        for row in range(1, 9):
            up = (scores[(row - 1)][col] + profile_gap)
            diag = (scores[(row - 1)][(col - 1)] + sm[residue][(row - 1)])
            left = (scores[row][(col - 1)] + sequence_gap)
            # Tie-breaking order preserved exactly: strict-greater comparisons
            # favor left over up, and left over diag, on ties.
            if (up > diag):
                if (up > left):
                    scores[row][col] = up
                    back[row][col] = 'up'
                else:
                    scores[row][col] = left
                    back[row][col] = 'left'
            elif (diag > left):
                scores[row][col] = diag
                back[row][col] = 'diag'
            else:
                scores[row][col] = left
                back[row][col] = 'left'
    return [scores, back]
|
def do_file_alignment(input, output, sm_tra, sm_trb, tag):
    """Align each row's TRA/TRB CDR3 against the given similarity profiles and
    write the input CSV back out with alignment-string and score columns added.

    Files are now opened with context managers (the originals were leaked).
    """
    # Final profile row of do_alignment's 9-row (0..8) score matrix.
    # NOTE(review): this was 32, which indexes past the 9 rows that
    # do_alignment returns and would raise IndexError; 8 matches the
    # backtracking start row used by print_alignment — confirm intent.
    final_row = 8
    with open(input, 'r') as in_stream, open(output, 'w') as out_stream:
        reader = csv.DictReader(in_stream)
        fieldnames = reader.fieldnames.copy()
        fieldnames.append(('tra_alignment_' + tag))
        fieldnames.append(('tra_score_' + tag))
        fieldnames.append(('trb_alignment_' + tag))
        fieldnames.append(('trb_score_' + tag))
        writer = csv.DictWriter(out_stream, fieldnames=fieldnames)
        writer.writeheader()
        for row in reader:
            col = len(row['tra_cdr3'])
            tra_align = do_alignment(sm_tra, row['tra_cdr3'])
            row[('tra_alignment_' + tag)] = print_alignment(tra_align[1], row['tra_cdr3'])
            # Length-normalize the final alignment score by sqrt(len).
            row[('tra_score_' + tag)] = (tra_align[0][final_row][col] / math.sqrt(float(col)))
            col = len(row['trb_cdr3'])
            trb_align = do_alignment(sm_trb, row['trb_cdr3'])
            row[('trb_alignment_' + tag)] = print_alignment(trb_align[1], row['trb_cdr3'])
            row[('trb_score_' + tag)] = (trb_align[0][final_row][col] / math.sqrt(float(col)))
            writer.writerow(row)
|
def test_alignment(sm, cdr3):
    """Debug helper: align ``cdr3`` against profile ``sm`` and print the score
    matrix, the backpointer matrix, and the reconstructed alignment string."""
    align = do_alignment(sm, cdr3)
    # align[0] is the score matrix, align[1] the backpointers.
    print_matrix(align[0], cdr3)
    print_bp(align[1], cdr3)
    print(print_alignment(align[1], cdr3))
|
class BatchExpand(Layer):
    """Keras layer broadcasting tensor x across the batch shape of tensor y.

    (Duplicate definition kept as in file.)
    """
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def call(self, inputs, mask=None):
        (x, y) = inputs
        # Multiplying by ones_like(y) tiles x to y's shape via broadcasting.
        return (x * K.ones_like(y, dtype=x.dtype))
|
class GlobalPoolWithMask(Layer):
    """Masked global max pooling over the time axis (axis 1)."""
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def compute_mask(self, inputs, mask=None):
        # An example remains valid if any of its timesteps was valid.
        return tf.reduce_any(mask, axis=1)
    def call(self, inputs, mask=None):
        indicators = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        # Large negative penalty excludes masked timesteps from the max.
        penalties = ((- 1e+16) * (1.0 - indicators))
        return tf.reduce_max((inputs + penalties), axis=1)
|
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build the single-sample repertoire classifier, Alignment-layer variant.

    Per-instance logit streams (CDR3 alignment, length, log-abundance, age)
    are normalized with level-gated aggregation statistics, summed, pooled
    across instances by `Aggregate`, renormalized, and flattened.
    NOTE(review): later `generate_model` definitions in this module shadow
    this one; it appears to be kept as an alternative variant.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    # Sample-level scalars delivered as batch-size-1 inputs.
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    # Broadcast the single age value to every instance in the batch.
    features_age = BatchExpand()([feature_age, features_abundance])
    # Per-instance weight = sample weight x clone quantity.
    weights_instance = Multiply()([weight, features_quantity])
    logits_cdr3 = Alignment(num_outputs, filter_size, penalties_feature=(- 1e+16), penalties_filter=0.0, length_normalize=True)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    # Aggregate instance logits into sample-level logits, then flatten.
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
|
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build the single-sample repertoire classifier, Aggregate2Instances variant.

    Like the Alignment variant above, but uses 2 x num_outputs internal
    filters with filter-side gap penalties (no length normalization) and
    pools with `Aggregate2Instances` instead of `Aggregate`.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    # Sample-level scalars delivered as batch-size-1 inputs.
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    # Twice as many internal streams so the aggregator can pair them up.
    num_filters = (2 * num_outputs)
    logits_cdr3 = Alignment(num_filters, filter_size, penalties_feature=0.0, penalties_filter=(- 1e+16), length_normalize=False)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_filters)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_filters)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_filters)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate2Instances()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
|
class BatchExpand(Layer):
    """Keras layer broadcasting tensor x across the batch shape of tensor y.

    (Third duplicate definition kept as in file.)
    """
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def call(self, inputs, mask=None):
        (x, y) = inputs
        # Multiplying by ones_like(y) tiles x to y's shape via broadcasting.
        return (x * K.ones_like(y, dtype=x.dtype))
|
class GlobalPoolWithMask(Layer):
    """Masked global max pooling over the time axis (duplicate definition kept as in file)."""
    def __init__(self, **kwargs):
        # Python 3 zero-argument super (replaces super(__class__, self)).
        super().__init__(**kwargs)
    def compute_mask(self, inputs, mask=None):
        # An example remains valid if any of its timesteps was valid.
        return tf.reduce_any(mask, axis=1)
    def call(self, inputs, mask=None):
        indicators = tf.expand_dims(tf.cast(mask, dtype=inputs.dtype), axis=2)
        # Large negative penalty excludes masked timesteps from the max.
        penalties = ((- 1e+16) * (1.0 - indicators))
        return tf.reduce_max((inputs + penalties), axis=1)
|
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build the single-sample repertoire classifier, Conv1D variant.

    Replaces the Alignment layer with a Conv1D over the CDR3 one-hot
    sequence followed by masked global max pooling; otherwise identical to
    the Aggregate variants around it.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    # Sample-level scalars delivered as batch-size-1 inputs.
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    # Convolution shortens the sequence by filter_size-1; re-align the mask accordingly.
    logits_cdr3 = Conv1D(num_outputs, filter_size)(features_cdr3)
    logits_cdr3_mask = MaskCopy(trim_front=(filter_size - 1))([logits_cdr3, features_mask])
    logits_cdr3_pool = GlobalPoolWithMask()(logits_cdr3_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3_pool, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
|
def generate_model(input_shape_cdr3, num_outputs, filter_size):
    """Build the single-sample repertoire classifier, filter-gap Alignment variant.

    Uses an Alignment layer with gap penalties on the filter side
    (penalties_filter=-1e16, no length normalization) and num_outputs
    filters; pooling is done with `Aggregate`. This is the last
    `generate_model` definition and therefore the one in effect at import.
    """
    features_cdr3 = Input(shape=input_shape_cdr3)
    features_quantity = Input(shape=[])
    # Sample-level scalars delivered as batch-size-1 inputs.
    feature_age = Input(batch_shape=[1])
    weight = Input(batch_shape=[1])
    level = Input(batch_shape=[1])
    features_mask = Masking(mask_value=0.0)(features_cdr3)
    features_length = Length()(features_mask)
    features_abundance = Abundance()(features_quantity)
    features_age = BatchExpand()([feature_age, features_abundance])
    weights_instance = Multiply()([weight, features_quantity])
    logits_cdr3 = Alignment(num_outputs, filter_size, penalties_feature=0.0, penalties_filter=(- 1e+16), length_normalize=False)(features_mask)
    logits_cdr3_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_cdr3, weights_instance, level])
    feature_length_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_length, weights_instance, level])
    logits_length = Dense(num_outputs)(feature_length_norm)
    logits_length_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_length, weights_instance, level])
    features_abundance_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_abundance, weights_instance, level])
    logits_abundance = Dense(num_outputs)(features_abundance_norm)
    logits_abundance_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_abundance, weights_instance, level])
    features_age_norm = NormalizeInitializationByAggregation(0, epsilon=1e-05)([features_age, weights_instance, level])
    logits_age = Dense(num_outputs)(features_age_norm)
    logits_age_norm = NormalizeInitializationByAggregation(1, epsilon=1e-05)([logits_age, weights_instance, level])
    logits = Add()([logits_cdr3_norm, logits_length_norm, logits_abundance_norm, logits_age_norm])
    logits_aggregate = Aggregate()(logits)
    logits_aggregate_norm = NormalizeInitializationByAggregation(2, epsilon=1e-05)([logits_aggregate, weight, level])
    logits_flat = FullFlatten()(logits_aggregate_norm)
    model = Model(inputs=[features_cdr3, features_quantity, feature_age, weight, level], outputs=logits_flat)
    return model
|
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """The Main Window of the graphical user interface.

    The class MainWindow inherits from Ui_MainWindow, which is
    defined in maxent_ui.py. The latter file is autogenerated
    by pyuic from maxent_ui.ui [`pyuic5 maxent_ui.ui -o maxent_ui.py`]
    The ui file can be edited by the QtDesigner.
    """

    def __init__(self, *args, obj=None, **kwargs):
        """Connect the widgets, instantiate the main classes."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Real-frequency grid configured from the current widget contents.
        self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()),
                                          nw=int(self.num_real_freq.text()),
                                          type=str(self.grid_type_combo.currentText()))
        self.connect_realgrid_button()
        self.connect_wmax()
        self.connect_nw()
        self.connect_grid_type()
        self.connect_load_button_text()
        self.connect_show_button_2()
        self.connect_select_button_2()
        self.text_output.setReadOnly(True)
        self.connect_doit_button()
        self.output_data = OutputData()
        self.connect_select_output_button()
        self.connect_save_button()

    def connect_realgrid_button(self):
        """Generate the real-frequency grid when the button is clicked."""
        self.gen_real_grid_button.clicked.connect(lambda: self.realgrid.create_grid())

    def connect_wmax(self):
        """Propagate edits of the maximum real frequency to the grid object."""
        self.max_real_freq.returnPressed.connect(
            lambda: self.realgrid.update_wmax(float(self.max_real_freq.text())))
        self.max_real_freq.editingFinished.connect(
            lambda: self.realgrid.update_wmax(float(self.max_real_freq.text())))

    def connect_nw(self):
        """Propagate edits of the number of real frequencies to the grid object."""
        self.num_real_freq.returnPressed.connect(
            lambda: self.realgrid.update_nw(int(self.num_real_freq.text())))
        self.num_real_freq.editingFinished.connect(
            lambda: self.realgrid.update_nw(int(self.num_real_freq.text())))

    def connect_grid_type(self):
        """Propagate the grid-type selection to the grid object."""
        self.grid_type_combo.activated.connect(
            lambda: self.realgrid.update_type(str(self.grid_type_combo.currentText())))

    def connect_fname_input(self):
        """Keep the input-data file name in sync with the input line edit.

        NOTE(review): references ``self.inp_file_name``, which the text-file-only
        Ui variant does not create (it only has ``inp_file_name_2``); this method
        appears to be unused here -- confirm before removing.
        """
        self.inp_file_name.editingFinished.connect(
            lambda: self.input_data.update_fname(str(self.inp_file_name.text())))
        self.inp_file_name.textChanged.connect(
            lambda: self.input_data.update_fname(str(self.inp_file_name.text())))

    def get_fname(self):
        """Open a file dialog for an HDF5 input file and show the chosen path."""
        self.inp_file_name.setText(QtWidgets.QFileDialog.getOpenFileName(
            self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')[0])

    def get_fname_text(self):
        """Open a file dialog for a text input file and show the chosen path."""
        self.inp_file_name_2.setText(QtWidgets.QFileDialog.getOpenFileName(
            self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')[0])

    def connect_select_button_2(self):
        """Open the text-file dialog when the select button is clicked."""
        self.select_file_button_2.clicked.connect(self.get_fname_text)

    def connect_num_mats(self):
        """Propagate the number of Matsubara frequencies to the input data.

        NOTE(review): references ``self.num_mats_freq``, while loading here uses
        ``num_mats_freq_text``; possibly unused in this variant -- confirm.
        """
        self.num_mats_freq.editingFinished.connect(
            lambda: self.input_data.update_num_mats(int(self.num_mats_freq.text())))

    def connect_show_button_2(self):
        """Plot the currently loaded input data when the button is clicked."""
        self.show_data_button_2.clicked.connect(lambda: self.input_data.plot())

    def load_text_data(self):
        """Instantiate TextInputData from the widget settings and read the file."""
        # This GUI variant continues bosonic data (see kernel_mode below),
        # hence the hard-coded data_type.
        self.input_data = TextInputData(fname=str(self.inp_file_name_2.text()),
                                        data_type='bosonic',
                                        n_skip=str(self.n_skip.text()),
                                        num_mats=str(self.num_mats_freq_text.text()))
        self.input_data.read_data()

    def connect_load_button_text(self):
        """Load the text input file when the button is clicked."""
        self.load_data_button_2.clicked.connect(self.load_text_data)

    def get_preblur(self):
        """Return ``(preblur, bw)`` from the checkbox state and blur-width field.

        Invalid blur-width input falls back to 0.0, which disables preblur.
        """
        preblur_checked = self.preblur_checkbox.isChecked()
        try:
            bw = (float(self.blur_width.text()) if preblur_checked else 0.0)
        except ValueError:
            print('Invalid input for blur width, setting to 0.')
            bw = 0.0
        preblur = (preblur_checked and (bw > 0.0))
        return (preblur, bw)

    def main_function(self):
        """Main function for the analytic continuation procedure.

        This function is called when the "Do it" button is clicked.
        It performs an analytical continuation for the present settings
        and shows a plot.
        """
        self.ana_cont_probl = cont.AnalyticContinuationProblem(
            im_axis=self.input_data.mats, im_data=self.input_data.value.real,
            re_axis=self.realgrid.grid, kernel_mode='freq_bosonic')
        # Flat default model, normalized to unit integral on the real grid.
        model = np.ones_like(self.realgrid.grid)
        model /= np.trapz(model, self.realgrid.grid)
        (preblur, bw) = self.get_preblur()
        sol = self.ana_cont_probl.solve(method='maxent_svd', optimizer='newton',
                                        alpha_determination='chi2kink', model=model,
                                        stdev=self.input_data.error, interactive=False,
                                        alpha_start=10000000000.0, alpha_end=0.001,
                                        preblur=preblur, blur_width=bw)
        inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(
            self.input_data.atom, self.input_data.orbital, self.input_data.spin, bw)
        # BUG FIX: previously np.amin was applied to np.isfinite(...) -- a
        # boolean array -- so "min(chi2)" printed 0 or 1. Take the minimum
        # over the finite chi2 values themselves.
        chi2_values = np.array([s.chi2 for s in sol[1]])
        finite_chi2 = chi2_values[np.isfinite(chi2_values)]
        res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(
            sol[0].alpha, sol[0].chi2, np.amin(finite_chi2))
        self.text_output.append(inp_str + res_str)
        alphas = [s.alpha for s in sol[1]]
        chis = [s.chi2 for s in sol[1]]
        self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)
        # 2x2 summary plot: chi2(alpha), spectrum, data vs. fit, residuals.
        (fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
        ax[(0, 0)].loglog(alphas, chis, marker='s', color='black')
        ax[(0, 0)].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)
        ax[(0, 0)].set_xlabel('$\\alpha$')
        ax[(0, 0)].set_ylabel('$\\chi^2(\\alpha)$')
        ax[(1, 0)].plot(self.realgrid.grid, sol[0].A_opt)
        ax[(1, 0)].set_xlabel('$\\omega$')
        ax[(1, 0)].set_ylabel('spectrum')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[data]')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[data]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.real, ls='--', color='gray', label='Re[fit]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.imag, color='gray', label='Im[fit]')
        ax[(0, 1)].set_xlabel('$\\nu_n$')
        ax[(0, 1)].set_ylabel(self.input_data.data_type)
        ax[(0, 1)].legend()
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real, ls='--', label='real part')
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag, label='imaginary part')
        ax[(1, 1)].set_xlabel('$\\nu_n$')
        ax[(1, 1)].set_ylabel('data $-$ fit')
        ax[(1, 1)].legend()
        plt.tight_layout()
        plt.show()

    def connect_doit_button(self):
        """Run the analytic continuation when the "Do it" button is clicked."""
        self.doit_button.clicked.connect(lambda: self.main_function())

    def connect_fname_output(self):
        """Keep the output file name in sync with the output line edit."""
        self.out_file_name.editingFinished.connect(
            lambda: self.output_data.update_fname(str(self.out_file_name.text())))
        # BUG FIX: textChanged was connected on inp_file_name (copy-paste from
        # connect_fname_input); the output name must track out_file_name.
        self.out_file_name.textChanged.connect(
            lambda: self.output_data.update_fname(str(self.out_file_name.text())))

    def get_fname_output(self):
        """Ask for an output file name, starting in the input file's directory."""
        fname_out = QtWidgets.QFileDialog.getSaveFileName(
            self, 'Save as', '/'.join(self.input_data.fname.split('/')[:(- 1)]),
            'DAT files (*.dat)')[0]
        self.out_file_name.setText(fname_out)
        self.output_data.update_fname(fname_out)

    def connect_select_output_button(self):
        """Open the output file dialog when the button is clicked."""
        self.output_directory_button.clicked.connect(self.get_fname_output)

    def save_output(self):
        """Save the continuation result; return 1 if the file name is missing."""
        fname_out = str(self.out_file_name.text())
        if (fname_out == ''):
            print('Error in saving: First you have to specify the output file name.')
            return 1
        self.output_data.update_fname(fname_out)
        try:
            self.output_data.save(interpolate=self.interpolate_checkbox.isChecked(),
                                  n_reg=int(self.n_interpolation.text()))
        except AttributeError:
            print('Error in saving: First you have to specify the output file name.')

    def connect_save_button(self):
        """Save the output when the save button is clicked."""
        self.save_button.clicked.connect(lambda: self.save_output())
|
class Ui_MainWindow(object):
    """Widget layout of the main window.

    NOTE(review): this class has the shape of `pyuic5`-generated output from a
    Qt Designer .ui file; prefer editing the .ui file and regenerating rather
    than hand-editing geometry values here -- confirm against the build setup.
    """
    def setupUi(self, MainWindow):
        """Create, position and name all widgets of *MainWindow*."""
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(759, 629)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        # --- real-frequency grid frame (top left) ---
        self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
        self.real_freq_frame.setGeometry(QtCore.QRect(10, 10, 231, 171))
        self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.real_freq_frame.setObjectName('real_freq_frame')
        self.label = QtWidgets.QLabel(self.real_freq_frame)
        self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
        self.label.setObjectName('label')
        self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
        self.label_3.setObjectName('label_3')
        self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
        self.label_4.setObjectName('label_4')
        # Combo box items get their visible text in retranslateUi().
        self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
        self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 201, 25))
        self.grid_type_combo.setObjectName('grid_type_combo')
        self.grid_type_combo.addItem('')
        self.grid_type_combo.addItem('')
        self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
        self.max_real_freq.setObjectName('max_real_freq')
        self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
        self.num_real_freq.setObjectName('num_real_freq')
        self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
        self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
        self.gen_real_grid_button.setObjectName('gen_real_grid_button')
        # --- input-data tab widget with a single "text file" tab (top right) ---
        self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
        self.input_data_tabs.setGeometry(QtCore.QRect(250, 10, 501, 171))
        self.input_data_tabs.setObjectName('input_data_tabs')
        self.text_tab = QtWidgets.QWidget()
        self.text_tab.setObjectName('text_tab')
        self.label_14 = QtWidgets.QLabel(self.text_tab)
        self.label_14.setGeometry(QtCore.QRect(10, 10, 221, 17))
        self.label_14.setObjectName('label_14')
        self.label_16 = QtWidgets.QLabel(self.text_tab)
        self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_16.setObjectName('label_16')
        self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
        self.inp_file_name_2.setGeometry(QtCore.QRect(90, 40, 361, 25))
        self.inp_file_name_2.setObjectName('inp_file_name_2')
        self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
        self.select_file_button_2.setGeometry(QtCore.QRect(460, 40, 26, 24))
        self.select_file_button_2.setObjectName('select_file_button_2')
        self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.load_data_button_2.setGeometry(QtCore.QRect(300, 110, 89, 25))
        self.load_data_button_2.setObjectName('load_data_button_2')
        self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.show_data_button_2.setGeometry(QtCore.QRect(400, 110, 89, 25))
        self.show_data_button_2.setObjectName('show_data_button_2')
        self.label_17 = QtWidgets.QLabel(self.text_tab)
        self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
        self.label_17.setObjectName('label_17')
        self.n_skip = QtWidgets.QLineEdit(self.text_tab)
        self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
        self.n_skip.setObjectName('n_skip')
        self.label_18 = QtWidgets.QLabel(self.text_tab)
        self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
        self.label_18.setObjectName('label_18')
        self.label_19 = QtWidgets.QLabel(self.text_tab)
        self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
        self.label_19.setObjectName('label_19')
        self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
        self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
        self.num_mats_freq_text.setObjectName('num_mats_freq_text')
        self.label_20 = QtWidgets.QLabel(self.text_tab)
        self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
        self.label_20.setObjectName('label_20')
        self.input_data_tabs.addTab(self.text_tab, '')
        # --- continuation frame: preblur controls, log output, save controls ---
        self.continuation_frame = QtWidgets.QFrame(self.centralwidget)
        self.continuation_frame.setGeometry(QtCore.QRect(10, 190, 741, 391))
        self.continuation_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.continuation_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.continuation_frame.setObjectName('continuation_frame')
        self.doit_button = QtWidgets.QPushButton(self.continuation_frame)
        self.doit_button.setGeometry(QtCore.QRect(590, 20, 131, 41))
        self.doit_button.setObjectName('doit_button')
        self.blur_width = QtWidgets.QLineEdit(self.continuation_frame)
        self.blur_width.setGeometry(QtCore.QRect(80, 40, 113, 25))
        self.blur_width.setObjectName('blur_width')
        self.label_11 = QtWidgets.QLabel(self.continuation_frame)
        self.label_11.setGeometry(QtCore.QRect(30, 40, 51, 17))
        self.label_11.setObjectName('label_11')
        self.text_output = QtWidgets.QTextEdit(self.continuation_frame)
        self.text_output.setGeometry(QtCore.QRect(30, 80, 691, 231))
        self.text_output.setObjectName('text_output')
        self.save_button = QtWidgets.QPushButton(self.continuation_frame)
        self.save_button.setGeometry(QtCore.QRect(630, 360, 89, 25))
        self.save_button.setObjectName('save_button')
        self.output_directory_button = QtWidgets.QToolButton(self.continuation_frame)
        self.output_directory_button.setGeometry(QtCore.QRect(120, 360, 26, 24))
        self.output_directory_button.setObjectName('output_directory_button')
        self.out_file_name = QtWidgets.QLineEdit(self.continuation_frame)
        self.out_file_name.setGeometry(QtCore.QRect(160, 360, 451, 25))
        self.out_file_name.setObjectName('out_file_name')
        self.label_12 = QtWidgets.QLabel(self.continuation_frame)
        self.label_12.setGeometry(QtCore.QRect(30, 360, 91, 17))
        self.label_12.setObjectName('label_12')
        self.n_interpolation = QtWidgets.QLineEdit(self.continuation_frame)
        self.n_interpolation.setGeometry(QtCore.QRect(200, 320, 41, 25))
        self.n_interpolation.setObjectName('n_interpolation')
        self.label_13 = QtWidgets.QLabel(self.continuation_frame)
        self.label_13.setGeometry(QtCore.QRect(250, 320, 201, 17))
        self.label_13.setObjectName('label_13')
        self.preblur_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.preblur_checkbox.setGeometry(QtCore.QRect(30, 10, 92, 23))
        self.preblur_checkbox.setObjectName('preblur_checkbox')
        self.interpolate_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.interpolate_checkbox.setGeometry(QtCore.QRect(30, 320, 171, 23))
        self.interpolate_checkbox.setObjectName('interpolate_checkbox')
        # --- window chrome: central widget, menu bar, status bar ---
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 759, 22))
        self.menubar.setObjectName('menubar')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.input_data_tabs.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible texts and tooltips (translation-aware)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
        self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
        self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
        self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
        self.label_3.setText(_translate('MainWindow', 'max'))
        self.label_4.setText(_translate('MainWindow', 'n'))
        self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
        self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced positive'))
        self.grid_type_combo.setItemText(1, _translate('MainWindow', 'centered positive'))
        self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
        self.max_real_freq.setText(_translate('MainWindow', '20'))
        self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
        self.num_real_freq.setText(_translate('MainWindow', '401'))
        self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
        self.label_14.setText(_translate('MainWindow', 'Load susceptibility from text file'))
        self.label_16.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button_2.setText(_translate('MainWindow', '...'))
        self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
        self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
        self.label_17.setText(_translate('MainWindow', 'Skip'))
        self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
        self.label_19.setText(_translate('MainWindow', 'Use'))
        self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
        self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
        self.doit_button.setText(_translate('MainWindow', 'Do it!'))
        self.blur_width.setToolTip(_translate('MainWindow', 'set the blur width here'))
        self.blur_width.setText(_translate('MainWindow', '0.1'))
        self.label_11.setText(_translate('MainWindow', 'Width'))
        self.text_output.setToolTip(_translate('MainWindow', 'in this field some output will be shown'))
        self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
        self.save_button.setText(_translate('MainWindow', 'Save'))
        self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
        self.output_directory_button.setText(_translate('MainWindow', '...'))
        self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
        self.label_12.setText(_translate('MainWindow', 'Output file:'))
        self.n_interpolation.setToolTip(_translate('MainWindow', 'number of regularly spaced grid points for interpolation'))
        self.n_interpolation.setText(_translate('MainWindow', '0'))
        self.label_13.setText(_translate('MainWindow', 'regularly spaced grid points'))
        self.preblur_checkbox.setToolTip(_translate('MainWindow', 'check this if you want to use preblur'))
        self.preblur_checkbox.setText(_translate('MainWindow', 'Preblur'))
        self.interpolate_checkbox.setToolTip(_translate('MainWindow', 'check this for interpolating output to regular grid'))
        self.interpolate_checkbox.setText(_translate('MainWindow', 'Interpolate output to'))
|
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """The Main Window of the graphical user interface.

    The class MainWindow inherits from Ui_MainWindow, which is
    defined in maxent_ui.py. The latter file is autogenerated
    by pyuic from maxent_ui.ui [`pyuic5 maxent_ui.ui -o maxent_ui.py`]
    The ui file can be edited by the QtDesigner.
    """

    def __init__(self, *args, obj=None, **kwargs):
        """Connect the widgets, instantiate the main classes."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Real-frequency grid configured from the current widget contents.
        self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()),
                                          nw=int(self.num_real_freq.text()),
                                          type=str(self.grid_type_combo.currentText()))
        self.connect_realgrid_button()
        self.connect_wmax()
        self.connect_nw()
        self.connect_grid_type()
        # Default input-data object from the widget defaults; replaced on load.
        self.input_data = InputData(fname=str(self.inp_file_name.text()),
                                    iter_type=str(self.iteration_type_combo.currentText()),
                                    iter_num=str(self.iteration_number.text()),
                                    data_type=str(self.inp_data_type.currentText()),
                                    atom=str(self.atom_number.text()),
                                    orbital=str(self.orbital_number.text()),
                                    spin=str(self.spin_type_combo.currentText()),
                                    num_mats=str(self.num_mats_freq.text()))
        self.connect_select_button()
        self.connect_load_button()
        self.connect_show_button()
        self.connect_load_button_text()
        self.connect_show_button_2()
        self.connect_select_button_2()
        self.text_output.setReadOnly(True)
        self.connect_doit_button()
        self.output_data = OutputData()
        self.connect_select_output_button()
        self.connect_save_button()

    def connect_realgrid_button(self):
        """Generate the real-frequency grid when the button is clicked."""
        self.gen_real_grid_button.clicked.connect(lambda: self.realgrid.create_grid())

    def connect_wmax(self):
        """Propagate edits of the maximum real frequency to the grid object."""
        self.max_real_freq.returnPressed.connect(
            lambda: self.realgrid.update_wmax(float(self.max_real_freq.text())))
        self.max_real_freq.editingFinished.connect(
            lambda: self.realgrid.update_wmax(float(self.max_real_freq.text())))

    def connect_nw(self):
        """Propagate edits of the number of real frequencies to the grid object."""
        self.num_real_freq.returnPressed.connect(
            lambda: self.realgrid.update_nw(int(self.num_real_freq.text())))
        self.num_real_freq.editingFinished.connect(
            lambda: self.realgrid.update_nw(int(self.num_real_freq.text())))

    def connect_grid_type(self):
        """Propagate the grid-type selection to the grid object."""
        self.grid_type_combo.activated.connect(
            lambda: self.realgrid.update_type(str(self.grid_type_combo.currentText())))

    def preset_fnames(self, fname):
        """Preset both input file name fields (HDF5 tab and text tab)."""
        self.inp_file_name.setText(fname)
        self.inp_file_name_2.setText(fname)

    def connect_fname_input(self):
        """Keep the input-data file name in sync with the input line edit."""
        self.inp_file_name.editingFinished.connect(
            lambda: self.input_data.update_fname(str(self.inp_file_name.text())))
        self.inp_file_name.textChanged.connect(
            lambda: self.input_data.update_fname(str(self.inp_file_name.text())))

    def get_fname(self):
        """Open a file dialog for an HDF5 input file and show the chosen path."""
        self.inp_file_name.setText(QtWidgets.QFileDialog.getOpenFileName(
            self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')[0])

    def connect_select_button(self):
        """Open the HDF5 file dialog when the select button is clicked."""
        self.select_file_button.clicked.connect(self.get_fname)

    def get_fname_text(self):
        """Open a file dialog for a text input file and show the chosen path."""
        self.inp_file_name_2.setText(QtWidgets.QFileDialog.getOpenFileName(
            self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')[0])

    def connect_select_button_2(self):
        """Open the text-file dialog when the select button is clicked."""
        self.select_file_button_2.clicked.connect(self.get_fname_text)

    def connect_data_type(self):
        """Propagate the data-type selection to the input data."""
        self.inp_data_type.activated.connect(
            lambda: self.input_data.update_data_type(str(self.inp_data_type.currentText())))

    def connect_iteration_type(self):
        """Propagate the iteration-type selection to the input data."""
        self.iteration_type_combo.activated.connect(
            lambda: self.input_data.update_iter_type(str(self.iteration_type_combo.currentText())))

    def connect_iteration_number(self):
        """Propagate the iteration number to the input data."""
        self.iteration_number.editingFinished.connect(
            lambda: self.input_data.update_iter_num(str(self.iteration_number.text())))

    def connect_atom(self):
        """Propagate the atom number to the input data."""
        self.atom_number.editingFinished.connect(
            lambda: self.input_data.update_atom(int(self.atom_number.text())))

    def connect_orbital(self):
        """Propagate the orbital number to the input data."""
        self.orbital_number.editingFinished.connect(
            lambda: self.input_data.update_orbital(int(self.orbital_number.text())))

    def connect_spin(self):
        """Propagate the spin selection to the input data."""
        self.spin_type_combo.activated.connect(
            lambda: self.input_data.update_spin(str(self.spin_type_combo.currentText())))

    def connect_num_mats(self):
        """Propagate the number of Matsubara frequencies to the input data."""
        self.num_mats_freq.editingFinished.connect(
            lambda: self.input_data.update_num_mats(int(self.num_mats_freq.text())))

    def connect_show_button(self):
        """Plot the currently loaded input data (HDF5 tab button)."""
        self.show_data_button.clicked.connect(lambda: self.input_data.plot())

    def connect_show_button_2(self):
        """Plot the currently loaded input data (text tab button)."""
        self.show_data_button_2.clicked.connect(lambda: self.input_data.plot())

    def load_w2dynamics_data(self):
        """Instantiate InputData from the widget settings and load the HDF5 file."""
        self.input_data = InputData(fname=str(self.inp_file_name.text()),
                                    iter_type=str(self.iteration_type_combo.currentText()),
                                    iter_num=str(self.iteration_number.text()),
                                    data_type=str(self.inp_data_type.currentText()),
                                    atom=str(self.atom_number.text()),
                                    orbital=str(self.orbital_number.text()),
                                    spin=str(self.spin_type_combo.currentText()),
                                    num_mats=str(self.num_mats_freq.text()),
                                    ignore_real_part=self.ignore_checkbox.isChecked())
        self.input_data.load_data()

    def connect_load_button(self):
        """Load the w2dynamics HDF5 file when the button is clicked."""
        self.load_data_button.clicked.connect(self.load_w2dynamics_data)

    def load_text_data(self):
        """Instantiate TextInputData from the widget settings and read the file."""
        self.input_data = TextInputData(fname=str(self.inp_file_name_2.text()),
                                        data_type=str(self.inp_data_type_text.currentText()),
                                        n_skip=str(self.n_skip.text()),
                                        num_mats=str(self.num_mats_freq_text.text()))
        self.input_data.read_data()

    def connect_load_button_text(self):
        """Load the text input file when the button is clicked."""
        self.load_data_button_2.clicked.connect(self.load_text_data)

    def get_preblur(self):
        """Return ``(preblur, bw)`` from the checkbox state and blur-width field.

        Invalid blur-width input falls back to 0.0, which disables preblur.
        """
        preblur_checked = self.preblur_checkbox.isChecked()
        try:
            bw = (float(self.blur_width.text()) if preblur_checked else 0.0)
        except ValueError:
            print('Invalid input for blur width, setting to 0.')
            bw = 0.0
        preblur = (preblur_checked and (bw > 0.0))
        return (preblur, bw)

    def main_function(self):
        """Main function for the analytic continuation procedure.

        This function is called when the "Do it" button is clicked.
        It performs an analytical continuation for the present settings
        and shows a plot.
        """
        self.ana_cont_probl = cont.AnalyticContinuationProblem(
            im_axis=self.input_data.mats, im_data=self.input_data.value,
            re_axis=self.realgrid.grid, kernel_mode='freq_fermionic')
        # Flat default model, normalized to unit integral on the real grid.
        model = np.ones_like(self.realgrid.grid)
        model /= np.trapz(model, self.realgrid.grid)
        (preblur, bw) = self.get_preblur()
        sol = self.ana_cont_probl.solve(method='maxent_svd', optimizer='newton',
                                        alpha_determination='chi2kink', model=model,
                                        stdev=self.input_data.error, interactive=False,
                                        alpha_start=100000000000000.0, alpha_end=0.001,
                                        preblur=preblur, blur_width=bw)
        inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(
            self.input_data.atom, self.input_data.orbital, self.input_data.spin, bw)
        # BUG FIX: previously np.amin was applied to np.isfinite(...) -- a
        # boolean array -- so "min(chi2)" printed 0 or 1. Take the minimum
        # over the finite chi2 values themselves.
        chi2_values = np.array([s.chi2 for s in sol[1]])
        finite_chi2 = chi2_values[np.isfinite(chi2_values)]
        res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(
            sol[0].alpha, sol[0].chi2, np.amin(finite_chi2))
        self.text_output.append(inp_str + res_str)
        alphas = [s.alpha for s in sol[1]]
        chis = [s.chi2 for s in sol[1]]
        self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)
        # 2x2 summary plot: chi2(alpha), spectrum, data vs. fit, residuals.
        (fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
        ax[(0, 0)].loglog(alphas, chis, marker='s', color='black')
        ax[(0, 0)].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)
        ax[(0, 0)].set_xlabel('$\\alpha$')
        ax[(0, 0)].set_ylabel('$\\chi^2(\\alpha)$')
        ax[(1, 0)].plot(self.realgrid.grid, sol[0].A_opt)
        ax[(1, 0)].set_xlabel('$\\omega$')
        ax[(1, 0)].set_ylabel('spectrum')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[data]')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[data]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.real, ls='--', color='gray', label='Re[fit]')
        ax[(0, 1)].plot(self.input_data.mats, sol[0].backtransform.imag, color='gray', label='Im[fit]')
        ax[(0, 1)].set_xlabel('$\\nu_n$')
        ax[(0, 1)].set_ylabel(self.input_data.data_type)
        ax[(0, 1)].legend()
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real, ls='--', label='real part')
        ax[(1, 1)].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag, label='imaginary part')
        ax[(1, 1)].set_xlabel('$\\nu_n$')
        ax[(1, 1)].set_ylabel('data $-$ fit')
        ax[(1, 1)].legend()
        plt.tight_layout()
        plt.show()

    def connect_doit_button(self):
        """Run the analytic continuation when the "Do it" button is clicked."""
        self.doit_button.clicked.connect(lambda: self.main_function())

    def connect_fname_output(self):
        """Keep the output file name in sync with the output line edit."""
        self.out_file_name.editingFinished.connect(
            lambda: self.output_data.update_fname(str(self.out_file_name.text())))
        # BUG FIX: textChanged was connected on inp_file_name (copy-paste from
        # connect_fname_input); the output name must track out_file_name.
        self.out_file_name.textChanged.connect(
            lambda: self.output_data.update_fname(str(self.out_file_name.text())))

    def get_fname_output(self):
        """Ask for an output file name, starting in the input file's directory."""
        fname_out = QtWidgets.QFileDialog.getSaveFileName(
            self, 'Save as', '/'.join(self.input_data.fname.split('/')[:(- 1)]),
            'DAT files (*.dat)')[0]
        self.out_file_name.setText(fname_out)
        self.output_data.update_fname(fname_out)

    def connect_select_output_button(self):
        """Open the output file dialog when the button is clicked."""
        self.output_directory_button.clicked.connect(self.get_fname_output)

    def save_output(self):
        """Save the continuation result; return 1 if the file name is missing."""
        fname_out = str(self.out_file_name.text())
        if (fname_out == ''):
            print('Error in saving: First you have to specify the output file name.')
            return 1
        self.output_data.update_fname(fname_out)
        try:
            self.output_data.save(interpolate=self.interpolate_checkbox.isChecked(),
                                  n_reg=int(self.n_interpolation.text()))
        except AttributeError:
            print('Error in saving: First you have to specify the output file name.')

    def connect_save_button(self):
        """Save the output when the save button is clicked."""
        self.save_button.clicked.connect(lambda: self.save_output())
|
class Ui_MainWindow(object):
    """Widget layout of the application's main window.

    NOTE(review): this follows the pyuic setupUi/retranslateUi pattern (the
    MainWindow docstring below states the ui class is autogenerated from a
    .ui file), so prefer regenerating over hand-editing.
    """

    def setupUi(self, MainWindow):
        """Create, place and name every widget of the main window."""
        MainWindow.setObjectName('MainWindow')
        MainWindow.resize(760, 633)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName('centralwidget')
        # --- real-frequency grid frame (top left) ---
        self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
        self.real_freq_frame.setGeometry(QtCore.QRect(10, 10, 171, 171))
        self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.real_freq_frame.setObjectName('real_freq_frame')
        self.label = QtWidgets.QLabel(self.real_freq_frame)
        self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
        self.label.setObjectName('label')
        self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
        self.label_3.setObjectName('label_3')
        self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
        self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
        self.label_4.setObjectName('label_4')
        self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
        self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 141, 25))
        self.grid_type_combo.setObjectName('grid_type_combo')
        self.grid_type_combo.addItem('')
        self.grid_type_combo.addItem('')
        self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
        self.max_real_freq.setObjectName('max_real_freq')
        self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
        self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
        self.num_real_freq.setObjectName('num_real_freq')
        self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
        self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
        self.gen_real_grid_button.setObjectName('gen_real_grid_button')
        # --- continuation frame (do-it button, preblur, text output, saving) ---
        self.continuation_frame = QtWidgets.QFrame(self.centralwidget)
        self.continuation_frame.setGeometry(QtCore.QRect(10, 190, 741, 391))
        self.continuation_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.continuation_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.continuation_frame.setObjectName('continuation_frame')
        self.doit_button = QtWidgets.QPushButton(self.continuation_frame)
        self.doit_button.setGeometry(QtCore.QRect(590, 20, 131, 41))
        self.doit_button.setObjectName('doit_button')
        self.blur_width = QtWidgets.QLineEdit(self.continuation_frame)
        self.blur_width.setGeometry(QtCore.QRect(80, 40, 113, 25))
        self.blur_width.setObjectName('blur_width')
        self.label_11 = QtWidgets.QLabel(self.continuation_frame)
        self.label_11.setGeometry(QtCore.QRect(30, 40, 51, 17))
        self.label_11.setObjectName('label_11')
        self.text_output = QtWidgets.QTextEdit(self.continuation_frame)
        self.text_output.setGeometry(QtCore.QRect(30, 80, 691, 231))
        self.text_output.setObjectName('text_output')
        self.save_button = QtWidgets.QPushButton(self.continuation_frame)
        self.save_button.setGeometry(QtCore.QRect(630, 360, 89, 25))
        self.save_button.setObjectName('save_button')
        self.output_directory_button = QtWidgets.QToolButton(self.continuation_frame)
        self.output_directory_button.setGeometry(QtCore.QRect(120, 360, 26, 24))
        self.output_directory_button.setObjectName('output_directory_button')
        self.out_file_name = QtWidgets.QLineEdit(self.continuation_frame)
        self.out_file_name.setGeometry(QtCore.QRect(160, 360, 451, 25))
        self.out_file_name.setObjectName('out_file_name')
        self.label_12 = QtWidgets.QLabel(self.continuation_frame)
        self.label_12.setGeometry(QtCore.QRect(30, 360, 91, 17))
        self.label_12.setObjectName('label_12')
        self.n_interpolation = QtWidgets.QLineEdit(self.continuation_frame)
        self.n_interpolation.setGeometry(QtCore.QRect(200, 320, 41, 25))
        self.n_interpolation.setObjectName('n_interpolation')
        self.label_13 = QtWidgets.QLabel(self.continuation_frame)
        self.label_13.setGeometry(QtCore.QRect(250, 320, 201, 17))
        self.label_13.setObjectName('label_13')
        self.preblur_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.preblur_checkbox.setGeometry(QtCore.QRect(30, 10, 92, 23))
        self.preblur_checkbox.setObjectName('preblur_checkbox')
        self.interpolate_checkbox = QtWidgets.QCheckBox(self.continuation_frame)
        self.interpolate_checkbox.setGeometry(QtCore.QRect(30, 320, 171, 23))
        self.interpolate_checkbox.setObjectName('interpolate_checkbox')
        # --- input-data tab widget: w2dynamics-file tab ---
        self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
        self.input_data_tabs.setGeometry(QtCore.QRect(190, 10, 561, 171))
        self.input_data_tabs.setObjectName('input_data_tabs')
        self.w2dyn_tab = QtWidgets.QWidget()
        self.w2dyn_tab.setObjectName('w2dyn_tab')
        self.input_data_frame = QtWidgets.QFrame(self.w2dyn_tab)
        self.input_data_frame.setGeometry(QtCore.QRect(0, 0, 561, 141))
        self.input_data_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.input_data_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.input_data_frame.setObjectName('input_data_frame')
        self.label_2 = QtWidgets.QLabel(self.input_data_frame)
        self.label_2.setGeometry(QtCore.QRect(10, 10, 161, 17))
        self.label_2.setObjectName('label_2')
        self.inp_data_type = QtWidgets.QComboBox(self.input_data_frame)
        self.inp_data_type.setGeometry(QtCore.QRect(180, 10, 121, 25))
        self.inp_data_type.setObjectName('inp_data_type')
        self.inp_data_type.addItem('')
        self.inp_data_type.addItem('')
        self.label_5 = QtWidgets.QLabel(self.input_data_frame)
        self.label_5.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_5.setObjectName('label_5')
        self.inp_file_name = QtWidgets.QLineEdit(self.input_data_frame)
        self.inp_file_name.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name.setObjectName('inp_file_name')
        self.label_6 = QtWidgets.QLabel(self.input_data_frame)
        self.label_6.setGeometry(QtCore.QRect(10, 70, 67, 17))
        self.label_6.setObjectName('label_6')
        self.iteration_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.iteration_type_combo.setGeometry(QtCore.QRect(80, 70, 86, 25))
        self.iteration_type_combo.setObjectName('iteration_type_combo')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_type_combo.addItem('')
        self.iteration_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.iteration_number.setGeometry(QtCore.QRect(170, 70, 31, 25))
        self.iteration_number.setObjectName('iteration_number')
        self.label_7 = QtWidgets.QLabel(self.input_data_frame)
        self.label_7.setGeometry(QtCore.QRect(230, 70, 41, 17))
        self.label_7.setObjectName('label_7')
        self.atom_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.atom_number.setGeometry(QtCore.QRect(280, 70, 21, 25))
        self.atom_number.setObjectName('atom_number')
        self.label_8 = QtWidgets.QLabel(self.input_data_frame)
        self.label_8.setGeometry(QtCore.QRect(320, 70, 51, 17))
        self.label_8.setObjectName('label_8')
        self.orbital_number = QtWidgets.QLineEdit(self.input_data_frame)
        self.orbital_number.setGeometry(QtCore.QRect(380, 70, 21, 25))
        self.orbital_number.setObjectName('orbital_number')
        self.label_9 = QtWidgets.QLabel(self.input_data_frame)
        self.label_9.setGeometry(QtCore.QRect(420, 70, 31, 17))
        self.label_9.setObjectName('label_9')
        self.spin_type_combo = QtWidgets.QComboBox(self.input_data_frame)
        self.spin_type_combo.setGeometry(QtCore.QRect(460, 70, 81, 25))
        self.spin_type_combo.setObjectName('spin_type_combo')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.spin_type_combo.addItem('')
        self.load_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.load_data_button.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button.setObjectName('load_data_button')
        self.label_10 = QtWidgets.QLabel(self.input_data_frame)
        self.label_10.setGeometry(QtCore.QRect(10, 110, 241, 17))
        self.label_10.setObjectName('label_10')
        self.num_mats_freq = QtWidgets.QLineEdit(self.input_data_frame)
        self.num_mats_freq.setGeometry(QtCore.QRect(250, 110, 41, 25))
        self.num_mats_freq.setObjectName('num_mats_freq')
        self.show_data_button = QtWidgets.QPushButton(self.input_data_frame)
        self.show_data_button.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button.setObjectName('show_data_button')
        self.select_file_button = QtWidgets.QToolButton(self.input_data_frame)
        self.select_file_button.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button.setObjectName('select_file_button')
        self.ignore_checkbox = QtWidgets.QCheckBox(self.input_data_frame)
        self.ignore_checkbox.setGeometry(QtCore.QRect(350, 10, 131, 23))
        self.ignore_checkbox.setObjectName('ignore_checkbox')
        self.input_data_tabs.addTab(self.w2dyn_tab, '')
        # --- input-data tab widget: plain text-file tab ---
        self.text_tab = QtWidgets.QWidget()
        self.text_tab.setObjectName('text_tab')
        self.inp_data_type_text = QtWidgets.QComboBox(self.text_tab)
        self.inp_data_type_text.setGeometry(QtCore.QRect(50, 10, 121, 25))
        self.inp_data_type_text.setObjectName('inp_data_type_text')
        self.inp_data_type_text.addItem('')
        self.inp_data_type_text.addItem('')
        self.label_14 = QtWidgets.QLabel(self.text_tab)
        self.label_14.setGeometry(QtCore.QRect(10, 10, 41, 17))
        self.label_14.setObjectName('label_14')
        self.label_15 = QtWidgets.QLabel(self.text_tab)
        self.label_15.setGeometry(QtCore.QRect(180, 10, 91, 17))
        self.label_15.setObjectName('label_15')
        self.label_16 = QtWidgets.QLabel(self.text_tab)
        self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
        self.label_16.setObjectName('label_16')
        self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
        self.inp_file_name_2.setGeometry(QtCore.QRect(80, 40, 421, 25))
        self.inp_file_name_2.setObjectName('inp_file_name_2')
        self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
        self.select_file_button_2.setGeometry(QtCore.QRect(510, 40, 26, 24))
        self.select_file_button_2.setObjectName('select_file_button_2')
        self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.load_data_button_2.setGeometry(QtCore.QRect(350, 110, 89, 25))
        self.load_data_button_2.setObjectName('load_data_button_2')
        self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
        self.show_data_button_2.setGeometry(QtCore.QRect(450, 110, 89, 25))
        self.show_data_button_2.setObjectName('show_data_button_2')
        self.label_17 = QtWidgets.QLabel(self.text_tab)
        self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
        self.label_17.setObjectName('label_17')
        self.n_skip = QtWidgets.QLineEdit(self.text_tab)
        self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
        self.n_skip.setObjectName('n_skip')
        self.label_18 = QtWidgets.QLabel(self.text_tab)
        self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
        self.label_18.setObjectName('label_18')
        self.label_19 = QtWidgets.QLabel(self.text_tab)
        self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
        self.label_19.setObjectName('label_19')
        self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
        self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
        self.num_mats_freq_text.setObjectName('num_mats_freq_text')
        self.label_20 = QtWidgets.QLabel(self.text_tab)
        self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
        self.label_20.setObjectName('label_20')
        self.input_data_tabs.addTab(self.text_tab, '')
        # --- menu bar and status bar ---
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 760, 22))
        self.menubar.setObjectName('menubar')
        self.menuMaxEnt = QtWidgets.QMenu(self.menubar)
        self.menuMaxEnt.setObjectName('menuMaxEnt')
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName('statusbar')
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menuMaxEnt.menuAction())
        self.retranslateUi(MainWindow)
        self.input_data_tabs.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible texts and tooltips (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
        self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
        self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
        self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
        self.label_3.setText(_translate('MainWindow', 'max'))
        self.label_4.setText(_translate('MainWindow', 'n'))
        self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
        self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced symmetric'))
        self.grid_type_combo.setItemText(1, _translate('MainWindow', 'centered symmetric'))
        self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
        self.max_real_freq.setText(_translate('MainWindow', '20'))
        self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
        self.num_real_freq.setText(_translate('MainWindow', '401'))
        self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
        self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
        self.doit_button.setText(_translate('MainWindow', 'Do it!'))
        self.blur_width.setToolTip(_translate('MainWindow', 'set the blur width here'))
        self.blur_width.setText(_translate('MainWindow', '0.1'))
        self.label_11.setText(_translate('MainWindow', 'Width'))
        self.text_output.setToolTip(_translate('MainWindow', 'in this field some output will be shown'))
        self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
        self.save_button.setText(_translate('MainWindow', 'Save'))
        self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
        self.output_directory_button.setText(_translate('MainWindow', '...'))
        self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
        self.label_12.setText(_translate('MainWindow', 'Output file:'))
        self.n_interpolation.setToolTip(_translate('MainWindow', 'number of regularly spaced grid points for interpolation'))
        self.n_interpolation.setText(_translate('MainWindow', '0'))
        self.label_13.setText(_translate('MainWindow', 'regularly spaced grid points'))
        self.preblur_checkbox.setToolTip(_translate('MainWindow', 'check this if you want to use preblur'))
        self.preblur_checkbox.setText(_translate('MainWindow', 'Preblur'))
        self.interpolate_checkbox.setToolTip(_translate('MainWindow', 'check this for interpolating output to regular grid'))
        self.interpolate_checkbox.setText(_translate('MainWindow', 'Interpolate output to'))
        self.label_2.setText(_translate('MainWindow', 'Load w2dynamics data'))
        self.inp_data_type.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type.setItemText(1, _translate('MainWindow', "Green's function"))
        self.label_5.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.label_6.setText(_translate('MainWindow', 'iteration'))
        self.iteration_type_combo.setItemText(0, _translate('MainWindow', 'DMFT'))
        self.iteration_type_combo.setItemText(1, _translate('MainWindow', 'STAT'))
        self.iteration_type_combo.setItemText(2, _translate('MainWindow', 'WORM'))
        self.iteration_number.setToolTip(_translate('MainWindow', 'integer; leave empty for last iteration'))
        self.label_7.setText(_translate('MainWindow', 'Atom'))
        self.atom_number.setToolTip(_translate('MainWindow', 'choose inequivalent atom (one-based integer)'))
        self.atom_number.setText(_translate('MainWindow', '1'))
        self.label_8.setText(_translate('MainWindow', 'Orbital'))
        self.orbital_number.setToolTip(_translate('MainWindow', 'choose orbital (one-based integer)'))
        self.orbital_number.setText(_translate('MainWindow', '1'))
        self.label_9.setText(_translate('MainWindow', 'Spin'))
        self.spin_type_combo.setToolTip(_translate('MainWindow', 'choose spin up/down; average for paramagnetic system'))
        self.spin_type_combo.setItemText(0, _translate('MainWindow', 'average'))
        self.spin_type_combo.setItemText(1, _translate('MainWindow', 'up'))
        self.spin_type_combo.setItemText(2, _translate('MainWindow', 'down'))
        self.load_data_button.setText(_translate('MainWindow', 'Load data'))
        self.label_10.setText(_translate('MainWindow', 'Number of Matsubara frequencies'))
        self.num_mats_freq.setToolTip(_translate('MainWindow', 'How many Matsubara frequencies do you want to use for the continuation?'))
        self.show_data_button.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button.setText(_translate('MainWindow', 'Show data'))
        self.select_file_button.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button.setText(_translate('MainWindow', '...'))
        self.ignore_checkbox.setText(_translate('MainWindow', 'Ignore real part'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.w2dyn_tab), _translate('MainWindow', ' w2dynamics file'))
        self.inp_data_type_text.setItemText(0, _translate('MainWindow', 'Self-energy'))
        self.inp_data_type_text.setItemText(1, _translate('MainWindow', "Green's function"))
        self.label_14.setText(_translate('MainWindow', 'Load'))
        self.label_15.setText(_translate('MainWindow', 'from text file'))
        self.label_16.setText(_translate('MainWindow', 'file name'))
        self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
        self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
        self.select_file_button_2.setText(_translate('MainWindow', '...'))
        self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
        self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
        self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
        self.label_17.setText(_translate('MainWindow', 'Skip'))
        self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
        self.label_19.setText(_translate('MainWindow', 'Use'))
        self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
        self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
        self.menuMaxEnt.setTitle(_translate('MainWindow', 'MaxEnt'))
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """The Main Window of the graphical user interface.

    The class MainWindow inherits from Ui_MainWindow, which is
    defined in pade_ui.py. The latter file is autogenerated
    by pyuic from pade_ui.ui [`pyuic5 pade_ui.ui -o pade_ui.py`]
    The ui file can be edited by the QtDesigner.
    """

    def __init__(self, *args, obj=None, **kwargs):
        """Connect the widgets, instantiate the main classes."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Real-frequency grid object, seeded from the widget default texts.
        self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()), nw=int(self.num_real_freq.text()), type=str(self.grid_type_combo.currentText()))
        self.connect_realgrid_button()
        self.connect_wmax()
        self.connect_nw()
        self.connect_grid_type()
        # Placeholder InputData built from the (still empty) widget texts;
        # replaced when the user actually loads data.
        self.input_data = InputData(fname=str(self.inp_file_name.text()), iter_type=str(self.iteration_type_combo.currentText()), iter_num=str(self.iteration_number.text()), data_type=str(self.inp_data_type.currentText()), atom=str(self.atom_number.text()), orbital=str(self.orbital_number.text()), spin=str(self.spin_type_combo.currentText()), num_mats=str(self.num_mats_freq.text()))
        self.connect_select_button()
        self.connect_load_button()
        self.connect_show_button()
        self.connect_load_button_text()
        self.connect_show_button_2()
        self.connect_select_button_2()
        self.connect_doit_button()
        self.output_data = OutputData()
        self.connect_select_output_button()
        self.connect_save_button()
        # NOTE(review): connect_fname_input / connect_fname_output are defined
        # below but never called here -- confirm whether that is intentional.

    def connect_realgrid_button(self):
        """Regenerate the real-frequency grid on 'Generate' click."""
        self.gen_real_grid_button.clicked.connect((lambda : self.realgrid.create_grid()))

    def connect_wmax(self):
        """Propagate edits of the maximum real frequency to the grid object."""
        self.max_real_freq.returnPressed.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))
        self.max_real_freq.editingFinished.connect((lambda : self.realgrid.update_wmax(float(self.max_real_freq.text()))))

    def connect_nw(self):
        """Propagate edits of the number of real frequencies to the grid object."""
        self.num_real_freq.returnPressed.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))
        self.num_real_freq.editingFinished.connect((lambda : self.realgrid.update_nw(int(self.num_real_freq.text()))))

    def connect_grid_type(self):
        """Propagate grid-type combo-box selection to the grid object."""
        self.grid_type_combo.activated.connect((lambda : self.realgrid.update_type(str(self.grid_type_combo.currentText()))))

    def connect_fname_input(self):
        """Keep the InputData file name in sync with the input line edit."""
        self.inp_file_name.editingFinished.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))
        self.inp_file_name.textChanged.connect((lambda : self.input_data.update_fname(str(self.inp_file_name.text()))))

    def get_fname(self):
        """Open a file dialog to pick a w2dynamics HDF5 input file."""
        self.inp_file_name.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'HDF5 files (*.hdf5)')[0])

    def connect_select_button(self):
        """Wire the '...' button of the w2dynamics tab to the file dialog."""
        self.select_file_button.clicked.connect(self.get_fname)

    def get_fname_text(self):
        """Open a file dialog to pick a plain-text input file."""
        self.inp_file_name_2.setText(QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), 'text files (*.dat *.txt)')[0])

    def connect_select_button_2(self):
        """Wire the '...' button of the text tab to the file dialog."""
        self.select_file_button_2.clicked.connect(self.get_fname_text)

    def connect_show_button(self):
        """Wire 'Show data' (w2dynamics tab) to plot the loaded data."""
        self.show_data_button.clicked.connect((lambda : self.input_data.plot()))

    def connect_show_button_2(self):
        """Wire 'Show data' (text tab) to plot the loaded data."""
        self.show_data_button_2.clicked.connect((lambda : self.input_data.plot()))

    def load_w2dynamics_data(self):
        """Build a fresh InputData from the w2dynamics-tab widgets and load it."""
        self.input_data = InputData(fname=str(self.inp_file_name.text()), iter_type=str(self.iteration_type_combo.currentText()), iter_num=str(self.iteration_number.text()), data_type=str(self.inp_data_type.currentText()), atom=str(self.atom_number.text()), orbital=str(self.orbital_number.text()), spin=str(self.spin_type_combo.currentText()), num_mats=str(self.num_mats_freq.text()), ignore_real_part=self.ignore_checkbox.isChecked())
        self.input_data.load_data()

    def connect_load_button(self):
        """Wire 'Load data' (w2dynamics tab) to load_w2dynamics_data."""
        self.load_data_button.clicked.connect(self.load_w2dynamics_data)

    def load_text_data(self):
        """Build a fresh TextInputData from the text-tab widgets and read it."""
        self.input_data = TextInputData(fname=str(self.inp_file_name_2.text()), data_type=str(self.inp_data_type_text.currentText()), n_skip=str(self.n_skip.text()), num_mats=str(self.num_mats_freq_text.text()))
        self.input_data.read_data()

    def connect_load_button_text(self):
        """Wire 'Load data' (text tab) to load_text_data."""
        self.load_data_button_2.clicked.connect(self.load_text_data)

    def parse_mats_ind(self):
        """Parse the comma-separated Matsubara-index field into an int array."""
        # NOTE(review): self.mats_ind_inp is not created by the
        # Ui_MainWindow.setupUi defined above in this file -- verify which ui
        # class this window is actually paired with at runtime.
        mats_ind_str = self.mats_ind_inp.text()
        mats_list_str = [part.strip() for part in mats_ind_str.split(',')]
        if ('' in mats_list_str):
            # list.remove drops only the FIRST empty entry; an input like
            # 'a,,,b' would still fail in int() below.
            mats_list_str.remove('')
        mats_ind = np.array([int(ind) for ind in mats_list_str])
        print(mats_ind)
        return mats_ind

    def main_function(self):
        """Main function for the analytic continuation procedure.

        This function is called when the "Do it" button is clicked.
        It performs an analytical continuation for the present settings
        and shows a plot.
        """
        mats_ind = self.parse_mats_ind()
        self.ana_cont_probl = cont.AnalyticContinuationProblem(im_axis=self.input_data.mats[mats_ind], im_data=self.input_data.value[mats_ind], re_axis=self.realgrid.grid, kernel_mode='freq_fermionic')
        sol = self.ana_cont_probl.solve(method='pade')
        # Fine Matsubara axis up to 1.25x the largest selected frequency,
        # used to visually check the quality of the Pade interpolation.
        check_axis = np.linspace(0.0, (1.25 * self.input_data.mats[mats_ind[(- 1)]]), num=500)
        check = self.ana_cont_probl.solver.check(im_axis_fine=check_axis)
        self.output_data.update(self.realgrid.grid, sol.A_opt, self.input_data)
        (fig, ax) = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25))
        # Top left: resulting spectrum on the real-frequency grid.
        ax[(0, 0)].plot(self.realgrid.grid, sol.A_opt)
        ax[(0, 0)].set_xlabel('$\\omega$')
        ax[(0, 0)].set_ylabel('spectrum')
        # Top right: selected vs. full Matsubara data.
        ax[(0, 1)].plot(self.input_data.mats[mats_ind], self.input_data.value.real[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Re[selected data]')
        ax[(0, 1)].plot(self.input_data.mats[mats_ind], self.input_data.value.imag[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Im[selected data]')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.real, color='blue', ls=':', marker='x', markersize=5, label='Re[full data]')
        ax[(0, 1)].plot(self.input_data.mats, self.input_data.value.imag, color='green', ls=':', marker='+', markersize=5, label='Im[full data]')
        # Bottom left: selected data against the Pade interpolation.
        ax[(1, 0)].plot(self.input_data.mats[mats_ind], self.input_data.value.real[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Re[selected data]')
        ax[(1, 0)].plot(self.input_data.mats[mats_ind], self.input_data.value.imag[mats_ind], color='red', ls='None', marker='.', markersize=12, alpha=0.33, label='Im[selected data]')
        ax[(1, 0)].plot(check_axis, check.real, ls='--', color='gray', label='Re[Pade interpolation]')
        ax[(1, 0)].plot(check_axis, check.imag, color='gray', label='Im[Pade interpolation]')
        ax[(1, 0)].set_xlabel('$\\nu_n$')
        ax[(1, 0)].set_ylabel(self.input_data.data_type)
        ax[(1, 0)].legend()
        ax[(1, 0)].set_xlim(0.0, (1.05 * check_axis[(- 1)]))
        plt.tight_layout()
        plt.show()

    def connect_doit_button(self):
        """Trigger main_function when the 'Do it!' button is clicked."""
        self.doit_button.clicked.connect((lambda : self.main_function()))

    def connect_fname_output(self):
        """Keep the OutputData file name in sync with the output line edit."""
        self.out_file_name.editingFinished.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))
        # NOTE(review): this re-reads out_file_name when the *input* file name
        # changes -- looks like a copy-paste of connect_fname_input; confirm
        # the intended trigger is not out_file_name.textChanged.
        self.inp_file_name.textChanged.connect((lambda : self.output_data.update_fname(str(self.out_file_name.text()))))

    def get_fname_output(self):
        """Open a 'Save as' dialog and propagate the chosen output file name."""
        fname_out = QtWidgets.QFileDialog.getSaveFileName(self, 'Save as', '/'.join(self.input_data.fname.split('/')[:(- 1)]), 'DAT files (*.dat)')[0]
        self.out_file_name.setText(fname_out)
        self.output_data.update_fname(fname_out)

    def connect_select_output_button(self):
        """Wire the '...' tool button to the output-file save dialog."""
        self.output_directory_button.clicked.connect(self.get_fname_output)

    def save_output(self):
        """Save the result; print an error and return 1 if no file name is set."""
        fname_out = str(self.out_file_name.text())
        if (fname_out == ''):
            print('Error in saving: First you have to specify the output file name.')
            return 1
        self.output_data.update_fname(fname_out)
        self.output_data.save(interpolate=False)

    def connect_save_button(self):
        """Wire the 'Save' button to save_output."""
        self.save_button.clicked.connect((lambda : self.save_output()))
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName('MainWindow')
MainWindow.resize(800, 399)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName('centralwidget')
self.real_freq_frame = QtWidgets.QFrame(self.centralwidget)
self.real_freq_frame.setGeometry(QtCore.QRect(20, 20, 171, 171))
self.real_freq_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.real_freq_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.real_freq_frame.setObjectName('real_freq_frame')
self.label = QtWidgets.QLabel(self.real_freq_frame)
self.label.setGeometry(QtCore.QRect(10, 10, 141, 17))
self.label.setObjectName('label')
self.label_3 = QtWidgets.QLabel(self.real_freq_frame)
self.label_3.setGeometry(QtCore.QRect(10, 70, 31, 17))
self.label_3.setObjectName('label_3')
self.label_4 = QtWidgets.QLabel(self.real_freq_frame)
self.label_4.setGeometry(QtCore.QRect(10, 110, 21, 17))
self.label_4.setObjectName('label_4')
self.grid_type_combo = QtWidgets.QComboBox(self.real_freq_frame)
self.grid_type_combo.setGeometry(QtCore.QRect(10, 40, 141, 25))
self.grid_type_combo.setObjectName('grid_type_combo')
self.grid_type_combo.addItem('')
self.grid_type_combo.addItem('')
self.max_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
self.max_real_freq.setGeometry(QtCore.QRect(40, 70, 41, 25))
self.max_real_freq.setObjectName('max_real_freq')
self.num_real_freq = QtWidgets.QLineEdit(self.real_freq_frame)
self.num_real_freq.setGeometry(QtCore.QRect(40, 110, 41, 25))
self.num_real_freq.setObjectName('num_real_freq')
self.gen_real_grid_button = QtWidgets.QPushButton(self.real_freq_frame)
self.gen_real_grid_button.setGeometry(QtCore.QRect(90, 110, 71, 25))
self.gen_real_grid_button.setObjectName('gen_real_grid_button')
self.input_data_tabs = QtWidgets.QTabWidget(self.centralwidget)
self.input_data_tabs.setGeometry(QtCore.QRect(210, 20, 561, 171))
self.input_data_tabs.setObjectName('input_data_tabs')
self.w2dyn_tab = QtWidgets.QWidget()
self.w2dyn_tab.setObjectName('w2dyn_tab')
self.input_data_frame = QtWidgets.QFrame(self.w2dyn_tab)
self.input_data_frame.setGeometry(QtCore.QRect(0, 0, 561, 141))
self.input_data_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.input_data_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.input_data_frame.setObjectName('input_data_frame')
self.label_2 = QtWidgets.QLabel(self.input_data_frame)
self.label_2.setGeometry(QtCore.QRect(10, 10, 161, 17))
self.label_2.setObjectName('label_2')
self.inp_data_type = QtWidgets.QComboBox(self.input_data_frame)
self.inp_data_type.setGeometry(QtCore.QRect(180, 10, 121, 25))
self.inp_data_type.setObjectName('inp_data_type')
self.inp_data_type.addItem('')
self.inp_data_type.addItem('')
self.label_5 = QtWidgets.QLabel(self.input_data_frame)
self.label_5.setGeometry(QtCore.QRect(10, 40, 67, 17))
self.label_5.setObjectName('label_5')
self.inp_file_name = QtWidgets.QLineEdit(self.input_data_frame)
self.inp_file_name.setGeometry(QtCore.QRect(80, 40, 421, 25))
self.inp_file_name.setObjectName('inp_file_name')
self.label_6 = QtWidgets.QLabel(self.input_data_frame)
self.label_6.setGeometry(QtCore.QRect(10, 70, 67, 17))
self.label_6.setObjectName('label_6')
self.iteration_type_combo = QtWidgets.QComboBox(self.input_data_frame)
self.iteration_type_combo.setGeometry(QtCore.QRect(80, 70, 86, 25))
self.iteration_type_combo.setObjectName('iteration_type_combo')
self.iteration_type_combo.addItem('')
self.iteration_type_combo.addItem('')
self.iteration_type_combo.addItem('')
self.iteration_number = QtWidgets.QLineEdit(self.input_data_frame)
self.iteration_number.setGeometry(QtCore.QRect(170, 70, 31, 25))
self.iteration_number.setObjectName('iteration_number')
self.label_7 = QtWidgets.QLabel(self.input_data_frame)
self.label_7.setGeometry(QtCore.QRect(230, 70, 41, 17))
self.label_7.setObjectName('label_7')
self.atom_number = QtWidgets.QLineEdit(self.input_data_frame)
self.atom_number.setGeometry(QtCore.QRect(280, 70, 21, 25))
self.atom_number.setObjectName('atom_number')
self.label_8 = QtWidgets.QLabel(self.input_data_frame)
self.label_8.setGeometry(QtCore.QRect(320, 70, 51, 17))
self.label_8.setObjectName('label_8')
self.orbital_number = QtWidgets.QLineEdit(self.input_data_frame)
self.orbital_number.setGeometry(QtCore.QRect(380, 70, 21, 25))
self.orbital_number.setObjectName('orbital_number')
self.label_9 = QtWidgets.QLabel(self.input_data_frame)
self.label_9.setGeometry(QtCore.QRect(420, 70, 31, 17))
self.label_9.setObjectName('label_9')
self.spin_type_combo = QtWidgets.QComboBox(self.input_data_frame)
self.spin_type_combo.setGeometry(QtCore.QRect(460, 70, 81, 25))
self.spin_type_combo.setObjectName('spin_type_combo')
self.spin_type_combo.addItem('')
self.spin_type_combo.addItem('')
self.spin_type_combo.addItem('')
self.load_data_button = QtWidgets.QPushButton(self.input_data_frame)
self.load_data_button.setGeometry(QtCore.QRect(350, 110, 89, 25))
self.load_data_button.setObjectName('load_data_button')
self.label_10 = QtWidgets.QLabel(self.input_data_frame)
self.label_10.setGeometry(QtCore.QRect(10, 110, 241, 17))
self.label_10.setObjectName('label_10')
self.num_mats_freq = QtWidgets.QLineEdit(self.input_data_frame)
self.num_mats_freq.setGeometry(QtCore.QRect(250, 110, 41, 25))
self.num_mats_freq.setObjectName('num_mats_freq')
self.show_data_button = QtWidgets.QPushButton(self.input_data_frame)
self.show_data_button.setGeometry(QtCore.QRect(450, 110, 89, 25))
self.show_data_button.setObjectName('show_data_button')
self.select_file_button = QtWidgets.QToolButton(self.input_data_frame)
self.select_file_button.setGeometry(QtCore.QRect(510, 40, 26, 24))
self.select_file_button.setObjectName('select_file_button')
self.ignore_checkbox = QtWidgets.QCheckBox(self.input_data_frame)
self.ignore_checkbox.setGeometry(QtCore.QRect(320, 10, 131, 23))
self.ignore_checkbox.setObjectName('ignore_checkbox')
self.input_data_tabs.addTab(self.w2dyn_tab, '')
self.text_tab = QtWidgets.QWidget()
self.text_tab.setObjectName('text_tab')
self.inp_data_type_text = QtWidgets.QComboBox(self.text_tab)
self.inp_data_type_text.setGeometry(QtCore.QRect(50, 10, 121, 25))
self.inp_data_type_text.setObjectName('inp_data_type_text')
self.inp_data_type_text.addItem('')
self.inp_data_type_text.addItem('')
self.inp_data_type_text.addItem('')
self.label_14 = QtWidgets.QLabel(self.text_tab)
self.label_14.setGeometry(QtCore.QRect(10, 10, 41, 17))
self.label_14.setObjectName('label_14')
self.label_15 = QtWidgets.QLabel(self.text_tab)
self.label_15.setGeometry(QtCore.QRect(180, 10, 91, 17))
self.label_15.setObjectName('label_15')
self.label_16 = QtWidgets.QLabel(self.text_tab)
self.label_16.setGeometry(QtCore.QRect(10, 40, 67, 17))
self.label_16.setObjectName('label_16')
self.inp_file_name_2 = QtWidgets.QLineEdit(self.text_tab)
self.inp_file_name_2.setGeometry(QtCore.QRect(80, 40, 421, 25))
self.inp_file_name_2.setObjectName('inp_file_name_2')
self.select_file_button_2 = QtWidgets.QToolButton(self.text_tab)
self.select_file_button_2.setGeometry(QtCore.QRect(510, 40, 26, 24))
self.select_file_button_2.setObjectName('select_file_button_2')
self.load_data_button_2 = QtWidgets.QPushButton(self.text_tab)
self.load_data_button_2.setGeometry(QtCore.QRect(350, 110, 89, 25))
self.load_data_button_2.setObjectName('load_data_button_2')
self.show_data_button_2 = QtWidgets.QPushButton(self.text_tab)
self.show_data_button_2.setGeometry(QtCore.QRect(450, 110, 89, 25))
self.show_data_button_2.setObjectName('show_data_button_2')
self.label_17 = QtWidgets.QLabel(self.text_tab)
self.label_17.setGeometry(QtCore.QRect(10, 80, 31, 17))
self.label_17.setObjectName('label_17')
self.n_skip = QtWidgets.QLineEdit(self.text_tab)
self.n_skip.setGeometry(QtCore.QRect(50, 80, 31, 25))
self.n_skip.setObjectName('n_skip')
self.label_18 = QtWidgets.QLabel(self.text_tab)
self.label_18.setGeometry(QtCore.QRect(90, 80, 151, 17))
self.label_18.setObjectName('label_18')
self.label_19 = QtWidgets.QLabel(self.text_tab)
self.label_19.setGeometry(QtCore.QRect(10, 110, 31, 17))
self.label_19.setObjectName('label_19')
self.num_mats_freq_text = QtWidgets.QLineEdit(self.text_tab)
self.num_mats_freq_text.setGeometry(QtCore.QRect(50, 110, 31, 25))
self.num_mats_freq_text.setObjectName('num_mats_freq_text')
self.label_20 = QtWidgets.QLabel(self.text_tab)
self.label_20.setGeometry(QtCore.QRect(90, 110, 161, 17))
self.label_20.setObjectName('label_20')
self.input_data_tabs.addTab(self.text_tab, '')
self.doit_button = QtWidgets.QPushButton(self.centralwidget)
self.doit_button.setGeometry(QtCore.QRect(650, 230, 131, 41))
self.doit_button.setObjectName('doit_button')
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(50, 300, 91, 17))
self.label_12.setObjectName('label_12')
self.output_directory_button = QtWidgets.QToolButton(self.centralwidget)
self.output_directory_button.setGeometry(QtCore.QRect(140, 300, 26, 24))
self.output_directory_button.setObjectName('output_directory_button')
self.save_button = QtWidgets.QPushButton(self.centralwidget)
self.save_button.setGeometry(QtCore.QRect(650, 300, 89, 25))
self.save_button.setObjectName('save_button')
self.out_file_name = QtWidgets.QLineEdit(self.centralwidget)
self.out_file_name.setGeometry(QtCore.QRect(180, 300, 451, 25))
self.out_file_name.setObjectName('out_file_name')
self.mats_ind_inp = QtWidgets.QLineEdit(self.centralwidget)
self.mats_ind_inp.setGeometry(QtCore.QRect(240, 240, 391, 25))
self.mats_ind_inp.setObjectName('mats_ind_inp')
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(50, 240, 191, 17))
self.label_11.setObjectName('label_11')
self.real_freq_frame.raise_()
self.doit_button.raise_()
self.label_12.raise_()
self.output_directory_button.raise_()
self.save_button.raise_()
self.out_file_name.raise_()
self.mats_ind_inp.raise_()
self.label_11.raise_()
self.input_data_tabs.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName('menubar')
self.menuPade = QtWidgets.QMenu(self.menubar)
self.menuPade.setObjectName('menuPade')
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName('statusbar')
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuPade.menuAction())
self.retranslateUi(MainWindow)
self.input_data_tabs.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Assign the user-visible text of every widget through Qt's translation layer.

    Auto-generated (Qt Designer style) method: each literal is wrapped in
    QCoreApplication.translate('MainWindow', ...) so the UI can be localized.
    Called once at the end of setupUi; MainWindow is the window being built.
    Do not edit the literals by hand — regenerate from the .ui file instead.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
    # --- real-frequency grid frame: grid type, bounds, and point count ---
    self.real_freq_frame.setToolTip(_translate('MainWindow', 'configure real-frequency grid'))
    self.real_freq_frame.setWhatsThis(_translate('MainWindow', 'real-frequency grid'))
    self.label.setText(_translate('MainWindow', 'Real-frequency grid'))
    self.label_3.setText(_translate('MainWindow', 'max'))
    self.label_4.setText(_translate('MainWindow', 'n'))
    self.grid_type_combo.setToolTip(_translate('MainWindow', 'equispaced or centered grid (denser around Fermi energy)'))
    self.grid_type_combo.setItemText(0, _translate('MainWindow', 'equispaced symmetric'))
    self.grid_type_combo.setItemText(1, _translate('MainWindow', 'equispaced positive'))
    self.max_real_freq.setToolTip(_translate('MainWindow', 'upper border of real-frequency grid. (lower border is set symmetrically)'))
    self.max_real_freq.setText(_translate('MainWindow', '20'))
    self.num_real_freq.setToolTip(_translate('MainWindow', 'number frequencies on real axis; should be an odd number'))
    self.num_real_freq.setText(_translate('MainWindow', '401'))
    self.gen_real_grid_button.setText(_translate('MainWindow', 'Generate'))
    # --- w2dynamics tab: select quantity, file, iteration, atom/orbital/spin ---
    self.label_2.setText(_translate('MainWindow', 'Load w2dynamics data'))
    self.inp_data_type.setItemText(0, _translate('MainWindow', 'Self-energy'))
    self.inp_data_type.setItemText(1, _translate('MainWindow', "Green's function"))
    self.label_5.setText(_translate('MainWindow', 'file name'))
    self.inp_file_name.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
    self.label_6.setText(_translate('MainWindow', 'iteration'))
    self.iteration_type_combo.setItemText(0, _translate('MainWindow', 'DMFT'))
    self.iteration_type_combo.setItemText(1, _translate('MainWindow', 'STAT'))
    self.iteration_type_combo.setItemText(2, _translate('MainWindow', 'WORM'))
    self.iteration_number.setToolTip(_translate('MainWindow', 'integer; leave empty for last iteration'))
    self.label_7.setText(_translate('MainWindow', 'Atom'))
    self.atom_number.setToolTip(_translate('MainWindow', 'choose inequivalent atom (one-based integer)'))
    self.atom_number.setText(_translate('MainWindow', '1'))
    self.label_8.setText(_translate('MainWindow', 'Orbital'))
    self.orbital_number.setToolTip(_translate('MainWindow', 'choose orbital (one-based integer)'))
    self.orbital_number.setText(_translate('MainWindow', '1'))
    self.label_9.setText(_translate('MainWindow', 'Spin'))
    self.spin_type_combo.setToolTip(_translate('MainWindow', 'choose spin up/down; average for paramagnetic system'))
    self.spin_type_combo.setItemText(0, _translate('MainWindow', 'average'))
    self.spin_type_combo.setItemText(1, _translate('MainWindow', 'up'))
    self.spin_type_combo.setItemText(2, _translate('MainWindow', 'down'))
    self.load_data_button.setText(_translate('MainWindow', 'Load data'))
    self.label_10.setText(_translate('MainWindow', 'Number of Matsubara frequencies'))
    self.num_mats_freq.setToolTip(_translate('MainWindow', 'How many Matsubara frequencies do you want to use for the continuation?'))
    self.show_data_button.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
    self.show_data_button.setText(_translate('MainWindow', 'Show data'))
    self.select_file_button.setToolTip(_translate('MainWindow', 'choose an input file'))
    self.select_file_button.setText(_translate('MainWindow', '...'))
    self.ignore_checkbox.setText(_translate('MainWindow', 'Ignore real part'))
    self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.w2dyn_tab), _translate('MainWindow', ' w2dynamics file'))
    # --- plain-text-file tab: quantity, file, skip/use line counts ---
    self.inp_data_type_text.setItemText(0, _translate('MainWindow', 'Self-energy'))
    self.inp_data_type_text.setItemText(1, _translate('MainWindow', "Green's function"))
    self.inp_data_type_text.setItemText(2, _translate('MainWindow', 'bosonic'))
    self.label_14.setText(_translate('MainWindow', 'Load'))
    self.label_15.setText(_translate('MainWindow', 'from text file'))
    self.label_16.setText(_translate('MainWindow', 'file name'))
    self.inp_file_name_2.setToolTip(_translate('MainWindow', 'file path and name of a w2dynamics output file'))
    self.select_file_button_2.setToolTip(_translate('MainWindow', 'choose an input file'))
    self.select_file_button_2.setText(_translate('MainWindow', '...'))
    self.load_data_button_2.setText(_translate('MainWindow', 'Load data'))
    self.show_data_button_2.setToolTip(_translate('MainWindow', 'click this if you want to plot the data after loading'))
    self.show_data_button_2.setText(_translate('MainWindow', 'Show data'))
    self.label_17.setText(_translate('MainWindow', 'Skip'))
    self.label_18.setText(_translate('MainWindow', 'lines at the beginning'))
    self.label_19.setText(_translate('MainWindow', 'Use'))
    self.label_20.setText(_translate('MainWindow', 'Matsubara frequencies'))
    self.input_data_tabs.setTabText(self.input_data_tabs.indexOf(self.text_tab), _translate('MainWindow', 'text file'))
    # --- bottom controls: run continuation, choose output, save ---
    self.doit_button.setToolTip(_translate('MainWindow', 'perform the analytical continuation'))
    self.doit_button.setText(_translate('MainWindow', 'Do it!'))
    self.label_12.setText(_translate('MainWindow', 'Output file:'))
    self.output_directory_button.setToolTip(_translate('MainWindow', 'Choose a directory, where you want to save the output'))
    self.output_directory_button.setText(_translate('MainWindow', '...'))
    self.save_button.setToolTip(_translate('MainWindow', 'click this button to save the output'))
    self.save_button.setText(_translate('MainWindow', 'Save'))
    self.out_file_name.setToolTip(_translate('MainWindow', 'type full output name here (including path)'))
    self.mats_ind_inp.setToolTip(_translate('MainWindow', 'which Matsubara indices to use (start from 0). There must be AT LEAST TWO numbers'))
    self.mats_ind_inp.setText(_translate('MainWindow', '0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10'))
    self.label_11.setText(_translate('MainWindow', 'Use Matsubara frequencies'))
    self.menuPade.setTitle(_translate('MainWindow', 'Pade'))
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Antisymmetrized normalized Gaussian on *wgrid*.

    Places a Gaussian of total weight *weight* and standard deviation *width*
    at +maxpos and subtracts its mirror image at -maxpos, so the result is
    odd in frequency (vanishes at w = 0).
    """
    prefactor = weight / (np.sqrt(2.0 * np.pi) * width)
    spectrum = prefactor * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
    spectrum -= prefactor * np.exp(-0.5 * (wgrid + maxpos) ** 2 / width ** 2)
    return spectrum
|
def noise(sigma, iwgrid):
    """Gaussian noise with standard deviation *sigma*, one sample per entry of *iwgrid*."""
    n_points = iwgrid.shape[0]
    return sigma * np.random.randn(n_points)
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Normalized Gaussian of total weight *weight* centered at *maxpos*.

    NOTE(review): this file contains several duplicate definitions of
    gauss_peak (symmetric and antisymmetrized variants) — the last one
    defined wins at import time; confirm which variant is intended.
    """
    prefactor = weight / (np.sqrt(2.0 * np.pi) * width)
    return prefactor * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """Return i.i.d. Gaussian noise (std = *sigma*) matching the length of *iwgrid*."""
    samples = np.random.randn(iwgrid.shape[0])
    return samples * sigma
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Gaussian peak on *wgrid*: area *weight*, center *maxpos*, std *width*."""
    amplitude = weight / (np.sqrt(2.0 * np.pi) * width)
    shifted = wgrid - maxpos
    return amplitude * np.exp(-0.5 * shifted ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """Gaussian noise vector, std *sigma*, one value per frequency in *iwgrid*."""
    return sigma * np.random.randn(len(iwgrid))
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Odd (particle-hole antisymmetric) pair of Gaussians on *wgrid*.

    A normalized Gaussian of weight *weight* at +maxpos minus its mirror
    at -maxpos; the combination is zero at w = 0.
    """
    norm = weight / (np.sqrt(2.0 * np.pi) * width)
    result = norm * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
    result -= norm * np.exp(-0.5 * (wgrid + maxpos) ** 2 / width ** 2)
    return result
|
def noise(sigma, iwgrid):
    """Draw standard-normal samples scaled by *sigma*, one per point of *iwgrid*."""
    count = iwgrid.shape[0]
    draws = np.random.randn(count)
    return draws * sigma
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Evaluate a normalized Gaussian (area *weight*) at the frequencies *wgrid*."""
    height = weight / (np.sqrt(2.0 * np.pi) * width)
    exponent = -0.5 * (wgrid - maxpos) ** 2 / width ** 2
    return height * np.exp(exponent)
|
def noise(sigma, iwgrid):
    """Gaussian noise of width *sigma*; the length is taken from *iwgrid*."""
    return np.random.randn(iwgrid.shape[0]) * sigma
|
def gauss_peak(maxpos, width, weight, wgrid):
    """Single Gaussian peak: weight *weight*, mean *maxpos*, std *width*, on *wgrid*."""
    scale = weight / (np.sqrt(2.0 * np.pi) * width)
    return scale * np.exp(-0.5 * (wgrid - maxpos) ** 2 / width ** 2)
|
def noise(sigma, iwgrid):
    """Return *sigma*-scaled standard-normal noise, one entry per row of *iwgrid*."""
    scaled = sigma * np.random.randn(iwgrid.shape[0])
    return scaled
|
def update_from_loss_module(monitors, output_dict, loss_update):
    """Merge a loss module's result into the running dictionaries.

    *loss_update* is a (monitors, outputs) pair; both target dicts are
    mutated in place and existing keys are overwritten. Returns None.
    """
    loss_monitors, loss_outputs = loss_update
    monitors.update(loss_monitors)
    output_dict.update(loss_outputs)
|
class Model(LeftModel):
    """LEFT-style QA model that executes pre-parsed FOL programs over motion scenes.

    Loads utterance->parsing dictionaries produced offline for the train and
    test splits and uses an NCGeneralizedFOLExecutor to run each parsed
    expression against per-sequence scene-graph encodings.
    """

    def __init__(self, parsed_train_path, parsed_test_path, output_vocab):
        """Build the model from two pickled utterance->parsing dicts.

        parsed_train_path / parsed_test_path: paths to pickled dicts mapping
        an utterance string to its parsed expression (one per split).
        output_vocab: forwarded to LeftModel.__init__.
        """
        self.parsed_train_path = parsed_train_path
        self.parsed_test_path = parsed_test_path
        # critical level so the paths always appear in logs regardless of verbosity
        logger.critical(('Train parsing: ' + self.parsed_train_path))
        logger.critical(('Test parsing: ' + self.parsed_test_path))
        # NOTE(review): the domain is built from the TEST parsings only — confirm
        # this is intentional (test vocabulary assumed to cover train).
        domain = make_domain(self.parsed_test_path)
        super().__init__(domain, output_vocab)
        # local import — presumably avoids a circular import at module load time
        from left.generalized_fol_executor import NCGeneralizedFOLExecutor
        self.executor = NCGeneralizedFOLExecutor(self.domain, self.parser, allow_shift_grounding=True)
        train_utterance_to_parsed_dict = io.load_pkl(self.parsed_train_path)
        test_utterance_to_parsed_dict = io.load_pkl(self.parsed_test_path)
        # Merge both splits into one lookup; on duplicate utterances the TEST
        # parsing wins (update overwrites the train entry).
        utterance_to_parsed_dict = train_utterance_to_parsed_dict.copy()
        utterance_to_parsed_dict.update(test_utterance_to_parsed_dict)
        self.utterance_to_parsed_dict = utterance_to_parsed_dict
        # Sort so the concept -> index mapping below is deterministic across runs.
        self.attribute_concepts.sort()
        logger.critical(('Num attribute concepts: ' + str(len(self.attribute_concepts))))
        k = self.attribute_concepts
        v = list(range(len(self.attribute_concepts)))
        self.attribute_class_to_idx = dict(zip(k, v))

    def forward(self, feed_dict):
        """Run one batch: ground each question's cached parsing and execute it.

        Returns (loss, monitors, outputs) in training mode, otherwise the
        outputs dict with monitors attached under 'monitors'.
        """
        feed_dict = GView(feed_dict)
        (monitors, outputs) = ({}, {})
        f_sng = self.forward_sng(feed_dict)
        (results, executions, parsings, scored) = (list(), list(), list(), list())
        for i in range(len(feed_dict.program_tree)):
            # Ground the executor in this sequence's scene-graph features for
            # the duration of parsing + execution.
            with self.executor.with_grounding(self.grounding_cls(f_sng[i], self, self.training, self.attribute_class_to_idx, None)):
                this_input_str = feed_dict.question_text[i]
                # Parsings are precomputed offline; the utterance text is the key.
                parsing_list = tuple([self.utterance_to_parsed_dict[this_input_str]])
                parsing = self.parser.parse_expression(parsing_list[0])
                execution = self.executor.execute(parsing).tensor
                # 'program' is an alias of the execution result (kept for the
                # (parsing, program, execution) result-tuple shape).
                program = execution
                results.append((parsing, program, execution))
                executions.append(execution)
                parsings.append(parsing)
                # Every item is marked scored; no per-item filtering happens here.
                scored.append(1)
        outputs['parsing'] = parsings
        outputs['results'] = results
        outputs['executions'] = executions
        outputs['scored'] = scored
        update_from_loss_module(monitors, outputs, self.qa_loss(outputs['executions'], feed_dict.answer, feed_dict.question_type))
        if self.training:
            loss = monitors['loss/qa']
            return (loss, monitors, outputs)
        else:
            outputs['monitors'] = monitors
            return outputs

    def extract_concepts(self, domain):
        """Return (arity_1, arity_2, arity_3) concept lists for *domain*.

        Arity-2/3 concepts come from the domain itself; arity-1 concepts are
        taken from the fixed Motion/Part/Direction attribute mapping instead.
        """
        from left.domain import read_concepts_v2
        (_, arity_2, arity_3) = read_concepts_v2(domain)
        from concepts.benchmark.vision_language.babel_qa.humanmotion_constants import attribute_concepts_mapping
        arity_1 = ((attribute_concepts_mapping['Motion'] + attribute_concepts_mapping['Part']) + attribute_concepts_mapping['Direction'])
        return (arity_1, arity_2, arity_3)

    def forward_sng(self, feed_dict):
        """Encode joints and split the flat per-segment encodings per sequence.

        Returns a list (one entry per sequence) of dicts with 'attribute',
        'relation' and 'output_vocab' encodings sliced by feed_dict.num_segs.
        """
        (motion_encodings, motion_encodings_rel, motion_encodings_output_vocab) = self.scene_graph(feed_dict.joints)
        f_sng = []
        start_seg = 0
        for seq_num_segs in feed_dict.num_segs:
            f_sng.append({'attribute': motion_encodings[start_seg:(start_seg + seq_num_segs)], 'relation': motion_encodings_rel[start_seg:(start_seg + seq_num_segs)], 'output_vocab': motion_encodings_output_vocab[start_seg:(start_seg + seq_num_segs)]})
            start_seg += seq_num_segs
        # All segments must be consumed exactly once across the sequences.
        assert (start_seg == motion_encodings.size()[0])
        return f_sng
|
def make_model(parsed_train_path, parsed_test_path, output_vocab):
    """Factory: construct the QA Model from the two parsing-dict paths and vocab."""
    model = Model(parsed_train_path, parsed_test_path, output_vocab)
    return model
|
def make_dataset(mode, scenes_json, questions_json, image_root, output_vocab_json):
    """Build the transfer dataset for *mode*, selecting its query-list key."""
    query_list_key = g_query_list_keys[mode]
    return make_custom_transfer_dataset(
        scenes_json,
        questions_json,
        image_root=image_root,
        output_vocab_json=output_vocab_json,
        query_list_key=query_list_key,
        custom_fields=[],
        incl_scene=False,
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.