repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DiffMIC | DiffMIC-main/diffusion_trainer.py | import logging
import time
import gc
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
from scipy.stats import ttest_rel
from tqdm import tqdm
from ema import EMA
from model import *
from pretraining.dcg import DCG as AuxCls
from pretraining.resnet import ResNet18
from utils import *
from diffusion_utils import *
from tqdm import tqdm
plt.style.use('ggplot')
class Diffusion(object):
    """
    Trainer/evaluator for the DiffMIC conditional diffusion classifier.

    Precomputes the full beta/alpha schedule and the posterior coefficients
    used by forward (q_sample) and reverse (p_sample_loop) diffusion, and
    instantiates the auxiliary DCG guidance classifier when configured.
    """

    def __init__(self, args, config, device=None):
        """
        Args:
            args: command-line arguments (log paths, resume flags, ...).
            config: parsed config namespace (model / diffusion / data sections).
            device: torch device; auto-selects CUDA when available if None.
        """
        self.args = args
        self.config = config
        if device is None:
            device = (
                torch.device("cuda")
                if torch.cuda.is_available()
                else torch.device("cpu")
            )
        self.device = device
        self.model_var_type = config.model.var_type
        self.num_timesteps = config.diffusion.timesteps
        self.test_num_timesteps = config.diffusion.test_timesteps
        self.vis_step = config.diffusion.vis_step
        self.num_figs = config.diffusion.num_figs
        # Beta schedule and derived alpha quantities, all kept on self.device.
        betas = make_beta_schedule(schedule=config.diffusion.beta_schedule, num_timesteps=self.num_timesteps,
                                   start=config.diffusion.beta_start, end=config.diffusion.beta_end)
        betas = self.betas = betas.float().to(self.device)
        self.betas_sqrt = torch.sqrt(betas)
        alphas = 1.0 - betas
        self.alphas = alphas
        self.one_minus_betas_sqrt = torch.sqrt(alphas)
        alphas_cumprod = alphas.cumprod(dim=0)
        self.alphas_bar_sqrt = torch.sqrt(alphas_cumprod)
        self.one_minus_alphas_bar_sqrt = torch.sqrt(1 - alphas_cumprod)
        if config.diffusion.beta_schedule == "cosine":
            self.one_minus_alphas_bar_sqrt *= 0.9999  # avoid division by 0 for 1/sqrt(alpha_bar_t) during inference
        # alpha_bar_{t-1}, with alpha_bar_{-1} defined as 1.
        alphas_cumprod_prev = torch.cat(
            [torch.ones(1).to(device), alphas_cumprod[:-1]], dim=0
        )
        self.alphas_cumprod_prev = alphas_cumprod_prev
        # Coefficients of the closed-form posterior q(y_{t-1} | y_t, y_0).
        self.posterior_mean_coeff_1 = (
            betas * torch.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        )
        self.posterior_mean_coeff_2 = (
            torch.sqrt(alphas) * (1 - alphas_cumprod_prev) / (1 - alphas_cumprod)
        )
        posterior_variance = (
            betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        )
        self.posterior_variance = posterior_variance
        if self.model_var_type == "fixedlarge":
            self.logvar = betas.log()
            # torch.cat(
            # [posterior_variance[1:2], betas[1:]], dim=0).log()
        elif self.model_var_type == "fixedsmall":
            self.logvar = posterior_variance.clamp(min=1e-20).log()
        # initial prediction model as guided condition
        if config.diffusion.apply_aux_cls:
            self.cond_pred_model = AuxCls(config).to(self.device)
            self.aux_cost_function = nn.CrossEntropyLoss()
        else:
            pass
        # scaling temperature for NLL and ECE computation
        self.tuned_scale_T = None
# Compute guiding prediction as diffusion condition
def compute_guiding_prediction(self, x):
"""
Compute y_0_hat, to be used as the Gaussian mean at time step T.
"""
if self.config.model.arch == "simple" or \
(self.config.model.arch == "linear" and self.config.data.dataset == "MNIST"):
x = torch.flatten(x, 1)
#y_pred = self.cond_pred_model(x)
y_pred, y_global, y_local = self.cond_pred_model(x)
return y_pred, y_global, y_local
def evaluate_guidance_model(self, dataset_loader):
"""
Evaluate guidance model by reporting train or test set accuracy.
"""
y_acc_list = []
for step, feature_label_set in tqdm(enumerate(dataset_loader)):
# logging.info("\nEvaluating test Minibatch {}...\n".format(step))
# minibatch_start = time.time()
x_batch, y_labels_batch = feature_label_set
y_labels_batch = y_labels_batch.reshape(-1, 1)
y_pred_prob,_,_ = self.compute_guiding_prediction(
x_batch.to(self.device)) # (batch_size, n_classes)
y_pred_prob = y_pred_prob.softmax(dim=1)
y_pred_label = torch.argmax(y_pred_prob, 1, keepdim=True).cpu().detach().numpy() # (batch_size, 1)
y_labels_batch = y_labels_batch.cpu().detach().numpy()
y_acc = y_pred_label == y_labels_batch # (batch_size, 1)
#print(y_acc)
if len(y_acc_list) == 0:
y_acc_list = y_acc
else:
y_acc_list = np.concatenate([y_acc_list, y_acc], axis=0)
y_acc_all = np.mean(y_acc_list)
return y_acc_all
def nonlinear_guidance_model_train_step(self, x_batch, y_batch, aux_optimizer):
"""
One optimization step of the non-linear guidance model that predicts y_0_hat.
"""
y_batch_pred,y_global,y_local = self.compute_guiding_prediction(x_batch)
# y_batch_pred = y_batch_pred.softmax(dim=1)
#aux_cost = self.aux_cost_function(y_batch_pred, y_batch)+self.aux_cost_function(y_global, y_batch)+self.aux_cost_function(y_local, y_batch)
aux_cost = self.aux_cost_function(y_batch_pred, y_batch)
# update non-linear guidance model
aux_optimizer.zero_grad()
aux_cost.backward()
aux_optimizer.step()
return aux_cost.cpu().item()
    def train(self):
        """
        Full DiffMIC training loop.

        Stages:
          1. Build data loaders and the conditional diffusion model.
          2. Load a saved guidance (DCG) classifier, or pre-train one.
          3. Train the diffusion noise-estimation network (optionally jointly
             with the guidance classifier), periodically validating by full
             reverse-diffusion sampling and checkpointing the best model.
        """
        args = self.args
        config = self.config
        tb_logger = self.config.tb_logger
        data_object, train_dataset, test_dataset = get_dataset(args, config)
        print('loading dataset..')
        train_loader = data.DataLoader(
            train_dataset,
            batch_size=config.training.batch_size,
            shuffle=True,
            num_workers=config.data.num_workers,
            #sampler=sampler
        )
        test_loader = data.DataLoader(
            test_dataset,
            batch_size=config.testing.batch_size,
            shuffle=False,
            num_workers=config.data.num_workers,
        )
        print('successfully load')
        model = ConditionalModel(config, guidance=config.diffusion.include_guidance)
        model = model.to(self.device)
        # Baseline accuracy of the guidance classifier before any training.
        y_acc_aux_model = self.evaluate_guidance_model(test_loader)
        logging.info("\nBefore training, the guidance classifier accuracy on the test set is {:.8f}.\n\n".format(
            y_acc_aux_model))
        optimizer = get_optimizer(self.config.optim, model.parameters())
        criterion = nn.CrossEntropyLoss()
        brier_score = nn.MSELoss()
        # apply an auxiliary optimizer for the guidance classifier
        if config.diffusion.apply_aux_cls:
            aux_optimizer = get_optimizer(self.config.aux_optim,
                                          self.cond_pred_model.parameters())
        if self.config.model.ema:
            ema_helper = EMA(mu=self.config.model.ema_rate)
            ema_helper.register(model)
        else:
            ema_helper = None
        if config.diffusion.apply_aux_cls:
            if hasattr(config.diffusion, "trained_aux_cls_ckpt_path"):  # load saved auxiliary classifier
                aux_states = torch.load(os.path.join(config.diffusion.trained_aux_cls_ckpt_path,
                                                     config.diffusion.trained_aux_cls_ckpt_name),
                                        map_location=self.device)
                self.cond_pred_model.load_state_dict(aux_states['state_dict'], strict=True)
                self.cond_pred_model.eval()
            elif hasattr(config.diffusion, "trained_aux_cls_log_path"):
                aux_states = torch.load(os.path.join(config.diffusion.trained_aux_cls_log_path, "aux_ckpt.pth"),
                                        map_location=self.device)
                self.cond_pred_model.load_state_dict(aux_states[0], strict=True)
                self.cond_pred_model.eval()
            else:  # pre-train the guidance auxiliary classifier
                assert config.diffusion.aux_cls.pre_train
                self.cond_pred_model.train()
                pretrain_start_time = time.time()
                for epoch in range(config.diffusion.aux_cls.n_pretrain_epochs):
                    for feature_label_set in train_loader:
                        if config.data.dataset == "gaussian_mixture":
                            x_batch, y_one_hot_batch, y_logits_batch, y_labels_batch = feature_label_set
                        else:
                            x_batch, y_labels_batch = feature_label_set
                            y_one_hot_batch, y_logits_batch = cast_label_to_one_hot_and_prototype(y_labels_batch,
                                                                                                  config)
                        aux_loss = self.nonlinear_guidance_model_train_step(x_batch.to(self.device),
                                                                            y_one_hot_batch.to(self.device),
                                                                            aux_optimizer)
                    if epoch % config.diffusion.aux_cls.logging_interval == 0:
                        logging.info(
                            f"epoch: {epoch}, guidance auxiliary classifier pre-training loss: {aux_loss}"
                        )
                pretrain_end_time = time.time()
                logging.info("\nPre-training of guidance auxiliary classifier took {:.4f} minutes.\n".format(
                    (pretrain_end_time - pretrain_start_time) / 60))
                # save auxiliary model after pre-training
                aux_states = [
                    self.cond_pred_model.state_dict(),
                    aux_optimizer.state_dict(),
                ]
                torch.save(aux_states, os.path.join(self.args.log_path, "aux_ckpt.pth"))
                # report accuracy on both training and test set for the pre-trained auxiliary classifier
                y_acc_aux_model = self.evaluate_guidance_model(train_loader)
                logging.info("\nAfter pre-training, guidance classifier accuracy on the training set is {:.8f}.".format(
                    y_acc_aux_model))
                y_acc_aux_model = self.evaluate_guidance_model(test_loader)
                logging.info("\nAfter pre-training, guidance classifier accuracy on the test set is {:.8f}.\n".format(
                    y_acc_aux_model))
        # --- diffusion model training ---
        if not self.args.train_guidance_only:
            start_epoch, step = 0, 0
            if self.args.resume_training:
                states = torch.load(os.path.join(self.args.log_path, "ckpt.pth"),
                                    map_location=self.device)
                model.load_state_dict(states[0])
                states[1]["param_groups"][0]["eps"] = self.config.optim.eps
                optimizer.load_state_dict(states[1])
                start_epoch = states[2]
                step = states[3]
                if self.config.model.ema:
                    ema_helper.load_state_dict(states[4])
                # load auxiliary model (only when it was trained locally, not loaded from a ckpt path)
                if config.diffusion.apply_aux_cls and (
                        hasattr(config.diffusion, "trained_aux_cls_ckpt_path") is False) and (
                        hasattr(config.diffusion, "trained_aux_cls_log_path") is False):
                    aux_states = torch.load(os.path.join(self.args.log_path, "aux_ckpt.pth"),
                                            map_location=self.device)
                    self.cond_pred_model.load_state_dict(aux_states[0])
                    aux_optimizer.load_state_dict(aux_states[1])
            max_accuracy = 0.0
            if config.diffusion.noise_prior:  # apply 0 instead of f_phi(x) as prior mean
                logging.info("Prior distribution at timestep T has a mean of 0.")
            if args.add_ce_loss:
                logging.info("Apply cross entropy as an auxiliary loss during training.")
            for epoch in range(start_epoch, self.config.training.n_epochs):
                data_start = time.time()
                data_time = 0
                for i, feature_label_set in enumerate(train_loader):
                    if config.data.dataset == "gaussian_mixture":
                        x_batch, y_one_hot_batch, y_logits_batch, y_labels_batch = feature_label_set
                    else:
                        x_batch, y_labels_batch = feature_label_set
                        y_one_hot_batch, y_logits_batch = cast_label_to_one_hot_and_prototype(y_labels_batch, config)
                        # y_labels_batch = y_labels_batch.reshape(-1, 1)
                    if config.optim.lr_schedule:
                        adjust_learning_rate(optimizer, i / len(train_loader) + epoch, config)
                    n = x_batch.size(0)
                    # record unflattened x as input to guidance aux classifier
                    x_unflat_batch = x_batch.to(self.device)
                    if config.data.dataset == "toy" or config.model.arch in ["simple", "linear"]:
                        x_batch = torch.flatten(x_batch, 1)
                    data_time += time.time() - data_start
                    model.train()
                    self.cond_pred_model.eval()
                    step += 1
                    # antithetic sampling of timesteps: t and (T-1-t) pairs reduce variance
                    t = torch.randint(
                        low=0, high=self.num_timesteps, size=(n // 2 + 1,)
                    ).to(self.device)
                    t = torch.cat([t, self.num_timesteps - 1 - t], dim=0)[:n]
                    # noise estimation loss
                    x_batch = x_batch.to(self.device)
                    # y_0_batch = y_logits_batch.to(self.device)
                    y_0_hat_batch, y_0_global, y_0_local = self.compute_guiding_prediction(x_unflat_batch)
                    y_0_hat_batch = y_0_hat_batch.softmax(dim=1)
                    y_0_global, y_0_local = y_0_global.softmax(dim=1), y_0_local.softmax(dim=1)
                    y_T_mean = y_0_hat_batch
                    if config.diffusion.noise_prior:  # apply 0 instead of f_phi(x) as prior mean
                        y_T_mean = torch.zeros(y_0_hat_batch.shape).to(y_0_hat_batch.device)
                    y_0_batch = y_one_hot_batch.to(self.device)
                    e = torch.randn_like(y_0_batch).to(y_0_batch.device)
                    # Diffuse the one-hot label towards three different priors
                    # (fused / global / local) with the SAME noise sample e.
                    y_t_batch = q_sample(y_0_batch, y_T_mean,
                                         self.alphas_bar_sqrt, self.one_minus_alphas_bar_sqrt, t, noise=e)
                    y_t_batch_global = q_sample(y_0_batch, y_0_global,
                                                self.alphas_bar_sqrt, self.one_minus_alphas_bar_sqrt, t, noise=e)
                    y_t_batch_local = q_sample(y_0_batch, y_0_local,
                                               self.alphas_bar_sqrt, self.one_minus_alphas_bar_sqrt, t, noise=e)
                    # output = model(x_batch, y_t_batch, t, y_T_mean)
                    output = model(x_batch, y_t_batch, t, y_0_hat_batch)
                    output_global = model(x_batch, y_t_batch_global, t, y_0_global)
                    output_local = model(x_batch, y_t_batch_local, t, y_0_local)
                    # MSE on the fused branch plus MMD alignment of the two auxiliary branches.
                    loss = (e - output).square().mean() + 0.5*(compute_mmd(e,output_global) + compute_mmd(e,output_local))  # use the same noise sample e during training to compute loss
                    # cross-entropy for y_0 reparameterization
                    loss0 = torch.tensor([0])
                    if args.add_ce_loss:
                        y_0_reparam_batch = y_0_reparam(model, x_batch, y_t_batch, y_0_hat_batch, y_T_mean, t,
                                                        self.one_minus_alphas_bar_sqrt)
                        raw_prob_batch = -(y_0_reparam_batch - 1) ** 2
                        loss0 = criterion(raw_prob_batch, y_labels_batch.to(self.device))
                        loss += config.training.lambda_ce * loss0
                    if not tb_logger is None:
                        tb_logger.add_scalar("loss", loss, global_step=step)
                    if step % self.config.training.logging_freq == 0 or step == 1:
                        logging.info(
                            (
                                f"epoch: {epoch}, step: {step}, CE loss: {loss0.item()}, "
                                f"Noise Estimation loss: {loss.item()}, " +
                                f"data time: {data_time / (i + 1)}"
                            )
                        )
                    # optimize diffusion model that predicts eps_theta
                    optimizer.zero_grad()
                    loss.backward()
                    try:
                        torch.nn.utils.clip_grad_norm_(
                            model.parameters(), config.optim.grad_clip
                        )
                    except Exception:
                        # grad_clip may be absent from the config; clipping is best-effort
                        pass
                    optimizer.step()
                    if self.config.model.ema:
                        ema_helper.update(model)
                    # joint train aux classifier along with diffusion model
                    if config.diffusion.apply_aux_cls and config.diffusion.aux_cls.joint_train:
                        self.cond_pred_model.train()
                        aux_loss = self.nonlinear_guidance_model_train_step(x_unflat_batch, y_0_batch,
                                                                            aux_optimizer)
                        if step % self.config.training.logging_freq == 0 or step == 1:
                            logging.info(
                                f"meanwhile, guidance auxiliary classifier joint-training loss: {aux_loss}"
                            )
                    # save diffusion model
                    if step % self.config.training.snapshot_freq == 0 or step == 1:
                        states = [
                            model.state_dict(),
                            optimizer.state_dict(),
                            epoch,
                            step,
                        ]
                        if self.config.model.ema:
                            states.append(ema_helper.state_dict())
                        if step > 1:  # skip saving the initial ckpt
                            torch.save(
                                states,
                                os.path.join(self.args.log_path, "ckpt_{}.pth".format(step)),
                            )
                        # save current states
                        torch.save(states, os.path.join(self.args.log_path, "ckpt.pth"))
                        # save auxiliary model
                        if config.diffusion.apply_aux_cls and config.diffusion.aux_cls.joint_train:
                            aux_states = [
                                self.cond_pred_model.state_dict(),
                                aux_optimizer.state_dict(),
                            ]
                            if step > 1:  # skip saving the initial ckpt
                                torch.save(
                                    aux_states,
                                    os.path.join(self.args.log_path, "aux_ckpt_{}.pth".format(step)),
                                )
                            torch.save(aux_states, os.path.join(self.args.log_path, "aux_ckpt.pth"))
                    data_start = time.time()
                logging.info(
                    (f"epoch: {epoch}, step: {step}, CE loss: {loss0.item()}, Noise Estimation loss: {loss.item()}, " +
                     f"data time: {data_time / (i + 1)}")
                )
                # Evaluate by full reverse-diffusion sampling on the test set.
                if epoch % self.config.training.validation_freq == 0 \
                        or epoch + 1 == self.config.training.n_epochs:
                    model.eval()
                    self.cond_pred_model.eval()
                    acc_avg = 0.
                    kappa_avg = 0.
                    y1_true = None
                    y1_pred = None
                    for test_batch_idx, (images, target) in enumerate(test_loader):
                        images_unflat = images.to(self.device)
                        if config.data.dataset == "toy" \
                                or config.model.arch == "simple" \
                                or config.model.arch == "linear":
                            images = torch.flatten(images, 1)
                        images = images.to(self.device)
                        target = target.to(self.device)
                        # target_vec = nn.functional.one_hot(target).float().to(self.device)
                        with torch.no_grad():
                            target_pred, y_global, y_local = self.compute_guiding_prediction(images_unflat)
                            target_pred = target_pred.softmax(dim=1)
                            # prior mean at timestep T
                            y_T_mean = target_pred
                            if config.diffusion.noise_prior:  # apply 0 instead of f_phi(x) as prior mean
                                y_T_mean = torch.zeros(target_pred.shape).to(target_pred.device)
                            if not config.diffusion.noise_prior:  # apply f_phi(x) instead of 0 as prior mean
                                target_pred, y_global, y_local = self.compute_guiding_prediction(images_unflat)
                                target_pred = target_pred.softmax(dim=1)
                            label_t_0 = p_sample_loop(model, images, target_pred, y_T_mean,
                                                      self.num_timesteps, self.alphas,
                                                      self.one_minus_alphas_bar_sqrt,
                                                      only_last_sample=True)
                            y1_pred = torch.cat([y1_pred, label_t_0]) if y1_pred is not None else label_t_0
                            y1_true = torch.cat([y1_true, target]) if y1_true is not None else target
                            acc_avg += accuracy(label_t_0.detach().cpu(), target.cpu())[0].item()
                            # kappa/F1 recomputed each batch over the running
                            # concatenation, so their final values cover the whole set
                            kappa_avg = cohen_kappa(y1_pred.detach().cpu(), y1_true.cpu()).item()
                            f1_avg = compute_f1_score(y1_true, y1_pred).item()
                    acc_avg /= (test_batch_idx + 1)
                    #kappa_avg /= (test_batch_idx + 1)
                    if acc_avg > max_accuracy:
                        logging.info("Update best accuracy at Epoch {}.".format(epoch))
                        states = [
                            model.state_dict(),
                            optimizer.state_dict(),
                            epoch,
                            step,
                        ]
                        torch.save(states, os.path.join(self.args.log_path, "ckpt_best.pth"))
                        aux_states = [
                            self.cond_pred_model.state_dict(),
                            aux_optimizer.state_dict(),
                        ]
                        torch.save(aux_states, os.path.join(self.args.log_path, "aux_ckpt_best.pth"))
                    max_accuracy = max(max_accuracy, acc_avg)
                    if not tb_logger is None:
                        tb_logger.add_scalar('accuracy', acc_avg, global_step=step)
                    logging.info(
                        (
                            f"epoch: {epoch}, step: {step}, " +
                            f"Average accuracy: {acc_avg}, Average Kappa: {kappa_avg}, Average F1: {f1_avg}," +
                            f"Max accuracy: {max_accuracy:.2f}%"
                        )
                    )
            # save the model after training is finished
            states = [
                model.state_dict(),
                optimizer.state_dict(),
                epoch,
                step,
            ]
            if self.config.model.ema:
                states.append(ema_helper.state_dict())
            torch.save(states, os.path.join(self.args.log_path, "ckpt.pth"))
            # save auxiliary model after training is finished
            if config.diffusion.apply_aux_cls and config.diffusion.aux_cls.joint_train:
                aux_states = [
                    self.cond_pred_model.state_dict(),
                    aux_optimizer.state_dict(),
                ]
                torch.save(aux_states, os.path.join(self.args.log_path, "aux_ckpt.pth"))
                # report training set accuracy if applied joint training
                y_acc_aux_model = self.evaluate_guidance_model(train_loader)
                logging.info("After joint-training, guidance classifier accuracy on the training set is {:.8f}.".format(
                    y_acc_aux_model))
                # report test set accuracy if applied joint training
                y_acc_aux_model = self.evaluate_guidance_model(test_loader)
                logging.info("After joint-training, guidance classifier accuracy on the test set is {:.8f}.".format(
                    y_acc_aux_model))
    def test(self):
        """
        Evaluate a trained diffusion model on the test set.

        Loads a diffusion checkpoint ('best', 'last', or an explicit
        testing.ckpt_id) plus the best auxiliary classifier checkpoint, runs
        reverse-diffusion sampling for every test batch, and logs average
        accuracy, quadratic Cohen's kappa, and macro F1.
        """
        args = self.args
        config = self.config
        data_object, train_dataset, test_dataset = get_dataset(args, config)
        log_path = os.path.join(self.args.log_path)
        train_loader = data.DataLoader(
            train_dataset,
            batch_size=config.training.batch_size,
            shuffle=True,
            num_workers=config.data.num_workers,
        )
        test_loader = data.DataLoader(
            test_dataset,
            batch_size=config.testing.batch_size,
            shuffle=False,
            num_workers=config.data.num_workers,
        )
        model = ConditionalModel(config, guidance=config.diffusion.include_guidance)
        # Select which diffusion checkpoint to load.
        if getattr(self.config.testing, "ckpt_id", None) is None:
            if args.eval_best:
                ckpt_id = 'best'
                states = torch.load(os.path.join(log_path, f"ckpt_{ckpt_id}.pth"),
                                    map_location=self.device)
            else:
                ckpt_id = 'last'
                states = torch.load(os.path.join(log_path, "ckpt.pth"),
                                    map_location=self.device)
        else:
            states = torch.load(os.path.join(log_path, f"ckpt_{self.config.testing.ckpt_id}.pth"),
                                map_location=self.device)
            ckpt_id = self.config.testing.ckpt_id
        logging.info(f"Loading from: {log_path}/ckpt_{ckpt_id}.pth")
        model = model.to(self.device)
        model.load_state_dict(states[0], strict=True)
        # Parameter count (currently only computed, not logged).
        num_params = 0
        for param in model.parameters():
            num_params += param.numel()
        #for param in self.cond_pred_model.parameters():
        #    num_params += param.numel()
        #print('Total number of parameters: %d' % num_params)
        # load auxiliary model
        if config.diffusion.apply_aux_cls:
            if hasattr(config.diffusion, "trained_aux_cls_ckpt_path"):
                aux_states = torch.load(os.path.join(config.diffusion.trained_aux_cls_ckpt_path,
                                                     config.diffusion.trained_aux_cls_ckpt_name),
                                        map_location=self.device)
                self.cond_pred_model.load_state_dict(aux_states['state_dict'], strict=True)
            else:
                aux_cls_path = log_path
                if hasattr(config.diffusion, "trained_aux_cls_log_path"):
                    aux_cls_path = config.diffusion.trained_aux_cls_log_path
                aux_states = torch.load(os.path.join(aux_cls_path, "aux_ckpt_best.pth"),
                                        map_location=self.device)
                self.cond_pred_model.load_state_dict(aux_states[0], strict=False)
                logging.info(f"Loading from: {aux_cls_path}/aux_ckpt_best.pth")
        # Evaluate
        model.eval()
        self.cond_pred_model.eval()
        acc_avg = 0.
        kappa_avg = 0.
        y1_true = None
        y1_pred = None
        for test_batch_idx, (images, target) in enumerate(test_loader):
            # if test_batch_idx > 3:
            #     continue
            images_unflat = images.to(self.device)
            images = images.to(self.device)
            target = target.to(self.device)
            target_vec = nn.functional.one_hot(target, num_classes=config.data.num_classes).float().to(self.device)
            with torch.no_grad():
                target_pred, y_global, y_local = self.compute_guiding_prediction(images_unflat)
                target_pred = target_pred.softmax(dim=1)
                # prior mean at timestep T
                y_T_mean = target_pred
                if config.diffusion.noise_prior:  # apply 0 instead of f_phi(x) as prior mean
                    y_T_mean = torch.zeros(target_pred.shape).to(target_pred.device)
                if not config.diffusion.noise_prior:  # apply f_phi(x) instead of 0 as prior mean
                    target_pred, y_global, y_local = self.compute_guiding_prediction(images_unflat)
                    target_pred = target_pred.softmax(dim=1)
                # Reverse diffusion with the (shorter) test timestep schedule.
                label_t_0 = p_sample_loop(model, images, target_pred, y_T_mean,
                                          self.test_num_timesteps, self.alphas,
                                          self.one_minus_alphas_bar_sqrt,
                                          only_last_sample=True)
                #print(label_t_0.shape)
                label_t_0 = label_t_0.softmax(dim=-1)
                acc_avg += accuracy(label_t_0.detach().cpu(), target.cpu())[0].item()
                kappa_avg += cohen_kappa(label_t_0.detach().cpu(), target.cpu()).item()
                y1_pred = torch.cat([y1_pred, label_t_0]) if y1_pred is not None else label_t_0
                y1_true = torch.cat([y1_true, target]) if y1_true is not None else target
                # F1 recomputed each batch over the running concatenation, so
                # the final value covers the whole test set.
                f1_avg = compute_f1_score(y1_true, y1_pred)
        acc_avg /= (test_batch_idx + 1)
        kappa_avg /= (test_batch_idx + 1)
        logging.info(
            (
                f"[Test:] Average accuracy: {acc_avg}, Average Kappa: {kappa_avg}, F1: {f1_avg}"
            )
        )
| 31,018 | 51.843271 | 185 | py |
DiffMIC | DiffMIC-main/utils.py | import random
import math
import numpy as np
import argparse
import torch
import torch.optim as optim
import torchvision
from torch import nn
from torchvision import transforms
from dataloader.loading import *
import torch.nn.functional as F
def set_random_seed(seed):
    """Seed Python's random, NumPy, and torch (CPU + all CUDA devices)."""
    print(f"\n* Set seed {seed}")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)  # covers torch.cuda.manual_seed(seed)
def dict2namespace(config):
    """Recursively convert a (possibly nested) dict into argparse.Namespace."""
    namespace = argparse.Namespace()
    for key, value in config.items():
        attr = dict2namespace(value) if isinstance(value, dict) else value
        setattr(namespace, key, attr)
    return namespace
def sizeof_fmt(num, suffix='B'):
    """
    Format a byte count with binary (Ki/Mi/...) prefixes.
    https://stackoverflow.com/questions/24455615/python-how-to-display-size-of-all-variables
    """
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, prefix, suffix)
        num /= 1024.0
    # Anything past zebibytes gets the largest prefix.
    return "%.1f %s%s" % (num, 'Yi', suffix)
# print("Check memory usage of different variables:")
# for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),
# key=lambda x: -x[1])[:10]:
# print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))
def get_optimizer(config_optim, parameters):
    """
    Build the optimizer named by config_optim.optimizer ('Adam', 'RMSProp',
    or 'SGD') over the given parameters.

    Raises NotImplementedError for any other name.
    """
    name = config_optim.optimizer
    if name == 'Adam':
        return optim.Adam(parameters, lr=config_optim.lr,
                          weight_decay=config_optim.weight_decay,
                          betas=(config_optim.beta1, 0.999),
                          amsgrad=config_optim.amsgrad,
                          eps=config_optim.eps)
    if name == 'RMSProp':
        return optim.RMSprop(parameters, lr=config_optim.lr,
                             weight_decay=config_optim.weight_decay)
    if name == 'SGD':
        # NOTE: weight decay and momentum are hard-coded for SGD; the
        # config's weight_decay is intentionally not used here.
        return optim.SGD(parameters, lr=config_optim.lr, weight_decay=1e-4, momentum=0.9)
    raise NotImplementedError(
        'Optimizer {} not understood.'.format(config_optim.optimizer))
def get_optimizer_and_scheduler(config, parameters, epochs, init_epoch):
    """
    Build the optimizer plus, when config.use_scheduler is set, a cosine
    warm-restart LR scheduler resumed at init_epoch.

    Returns (optimizer, scheduler) with scheduler None when disabled.
    """
    optimizer = get_optimizer(config, parameters)
    scheduler = None
    # Restart period: explicit config.T_0 wins over the epochs/restarts split.
    T_0 = config.T_0 if hasattr(config, "T_0") else epochs // (config.n_restarts + 1)
    if config.use_scheduler:
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,
                                                                   T_0=T_0,
                                                                   T_mult=config.T_mult,
                                                                   eta_min=config.eta_min,
                                                                   last_epoch=-1)
        scheduler.last_epoch = init_epoch - 1
    return optimizer, scheduler
def adjust_learning_rate(optimizer, epoch, config):
    """Decay the learning rate with half-cycle cosine after warmup.

    Linear warmup for config.training.warmup_epochs, then a half-cosine from
    config.optim.lr down to config.optim.min_lr. Writes the new rate into
    every param group (scaled by an optional "lr_scale") and returns it.
    """
    warmup = config.training.warmup_epochs
    if epoch < warmup:
        lr = config.optim.lr * epoch / warmup
    else:
        progress = (epoch - warmup) / (config.training.n_epochs - warmup)
        lr = config.optim.min_lr + (config.optim.lr - config.optim.min_lr) * 0.5 * \
            (1. + math.cos(math.pi * progress))
    for param_group in optimizer.param_groups:
        if "lr_scale" in param_group:
            param_group["lr"] = lr * param_group["lr_scale"]
        else:
            param_group["lr"] = lr
    return lr
def get_dataset(args, config):
    """
    Build (data_object, train_dataset, test_dataset) for the configured
    dataset name (config.data.dataset).

    Supported: "PLACENTAL", "APTOS", "ISIC". data_object is always None
    (kept for interface compatibility with callers that unpack three values).

    Raises:
        NotImplementedError: for any other dataset name.
    """
    data_object = None
    if config.data.dataset == "PLACENTAL":
        train_dataset = BUDataset(data_list=config.data.traindata, train=True)
        test_dataset = BUDataset(data_list=config.data.testdata, train=False)
    elif config.data.dataset == "APTOS":
        train_dataset = APTOSDataset(data_list=config.data.traindata, train=True)
        test_dataset = APTOSDataset(data_list=config.data.testdata, train=False)
    elif config.data.dataset == "ISIC":
        train_dataset = ISICDataset(data_list=config.data.traindata, train=True)
        test_dataset = ISICDataset(data_list=config.data.testdata, train=False)
    else:
        # Fixed: the message previously listed unrelated options
        # (toy / MNIST / FashionMNIST / CIFAR10) that this function never handled.
        raise NotImplementedError(
            "Dataset {} is not supported; options: PLACENTAL, APTOS, ISIC.".format(
                config.data.dataset))
    return data_object, train_dataset, test_dataset
from sklearn.metrics import cohen_kappa_score
# ------------------------------------------------------------------------------------
# Revised from timm == 0.3.2
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/metrics.py
# output: the prediction from diffusion model (B x n_classes)
# target: label indices (B)
# ------------------------------------------------------------------------------------
def accuracy(output, target, topk=(1,)):
    """
    Top-k accuracy (in percent) for the requested values of k.
    Adapted from timm 0.3.2 metrics.

    output: (B, n_classes) prediction scores; target: (B,) label indices.
    Returns one scalar tensor per k in topk.
    """
    maxk = min(max(topk), output.size()[1])
    batch_size = target.size(0)
    _, top_indices = output.topk(maxk, 1, True, True)
    top_indices = top_indices.t()  # (maxk, B)
    hits = top_indices.eq(target.reshape(1, -1).expand_as(top_indices))
    results = []
    for k in topk:
        n_correct = hits[:min(k, maxk)].reshape(-1).float().sum(0)
        results.append(n_correct * 100. / batch_size)
    return results
def cohen_kappa(output, target, topk=(1,)):
    """
    Quadratic-weighted Cohen's kappa between top-1 predictions and labels.

    output: (B, n_classes) scores; target: (B,) label indices.
    """
    maxk = min(max(topk), output.size()[1])
    _, top_indices = output.topk(maxk, 1, True, True)
    return cohen_kappa_score(top_indices, target, weights='quadratic')
def cast_label_to_one_hot_and_prototype(y_labels_batch, config, return_prototype=True):
    """
    Encode integer labels as one-hot vectors and, optionally, as logit
    "prototype" vectors (logit of the L1-normalized, value-clipped one-hot).

    y_labels_batch: a vector of length batch_size.
    Returns y_one_hot_batch, or (y_one_hot_batch, y_logits_batch) when
    return_prototype is True.
    """
    y_one_hot_batch = nn.functional.one_hot(
        y_labels_batch, num_classes=config.data.num_classes).float()
    if not return_prototype:
        return y_one_hot_batch
    label_min, label_max = config.data.label_min_max
    # Clip away exact 0/1 so the logit stays finite.
    clipped = torch.clip(y_one_hot_batch, min=label_min, max=label_max)
    y_logits_batch = torch.logit(nn.functional.normalize(clipped, p=1.0, dim=1))
    return y_one_hot_batch, y_logits_batch
import numpy as np
import sklearn.metrics as metrics
#from imblearn.metrics import sensitivity_score, specificity_score
import pdb
# from sklearn.metrics.ranking import roc_auc_score
from sklearn.metrics import accuracy_score, balanced_accuracy_score, cohen_kappa_score
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
def compute_isic_metrics(gt, pred):
    """
    Classification metrics for ISIC-style evaluation.

    gt, pred: (B, n_classes) tensors — one-hot ground truth and predicted
    scores. Returns (ACC, BACC, Prec, Rec, F1, AUC_ovo, kappa) where all
    averaged metrics use 'macro' averaging and kappa is quadratic-weighted.
    """
    gt_np = gt.cpu().detach().numpy()
    pred_np = pred.cpu().detach().numpy()
    gt_labels = np.argmax(gt_np, axis=1)
    pred_labels = np.argmax(pred_np, axis=1)
    acc = accuracy_score(gt_labels, pred_labels)
    bacc = balanced_accuracy_score(gt_labels, pred_labels)  # balanced accuracy
    prec = precision_score(gt_labels, pred_labels, average='macro')
    rec = recall_score(gt_labels, pred_labels, average='macro')
    f1 = f1_score(gt_labels, pred_labels, average='macro')
    # AUC is computed on the raw score matrices (one-vs-one, macro-averaged).
    auc_ovo = metrics.roc_auc_score(gt_np, pred_np, average='macro', multi_class='ovo')
    kappa = cohen_kappa_score(gt_labels, pred_labels, weights='quadratic')
    return acc, bacc, prec, rec, f1, auc_ovo, kappa
def compute_f1_score(gt, pred):
    """
    Macro-averaged F1 score.

    gt: (B,) tensor of integer labels; pred: (B, n_classes) tensor of scores
    (argmax over axis 1 gives the predicted class).
    """
    true_labels = gt.cpu().detach().numpy()
    pred_labels = np.argmax(pred.cpu().detach().numpy(), axis=1)
    return f1_score(true_labels, pred_labels, average='macro')
| 8,144 | 39.321782 | 104 | py |
DiffMIC | DiffMIC-main/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet18, resnet50
from torchvision.models.densenet import densenet121
from timm.models import create_model
import numpy as np
class ConditionalLinear(nn.Module):
    """Linear layer whose output is scaled by a learned per-timestep gamma."""

    def __init__(self, num_in, num_out, n_steps):
        super(ConditionalLinear, self).__init__()
        self.num_out = num_out
        self.lin = nn.Linear(num_in, num_out)
        # One gamma vector per diffusion timestep, initialized uniform in [0, 1).
        self.embed = nn.Embedding(n_steps, num_out)
        self.embed.weight.data.uniform_()

    def forward(self, x, t):
        """x: (B, num_in); t: (B,) timestep indices -> (B, num_out)."""
        gamma = self.embed(t).view(-1, self.num_out)
        return gamma * self.lin(x)
class ConditionalModel(nn.Module):
    """
    Epsilon-prediction network: an image encoder conditions a small
    timestep-modulated MLP that denoises the label vector y_t.

    With guidance=True the noisy label y is concatenated with the guidance
    prediction yhat before entering the MLP.
    """

    def __init__(self, config, guidance=False):
        super(ConditionalModel, self).__init__()
        n_steps = config.diffusion.timesteps + 1
        # Read these config fields up front (kept even when unused so a
        # missing config entry fails loudly at construction time).
        data_dim = config.model.data_dim
        y_dim = config.data.num_classes
        arch = config.model.arch
        feature_dim = config.model.feature_dim
        hidden_dim = config.model.hidden_dim
        self.guidance = guidance
        # Image encoder for x, plus batch-norm on its features.
        self.encoder_x = ResNetEncoder(arch=arch, feature_dim=feature_dim)
        self.norm = nn.BatchNorm1d(feature_dim)
        # Timestep-conditioned MLP ("Unet" in the original naming).
        in_dim = y_dim * 2 if self.guidance else y_dim
        self.lin1 = ConditionalLinear(in_dim, feature_dim, n_steps)
        self.unetnorm1 = nn.BatchNorm1d(feature_dim)
        self.lin2 = ConditionalLinear(feature_dim, feature_dim, n_steps)
        self.unetnorm2 = nn.BatchNorm1d(feature_dim)
        self.lin3 = ConditionalLinear(feature_dim, feature_dim, n_steps)
        self.unetnorm3 = nn.BatchNorm1d(feature_dim)
        self.lin4 = nn.Linear(feature_dim, y_dim)

    def forward(self, x, y, t, yhat=None):
        """Predict the noise for label state y at timestep t, given image x."""
        feat = self.norm(self.encoder_x(x))
        if self.guidance:
            y = torch.cat([y, yhat], dim=-1)
        h = F.softplus(self.unetnorm1(self.lin1(y, t)))
        h = feat * h  # gate label features with image features
        h = F.softplus(self.unetnorm2(self.lin2(h, t)))
        h = F.softplus(self.unetnorm3(self.lin3(h, t)))
        return self.lin4(h)
# ResNet 18 or 50 as image encoder
class ResNetEncoder(nn.Module):
    """
    Image encoder: a torchvision/timm backbone (minus its 'fc' head)
    followed by a linear projection to feature_dim.
    """

    def __init__(self, arch='resnet18', feature_dim=128):
        super(ResNetEncoder, self).__init__()
        if arch == 'resnet50':
            backbone = resnet50()
            self.featdim = backbone.fc.weight.shape[1]
        elif arch == 'resnet18':
            backbone = resnet18()
            self.featdim = backbone.fc.weight.shape[1]
        elif arch == 'densenet121':
            backbone = densenet121(pretrained=True)
            self.featdim = backbone.classifier.weight.shape[1]
        elif arch == 'vit':
            backbone = create_model('pvt_v2_b2',
                                    pretrained=True,
                                    num_classes=4,
                                    drop_rate=0,
                                    drop_path_rate=0.1,
                                    drop_block_rate=None,
                                    )
            backbone.head = nn.Sequential()
            self.featdim = 512  # pvt_v2_b2 embedding width (hard-coded)
        # Keep every child module except the classification head named 'fc'.
        # NOTE(review): only ResNets name their head 'fc'; for densenet/vit
        # the head is neutralized (or kept) differently above — confirm intended.
        modules = [m for name, m in backbone.named_children() if name != 'fc']
        self.f = nn.Sequential(*modules)
        self.g = nn.Linear(self.featdim, feature_dim)

    def forward_feature(self, x):
        """Backbone features, flattened and projected to feature_dim."""
        feature = torch.flatten(self.f(x), start_dim=1)
        return self.g(feature)

    def forward(self, x):
        return self.forward_feature(x)
| 3,986 | 30.393701 | 74 | py |
DiffMIC | DiffMIC-main/ema.py | import torch.nn as nn
class EMA(object):
    """Exponential moving average of a module's trainable parameters.

    shadow <- mu * shadow + (1 - mu) * param after every `update` call.
    """

    def __init__(self, mu=0.999):
        self.mu = mu
        self.shadow = {}

    def register(self, module):
        """Snapshot all trainable parameters as the initial averages."""
        for name, param in module.named_parameters():
            if param.requires_grad:
                self.shadow[name] = param.data.clone()

    def update(self, module):
        """Blend current parameter values into the running averages."""
        decay = self.mu
        for name, param in module.named_parameters():
            if param.requires_grad:
                self.shadow[name].data = (1. - decay) * param.data + decay * self.shadow[name].data

    def ema(self, module):
        """Overwrite the module's parameters with the averaged values."""
        for name, param in module.named_parameters():
            if param.requires_grad:
                param.data.copy_(self.shadow[name].data)

    def ema_copy(self, module):
        """Return a fresh copy of `module` carrying the averaged parameters."""
        duplicate = type(module)(module.config).to(module.config.device)
        duplicate.load_state_dict(module.state_dict())
        self.ema(duplicate)
        return duplicate

    def state_dict(self):
        return self.shadow

    def load_state_dict(self, state_dict):
        self.shadow = state_dict
| 1,053 | 30 | 103 | py |
DiffMIC | DiffMIC-main/diffusion_utils.py | import math
import torch
import numpy as np
def make_beta_schedule(schedule="linear", num_timesteps=1000, start=1e-5, end=1e-2):
    """Build the diffusion beta schedule.

    Args:
        schedule: one of "linear", "const", "quad", "jsd", "sigmoid",
            "cosine"/"cosine_reverse", or "cosine_anneal".
        num_timesteps: number of diffusion steps T.
        start, end: schedule endpoints (interpretation depends on schedule).

    Returns:
        A 1-D float tensor of length ``num_timesteps`` with the beta values.

    Raises:
        NotImplementedError: for an unknown schedule name (the previous
            version fell through and returned an unbound local variable).
    """
    if schedule == "linear":
        betas = torch.linspace(start, end, num_timesteps)
    elif schedule == "const":
        betas = end * torch.ones(num_timesteps)
    elif schedule == "quad":
        # quadratic schedule: linear in sqrt-space
        betas = torch.linspace(start ** 0.5, end ** 0.5, num_timesteps) ** 2
    elif schedule == "jsd":
        # 1/T, 1/(T-1), ..., 1
        betas = 1.0 / torch.linspace(num_timesteps, 1, num_timesteps)
    elif schedule == "sigmoid":
        betas = torch.linspace(-6, 6, num_timesteps)
        betas = torch.sigmoid(betas) * (end - start) + start
    elif schedule == "cosine" or schedule == "cosine_reverse":
        max_beta = 0.999
        cosine_s = 0.008
        # beta_t = 1 - alpha_bar(t+1)/alpha_bar(t), clipped at max_beta
        betas = torch.tensor(
            [min(1 - (math.cos(((i + 1) / num_timesteps + cosine_s) / (1 + cosine_s) * math.pi / 2) ** 2) / (
                math.cos((i / num_timesteps + cosine_s) / (1 + cosine_s) * math.pi / 2) ** 2), max_beta) for i in
             range(num_timesteps)])
    elif schedule == "cosine_anneal":
        # cosine interpolation between start and end
        betas = torch.tensor(
            [start + 0.5 * (end - start) * (1 - math.cos(t / (num_timesteps - 1) * math.pi)) for t in
             range(num_timesteps)])
    else:
        raise NotImplementedError("unknown beta schedule: {}".format(schedule))
    return betas
def extract(input, t, x):
    """Gather ``input[t]`` per batch item and reshape to broadcast against ``x``."""
    values = torch.gather(input, 0, t.to(input.device))
    # shape (batch, 1, 1, ...) with one trailing 1 per extra dim of x
    broadcast_shape = [t.shape[0]] + [1] * (x.dim() - 1)
    return values.reshape(*broadcast_shape)
# Forward functions
def q_sample(y, y_0_hat, alphas_bar_sqrt, one_minus_alphas_bar_sqrt, t, noise=None):
    """
    Sample y_t ~ q(y_t | y_0, x) for the prior-shifted forward process.

    y_0_hat: prediction of pre-trained guidance classifier; can be extended to
    represent any prior mean setting at timestep T.  As t grows, y_t is pulled
    toward this prior mean.
    """
    if noise is None:
        noise = torch.randn_like(y).to(y.device)
    # look up sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcastable to y
    dims = [t.shape[0]] + [1] * (y.dim() - 1)
    sqrt_alpha_bar_t = torch.gather(
        alphas_bar_sqrt, 0, t.to(alphas_bar_sqrt.device)).reshape(*dims)
    sqrt_one_minus_alpha_bar_t = torch.gather(
        one_minus_alphas_bar_sqrt, 0, t.to(one_minus_alphas_bar_sqrt.device)).reshape(*dims)
    # q(y_t | y_0, x): interpolate between y_0 and the prior mean, add noise
    return sqrt_alpha_bar_t * y + (1 - sqrt_alpha_bar_t) * y_0_hat + sqrt_one_minus_alpha_bar_t * noise
# Reverse function -- sample y_{t-1} given y_t
def p_sample(model, x, y, y_0_hat, y_T_mean, t, alphas, one_minus_alphas_bar_sqrt):
    """
    Reverse diffusion process sampling -- one time step.
    y: sampled y at time step t, y_t.
    y_0_hat: prediction of pre-trained guidance model.
    y_T_mean: mean of prior distribution at timestep T.
    We replace y_0_hat with y_T_mean in the forward process posterior mean computation, emphasizing that
    guidance model prediction y_0_hat = f_phi(x) is part of the input to eps_theta network, while
    in paper we also choose to set the prior mean at timestep T y_T_mean = f_phi(x).
    """
    device = next(model.parameters()).device
    z = torch.randn_like(y)  # fresh Gaussian noise for the stochastic posterior sample
    t = torch.tensor([t]).to(device)
    alpha_t = extract(alphas, t, y)
    sqrt_one_minus_alpha_bar_t = extract(one_minus_alphas_bar_sqrt, t, y)
    sqrt_one_minus_alpha_bar_t_m_1 = extract(one_minus_alphas_bar_sqrt, t - 1, y)
    # recover sqrt(alpha_bar) at t and t-1 from the stored sqrt(1 - alpha_bar)
    sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
    sqrt_alpha_bar_t_m_1 = (1 - sqrt_one_minus_alpha_bar_t_m_1.square()).sqrt()
    # y_t_m_1 posterior mean component coefficients
    # gamma_0 weights the reparameterized y_0, gamma_1 weights y_t,
    # gamma_2 weights the prior mean y_T_mean (prior-shifted posterior)
    gamma_0 = (1 - alpha_t) * sqrt_alpha_bar_t_m_1 / (sqrt_one_minus_alpha_bar_t.square())
    gamma_1 = (sqrt_one_minus_alpha_bar_t_m_1.square()) * (alpha_t.sqrt()) / (sqrt_one_minus_alpha_bar_t.square())
    gamma_2 = 1 + (sqrt_alpha_bar_t - 1) * (alpha_t.sqrt() + sqrt_alpha_bar_t_m_1) / (
        sqrt_one_minus_alpha_bar_t.square())
    eps_theta = model(x, y, t, y_0_hat).to(device).detach()
    # y_0 reparameterization: invert q(y_t | y_0) using the predicted noise
    y_0_reparam = 1 / sqrt_alpha_bar_t * (
            y - (1 - sqrt_alpha_bar_t) * y_T_mean - eps_theta * sqrt_one_minus_alpha_bar_t)
    # posterior mean
    y_t_m_1_hat = gamma_0 * y_0_reparam + gamma_1 * y + gamma_2 * y_T_mean
    # posterior variance
    beta_t_hat = (sqrt_one_minus_alpha_bar_t_m_1.square()) / (sqrt_one_minus_alpha_bar_t.square()) * (1 - alpha_t)
    y_t_m_1 = y_t_m_1_hat.to(device) + beta_t_hat.sqrt().to(device) * z.to(device)
    return y_t_m_1
# Reverse function -- sample y_0 given y_1
def p_sample_t_1to0(model, x, y, y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt):
    """Final reverse step: map y_1 to y_0 via the reparameterized mean (no noise added)."""
    device = next(model.parameters()).device
    t = torch.tensor([0]).to(device)  # index 0 corresponds to diffusion timestep 1
    dims = [t.shape[0]] + [1] * (y.dim() - 1)
    sqrt_one_minus_alpha_bar_t = torch.gather(
        one_minus_alphas_bar_sqrt, 0, t.to(one_minus_alphas_bar_sqrt.device)).reshape(*dims)
    sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
    eps_theta = model(x, y, t, y_0_hat).to(device).detach()
    # invert q(y_t | y_0): solve for y_0 given the predicted noise
    y_0 = 1 / sqrt_alpha_bar_t * (
        y - (1 - sqrt_alpha_bar_t) * y_T_mean - eps_theta * sqrt_one_minus_alpha_bar_t)
    return y_0.to(device)
def y_0_reparam(model, x, y, y_0_hat, y_T_mean, t, one_minus_alphas_bar_sqrt):
    """
    Obtain y_0 reparameterization from q(y_t | y_0), in which noise term is the eps_theta prediction.
    Algorithm 2 Line 4 in paper.
    """
    device = next(model.parameters()).device
    dims = [t.shape[0]] + [1] * (y.dim() - 1)
    sqrt_one_minus_alpha_bar_t = torch.gather(
        one_minus_alphas_bar_sqrt, 0, t.to(one_minus_alphas_bar_sqrt.device)).reshape(*dims)
    sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
    eps_theta = model(x, y, t, y_0_hat).to(device).detach()
    # solve the forward-process equation for y_0
    return 1 / sqrt_alpha_bar_t * (
        y - (1 - sqrt_alpha_bar_t) * y_T_mean - eps_theta * sqrt_one_minus_alpha_bar_t).to(device)
def p_sample_loop(model, x, y_0_hat, y_T_mean, n_steps, alphas, one_minus_alphas_bar_sqrt,
                  only_last_sample=False):
    """Run the full reverse chain from t = T down to t = 0.

    Returns y_0 alone when ``only_last_sample`` is True, otherwise the whole
    trajectory [y_T, y_{T-1}, ..., y_1, y_0] (a list of length n_steps + 1).
    """
    device = next(model.parameters()).device
    # draw y_T from the prior N(y_T_mean, I)
    cur_y = torch.randn_like(y_T_mean).to(device) + y_T_mean
    num_t = 1 if only_last_sample else None
    y_p_seq = None if only_last_sample else [cur_y]
    for t in reversed(range(1, n_steps)):
        cur_y = p_sample(model, x, cur_y, y_0_hat, y_T_mean, t,
                         alphas, one_minus_alphas_bar_sqrt)  # y_{t-1}
        if only_last_sample:
            num_t += 1
        else:
            y_p_seq.append(cur_y)
    if only_last_sample:
        assert num_t == n_steps
        return p_sample_t_1to0(model, x, cur_y, y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt)
    assert len(y_p_seq) == n_steps
    y_0 = p_sample_t_1to0(model, x, y_p_seq[-1], y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt)
    y_p_seq.append(y_0)
    return y_p_seq
def compute_kernel(x, y):
    """Pairwise RBF kernel between rows of x and rows of y -> (x_size, y_size)."""
    nx, ny, dim = x.size(0), y.size(0), x.size(1)
    # broadcast to all (i, j) pairs
    diffs = x.unsqueeze(1).expand(nx, ny, dim) - y.unsqueeze(0).expand(nx, ny, dim)
    # note: mean(2) already divides by dim, so the exponent is mse/dim
    exponent = diffs.pow(2).mean(2) / float(dim)
    return torch.exp(-exponent)
def compute_mmd(x, y):
    """Maximum mean discrepancy (MMD) between samples x and y under the RBF kernel."""
    k_xx = compute_kernel(x, x).mean()
    k_yy = compute_kernel(y, y).mean()
    k_xy = compute_kernel(x, y).mean()
    return k_xx + k_yy - 2 * k_xy
| 7,115 | 41.357143 | 117 | py |
DiffMIC | DiffMIC-main/pretraining/dcg.py | import torch
import torch.nn as nn
import numpy as np
import pretraining.tools as tools
import pretraining.modules as m
class DCG(nn.Module):
    """Dual-branch guidance classifier (global saliency + local MIL crops).

    A global network scores the whole image into a class activation map; a
    region-proposal module greedily picks the K most salient crops, which a
    local network encodes and a gated-attention MIL module classifies.  The
    fused prediction is the average of the global and local predictions.
    """

    def __init__(self, parameters):
        super(DCG, self).__init__()
        # save parameters
        # NOTE(review): device/gpu and architecture hyper-parameters are
        # hard-coded here except num_classes, which comes from the config.
        self.experiment_parameters = {
            "device_type": 'gpu',
            "gpu_number": 0,
            # model related hyper-parameters
            "cam_size": (7, 7),
            "K": 6,
            "crop_shape": (32, 32),
            "post_processing_dim":512,
            "num_classes":parameters.data.num_classes,
            "use_v1_global":True,
            "percent_t": 1.0,
        }
        self.cam_size = self.experiment_parameters["cam_size"]
        # construct networks
        # global network
        self.global_network = m.GlobalNetwork(self.experiment_parameters, self)
        self.global_network.add_layers()
        # aggregation function
        self.aggregation_function = m.TopTPercentAggregationFunction(self.experiment_parameters, self)
        # detection module
        self.retrieve_roi_crops = m.RetrieveROIModule(self.experiment_parameters, self)
        # detection network
        self.local_network = m.LocalNetwork(self.experiment_parameters, self)
        self.local_network.add_layers()
        # MIL module
        self.attention_module = m.AttentionModule(self.experiment_parameters, self)
        self.attention_module.add_layers()
        # fusion branch
        # self.fusion_dnn = nn.Linear(self.experiment_parameters["post_processing_dim"]+512, self.experiment_parameters["num_classes"], bias=False)

    def _convert_crop_position(self, crops_x_small, cam_size, x_original):
        """
        Function that converts the crop locations from cam_size to x_original
        :param crops_x_small: N, k*c, 2 numpy matrix
        :param cam_size: (h,w)
        :param x_original: N, C, H, W pytorch variable
        :return: N, k*c, 2 numpy matrix
        """
        # retrieve the dimension of both the original image and the small version
        h, w = cam_size
        _, _, H, W = x_original.size()
        # interpolate the 2d index in h_small to index in x_original
        top_k_prop_x = crops_x_small[:, :, 0] / h
        top_k_prop_y = crops_x_small[:, :, 1] / w
        # sanity check
        assert np.max(top_k_prop_x) <= 1.0, "top_k_prop_x >= 1.0"
        assert np.min(top_k_prop_x) >= 0.0, "top_k_prop_x <= 0.0"
        assert np.max(top_k_prop_y) <= 1.0, "top_k_prop_y >= 1.0"
        assert np.min(top_k_prop_y) >= 0.0, "top_k_prop_y <= 0.0"
        # interpolate the crop position from cam_size to x_original
        top_k_interpolate_x = np.expand_dims(np.around(top_k_prop_x * H), -1)
        top_k_interpolate_y = np.expand_dims(np.around(top_k_prop_y * W), -1)
        top_k_interpolate_2d = np.concatenate([top_k_interpolate_x, top_k_interpolate_y], axis=-1)
        return top_k_interpolate_2d

    def _retrieve_crop(self, x_original_pytorch, crop_positions, crop_method):
        """
        Function that takes in the original image and cropping position and returns the crops
        :param x_original_pytorch: PyTorch Tensor array (N,C,H,W)
        :param crop_positions: N, num_crops, 2 array of crop corner positions
        :return: (N, num_crops, crop_h, crop_w) tensor of crops
        """
        batch_size, num_crops, _ = crop_positions.shape
        crop_h, crop_w = self.experiment_parameters["crop_shape"]
        output = torch.ones((batch_size, num_crops, crop_h, crop_w))
        if self.experiment_parameters["device_type"] == "gpu":
            device = torch.device("cuda:{}".format(self.experiment_parameters["gpu_number"]))
            output = output.to(device)
        for i in range(batch_size):
            for j in range(num_crops):
                # crops are cut from channel 0 only and written into `output` in place
                tools.crop_pytorch(x_original_pytorch[i, 0, :, :],
                                   self.experiment_parameters["crop_shape"],
                                   crop_positions[i,j,:],
                                   output[i,j,:,:],
                                   method=crop_method)
        return output

    def forward(self, x_original):
        """
        :param x_original: N,H,W,C numpy matrix
        :return: (y_fusion, y_global, y_local) class-score triples
        """
        # global network: x_small -> class activation map
        h_g, self.saliency_map = self.global_network.forward(x_original)
        # calculate y_global
        # note that y_global is not directly used in inference
        self.y_global = self.aggregation_function.forward(self.saliency_map)
        # region proposal network
        small_x_locations = self.retrieve_roi_crops.forward(x_original, self.cam_size, self.saliency_map)
        # convert crop locations that is on self.cam_size to x_original
        self.patch_locations = self._convert_crop_position(small_x_locations, self.cam_size, x_original)
        # patch retriever
        crops_variable = self._retrieve_crop(x_original, self.patch_locations, self.retrieve_roi_crops.crop_method)
        self.patches = crops_variable.data.cpu().numpy()
        # detection network
        batch_size, num_crops, I, J = crops_variable.size()
        crops_variable = crops_variable.view(batch_size * num_crops, I, J).unsqueeze(1)
        h_crops = self.local_network.forward(crops_variable).view(batch_size, num_crops, -1)
        # MIL module
        # y_local is not directly used during inference
        z, self.patch_attns, self.y_local = self.attention_module.forward(h_crops)
        # fused guidance prediction: simple average of the two branches
        self.y_fusion = 0.5* (self.y_global+self.y_local)
return self.y_fusion, self.y_global, self.y_local | 5,533 | 41.569231 | 147 | py |
DiffMIC | DiffMIC-main/pretraining/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pretraining.tools as tools
from torchvision.models.resnet import conv3x3, resnet18, resnet50
class BasicBlockV2(nn.Module):
    """
    Basic Residual Block of ResNet V2 (pre-activation: BN-ReLU-Conv twice).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlockV2, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = conv3x3(inplanes, planes, stride=stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride=1)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Phase 1: pre-activate, then (optionally) project the shortcut
        # from the pre-activated tensor, as in ResNet v2.
        pre = self.relu(self.bn1(x))
        shortcut = x if self.downsample is None else self.downsample(pre)
        out = self.conv1(pre)
        # Phase 2
        out = self.conv2(self.relu(self.bn2(out)))
        # no activation after the sum (v2 style)
        return out + shortcut
class BasicBlockV1(nn.Module):
    """
    Basic Residual Block of ResNet V1 (post-activation: Conv-BN-ReLU).
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlockV1, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # identity shortcut, projected when shape/stride changes
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # add shortcut, then final activation (v1 style)
        return self.relu(out + shortcut)
class ResNetV2(nn.Module):
    """
    Adapted fom torchvision ResNet, converted to v2

    Stem conv + max-pool, then one residual stage per entry of
    blocks_per_layer_list; channel count grows by ``growth_factor``
    after every stage.  A final BN+ReLU closes the pre-activation chain.
    """
    def __init__(self,
                 input_channels, num_filters,
                 first_layer_kernel_size, first_layer_conv_stride,
                 blocks_per_layer_list, block_strides_list, block_fn,
                 first_layer_padding=0,
                 first_pool_size=None, first_pool_stride=None, first_pool_padding=0,
                 growth_factor=2):
        super(ResNetV2, self).__init__()
        # stem: plain conv, no BN/ReLU here — v2 blocks pre-activate
        self.first_conv = nn.Conv2d(
            in_channels=input_channels, out_channels=num_filters,
            kernel_size=first_layer_kernel_size,
            stride=first_layer_conv_stride,
            padding=first_layer_padding,
            bias=False,
        )
        self.first_pool = nn.MaxPool2d(
            kernel_size=first_pool_size,
            stride=first_pool_stride,
            padding=first_pool_padding,
        )
        self.layer_list = nn.ModuleList()
        current_num_filters = num_filters
        self.inplanes = num_filters
        for i, (num_blocks, stride) in enumerate(zip(
                blocks_per_layer_list, block_strides_list)):
            self.layer_list.append(self._make_layer(
                block=block_fn,
                planes=current_num_filters,
                blocks=num_blocks,
                stride=stride,
            ))
            # widen channels for the next stage
            current_num_filters *= growth_factor
        # final BN+ReLU: v2 blocks end without an activation
        self.final_bn = nn.BatchNorm2d(
            current_num_filters // growth_factor * block_fn.expansion
        )
        self.relu = nn.ReLU()
        # Expose attributes for downstream dimension computation
        self.num_filters = num_filters
        self.growth_factor = growth_factor

    def forward(self, x):
        h = self.first_conv(x)
        h = self.first_pool(h)
        for i, layer in enumerate(self.layer_list):
            h = layer(h)
        h = self.final_bn(h)
        h = self.relu(h)
        return h

    def _make_layer(self, block, planes, blocks, stride=1):
        # v2-style projection shortcut: bare 1x1 conv, every stage
        # (normalization happens inside the pre-activated block)
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
        )
        layers_ = [
            block(self.inplanes, planes, stride, downsample)
        ]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers_.append(block(self.inplanes, planes))
        return nn.Sequential(*layers_)
class ResNetV1(nn.Module):
    """
    Class that represents a ResNet with classifier sequence removed
    (returns the last conv feature map instead of logits).
    """
    def __init__(self, initial_filters, block, layers, input_channels=1):
        self.inplanes = initial_filters
        self.num_layers = len(layers)
        super(ResNetV1, self).__init__()
        # initial sequence
        # the first sequence only has 1 input channel which is different from original ResNet
        self.conv1 = nn.Conv2d(input_channels, initial_filters, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(initial_filters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # residual sequence
        # registers layer1..layerN; filters double and resolution halves per stage
        for i in range(self.num_layers):
            num_filters = initial_filters * pow(2,i)
            num_stride = (1 if i == 0 else 2)
            setattr(self, 'layer{0}'.format(i+1), self._make_layer(block, num_filters, layers[i], stride=num_stride))
        self.num_filter_last_seq = initial_filters * pow(2, self.num_layers-1)
        # initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 projection shortcut when shape or stride changes
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        # first sequence
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # residual sequences
        for i in range(self.num_layers):
            x = getattr(self, 'layer{0}'.format(i+1))(x)
        return x
class DownsampleNetworkResNet18V1(nn.Module):
    """
    Downsampling using ResNet V1
    First conv is 7*7, stride 2, padding 3, cut 1/2 resolution

    NOTE(review): despite the class name, the active backbone is a pretrained
    torchvision resnet50 (the resnet18 line is commented out); downstream
    modules size their layers as 512*4 accordingly — confirm before changing.
    """
    def __init__(self):
        super(DownsampleNetworkResNet18V1, self).__init__()
        self.f = []
        backbone = resnet50(pretrained=True)
        #backbone = resnet18(pretrained=True)
        # keep all children except the classifier head and the avg-pool,
        # so the network outputs the last conv feature map
        for name, module in backbone.named_children():
            if name != 'fc' and name != 'avgpool':
                self.f.append(module)
        #print(self.f)
        # encoder
        self.f = nn.Sequential(*self.f)

    def forward(self, x):
        # returns the final conv feature map (no pooling, no classifier)
        last_feature_map = self.f(x)
        #print(last_feature_map.shape)
        return last_feature_map
class AbstractMILUnit:
    """
    An abstract class that represents an MIL unit module.

    Holds the shared hyper-parameter dict and a reference to the owning
    nn.Module so sub-units can register layers on it.
    """
    def __init__(self, parameters, parent_module):
        # kept by reference; parent_module is the nn.Module that owns the layers
        self.parameters = parameters
        self.parent_module = parent_module
class PostProcessingStandard(nn.Module):
    """
    Unit in Global Network that takes in x_out and produce saliency maps.
    """
    def __init__(self, parameters):
        super(PostProcessingStandard, self).__init__()
        # 1x1 conv maps backbone channels (post_processing_dim * 4, the
        # bottleneck expansion factor) to one saliency channel per class
        self.gn_conv_last = nn.Conv2d(
            parameters["post_processing_dim"] * 4,
            parameters["num_classes"],
            (1, 1),
            bias=False,
        )

    def forward(self, x_out):
        return self.gn_conv_last(x_out)
class GlobalNetwork(AbstractMILUnit):
    """
    Implementation of Global Network using ResNet-22

    Runs a downsampling backbone over the whole image and converts its last
    feature map into a per-class saliency map (CAM).
    """
    def __init__(self, parameters, parent_module):
        super(GlobalNetwork, self).__init__(parameters, parent_module)
        # downsampling-branch
        if "use_v1_global" in parameters and parameters["use_v1_global"]:
            # pretrained torchvision backbone (see DownsampleNetworkResNet18V1)
            self.downsampling_branch = DownsampleNetworkResNet18V1()
        else:
            # from-scratch v2 network on single-channel input
            self.downsampling_branch = ResNetV2(input_channels=1, num_filters=16,
                                                # first conv layer
                                                first_layer_kernel_size=(7,7), first_layer_conv_stride=2,
                                                first_layer_padding=3,
                                                # first pooling layer
                                                first_pool_size=3, first_pool_stride=2, first_pool_padding=0,
                                                # res blocks architecture
                                                blocks_per_layer_list=[2, 2, 2, 2, 2],
                                                block_strides_list=[1, 2, 2, 2, 2],
                                                block_fn=BasicBlockV2,
                                                growth_factor=2)
        # post-processing
        self.postprocess_module = PostProcessingStandard(parameters)

    def add_layers(self):
        # register submodules on the parent nn.Module so parameters are tracked
        self.parent_module.ds_net = self.downsampling_branch
        self.parent_module.left_postprocess_net = self.postprocess_module

    def forward(self, x):
        # retrieve results from downsampling network at all 4 levels
        last_feature_map = self.downsampling_branch.forward(x)
        # feed into postprocessing network
        cam = self.postprocess_module.forward(last_feature_map)
        return last_feature_map, cam
class TopTPercentAggregationFunction(AbstractMILUnit):
    """
    An aggregator that uses the SM to compute the y_global.
    Use the sum of topK value
    """
    def __init__(self, parameters, parent_module):
        super(TopTPercentAggregationFunction, self).__init__(parameters, parent_module)
        # fraction of saliency-map pixels averaged per class (1.0 = all pixels)
        self.percent_t = parameters["percent_t"]
        self.parent_module = parent_module

    def forward(self, cam):
        """Average the top t-percent saliency values per class channel."""
        batch_size, num_class, height, width = cam.size()
        flat = cam.view(batch_size, num_class, -1)
        top_t = int(round(width * height * self.percent_t))
        top_vals = flat.topk(top_t, dim=2)[0]
        return top_vals.mean(dim=2)
class RetrieveROIModule(AbstractMILUnit):
    """
    A Regional Proposal Network instance that computes the locations of the crops
    Greedy select crops with largest sums
    """
    def __init__(self, parameters, parent_module):
        super(RetrieveROIModule, self).__init__(parameters, parent_module)
        # crops are addressed by their upper-left corner
        self.crop_method = "upper_left"
        self.num_crops_per_class = parameters["K"]
        self.crop_shape = parameters["crop_shape"]
        self.gpu_number = None if parameters["device_type"]!="gpu" else parameters["gpu_number"]

    def forward(self, x_original, cam_size, h_small):
        """
        Function that use the low-res image to determine the position of the high-res crops
        :param x_original: N, C, H, W pytorch tensor
        :param cam_size: (h, w)
        :param h_small: N, C, h_h, w_h pytorch tensor
        :return: N, num_classes*k, 2 numpy matrix; returned coordinates are corresponding to x_small
        """
        # retrieve parameters
        _, _, H, W = x_original.size()
        (h, w) = cam_size
        N, C, h_h, w_h = h_small.size()
        # make sure that the size of h_small == size of cam_size
        assert h_h == h, "h_h!=h"
        assert w_h == w, "w_h!=w"
        # adjust crop_shape since crop shape is based on the original image
        crop_x_adjusted = int(np.round(self.crop_shape[0] * h / H))
        crop_y_adjusted = int(np.round(self.crop_shape[1] * w / W))
        crop_shape_adjusted = (crop_x_adjusted, crop_y_adjusted)
        # greedily find the box with max sum of weights
        current_images = h_small
        all_max_position = []
        # combine channels: min-max normalize each class channel, then sum
        # over channels so one map drives the greedy selection
        max_vals = current_images.view(N, C, -1).max(dim=2, keepdim=True)[0].unsqueeze(-1)
        min_vals = current_images.view(N, C, -1).min(dim=2, keepdim=True)[0].unsqueeze(-1)
        range_vals = max_vals - min_vals
        normalize_images = current_images - min_vals
        normalize_images = normalize_images / range_vals
        current_images = normalize_images.sum(dim=1, keepdim=True)
        for _ in range(self.num_crops_per_class):
            max_pos = tools.get_max_window(current_images, crop_shape_adjusted, "avg")
            all_max_position.append(max_pos)
            # zero out the chosen window so the next pick is disjoint (greedy NMS)
            mask = tools.generate_mask_uplft(current_images, crop_shape_adjusted, max_pos, self.gpu_number)
            current_images = current_images * mask
        return torch.cat(all_max_position, dim=1).data.cpu().numpy()
class LocalNetwork(AbstractMILUnit):
    """
    The local network that takes a crop and computes its hidden representation
    Use ResNet
    """
    def add_layers(self):
        """
        Function that add layers to the parent module that implements nn.Module
        :return:
        """
        # attach the crop encoder to the parent so its parameters are registered
        self.parent_module.dn_resnet = DownsampleNetworkResNet18V1()

    def forward(self, x_crop):
        """
        Function that takes in a single crop and return the hidden representation
        :param x_crop: (N,C,h,w)
        :return:
        """
        # replicate the single channel to 3 channels for the ResNet backbone
        feature_map = self.parent_module.dn_resnet(x_crop.expand(-1, 3, -1, -1))
        # global average pooling over both spatial dimensions
        return feature_map.mean(dim=2).mean(dim=2)
class AttentionModule(AbstractMILUnit):
    """
    The attention module takes multiple hidden representations and compute the attention-weighted average
    Use Gated Attention Mechanism in https://arxiv.org/pdf/1802.04712.pdf
    """
    def add_layers(self):
        """
        Function that add layers to the parent module that implements nn.Module
        :return:
        """
        # The gated attention mechanism
        # 512*4 matches the crop feature dim produced by the local network
        self.parent_module.mil_attn_V = nn.Linear(512*4, 128, bias=False)
        self.parent_module.mil_attn_U = nn.Linear(512*4, 128, bias=False)
        self.parent_module.mil_attn_w = nn.Linear(128, 1, bias=False)
        # classifier
        self.parent_module.classifier_linear = nn.Linear(512*4, self.parameters["num_classes"], bias=False)

    def forward(self, h_crops):
        """
        Function that takes in the hidden representations of crops and use attention to generate a single hidden vector
        :param h_crops: (batch, num_crops, h_dim) crop representations
        :return: (z_weighted_avg, attn, y_crops) — pooled vector, attention
            weights, and the local class scores
        """
        batch_size, num_crops, h_dim = h_crops.size()
        h_crops_reshape = h_crops.view(batch_size * num_crops, h_dim)
        # calculate the attn score
        # gated attention: sigmoid gate multiplied with tanh projection
        attn_projection = torch.sigmoid(self.parent_module.mil_attn_U(h_crops_reshape)) * \
                          torch.tanh(self.parent_module.mil_attn_V(h_crops_reshape))
        attn_score = self.parent_module.mil_attn_w(attn_projection)
        # use softmax to map score to attention
        attn_score_reshape = attn_score.view(batch_size, num_crops)
        attn = F.softmax(attn_score_reshape, dim=1)
        # final hidden vector: attention-weighted average over crops
        z_weighted_avg = torch.sum(attn.unsqueeze(-1) * h_crops, 1)
        # map to the final layer
        y_crops = self.parent_module.classifier_linear(z_weighted_avg)
return z_weighted_avg, attn, y_crops | 15,698 | 34.679545 | 119 | py |
DiffMIC | DiffMIC-main/pretraining/resnet.py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """3x3 + 3x3 post-activation residual block (ResNet-18/34 style)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # identity shortcut, projected when shape/stride changes
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        # add shortcut, then the final activation
        return self.relu(h + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 style)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(Bottleneck, self).__init__()
        # internal width scales with base_width and groups
        width = int(planes * (base_width / 64.)) * groups
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1,
                               stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, padding=1,
                               stride=stride, groups=groups,
                               dilation=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, planes * self.expansion,
                               kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # identity shortcut, projected when shape/stride changes
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # add shortcut, then the final activation
        return self.relu(h + shortcut)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (stride 1), four stages, then a 4x4
    average pool and a linear classifier. Expects 32x32 RGB input."""

    def __init__(self, block, layers, num_classes=10,
                 groups=1, width_per_group=64):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.dilation = 1
        self.groups = groups
        self.base_width = width_per_group
        # 3x3 stem keeps the input resolution (no 7x7/maxpool as in ImageNet)
        self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_planes)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one may downsample."""
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            # 1x1 projection so the shortcut matches shape and stride
            downsample = nn.Sequential(
                nn.Conv2d(self.in_planes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )
        blocks = [block(self.in_planes, planes, stride, downsample)]
        self.in_planes = planes * block.expansion
        blocks.extend(block(self.in_planes, planes) for _ in range(1, num_blocks))
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer4(self.layer3(self.layer2(self.layer1(h))))
        h = F.avg_pool2d(h, 4)  # expects a 4x4 feature map here (32x32 input)
        return self.fc(h.view(h.size(0), -1))
def build_ResNet(cfg):
    """Instantiate a ResNet from a config dict (block type, layer counts, classes)."""
    block_registry = {
        "Basic": BasicBlock,
        "Bottleneck": Bottleneck,
    }
    block_cls = block_registry[cfg['RESNET_BLOCK']]
    return ResNet(block_cls, cfg['RESNET_LAYERS'], cfg['NUM_CLASSES'])
def ResNet18(num_classes=10):
    """Convenience constructor for an 18-layer (2-2-2-2 BasicBlock) ResNet."""
    return build_ResNet({
        "RESNET_BLOCK": "Basic",
        "RESNET_LAYERS": (2, 2, 2, 2),
        "NUM_CLASSES": num_classes,
    })
| 4,706 | 31.6875 | 78 | py |
DiffMIC | DiffMIC-main/pretraining/densenet.py | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torchvision._internally_replaced_utils import load_state_dict_from_url
from torch import Tensor
from typing import Any, List, Tuple
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
class _DenseLayer(nn.Module):
    """Single DenseNet layer: BN-ReLU-1x1conv bottleneck, then BN-ReLU-3x3conv.

    Consumes the concatenation of all previous feature maps and produces
    ``growth_rate`` new channels; optionally re-computes the bottleneck
    during backward ("memory efficient" mode) via checkpointing.
    """
    def __init__(
        self,
        num_input_features: int,
        growth_rate: int,
        bn_size: int,
        drop_rate: float,
        memory_efficient: bool = False
    ) -> None:
        super(_DenseLayer, self).__init__()
        self.norm1: nn.BatchNorm2d
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.relu1: nn.ReLU
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.conv1: nn.Conv2d
        # 1x1 bottleneck: reduce the concatenated input to bn_size*growth_rate channels
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                                           growth_rate, kernel_size=1, stride=1,
                                           bias=False))
        self.norm2: nn.BatchNorm2d
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.relu2: nn.ReLU
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.conv2: nn.Conv2d
        # 3x3 conv producing the growth_rate new feature channels
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1,
                                           bias=False))
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        # concatenate all previous feature maps, then run the bottleneck
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))  # noqa: T484
        return bottleneck_output

    # todo: rewrite when torchscript supports any
    def any_requires_grad(self, input: List[Tensor]) -> bool:
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False

    @torch.jit.unused  # noqa: T484
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        # checkpoint: drop the bottleneck activations and recompute in backward
        def closure(*inputs):
            return self.bn_function(inputs)

        return cp.checkpoint(closure, *input)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: List[Tensor]) -> Tensor:
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: Tensor) -> Tensor:
        pass

    # torchscript does not yet support *args, so we overload method
    # allowing it to take either a List[Tensor] or single Tensor
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input

        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                raise Exception("Memory Efficient not supported in JIT")

            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)

        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate,
                                     training=self.training)
        return new_features
class _DenseBlock(nn.ModuleDict):
    """Dense block: ``num_layers`` dense layers with cumulative concatenation.

    Layer i receives num_input_features + i * growth_rate input channels;
    the block's output concatenates the input with every layer's output.
    """
    # kept for torchvision state-dict compatibility
    _version = 2

    def __init__(
        self,
        num_layers: int,
        num_input_features: int,
        bn_size: int,
        growth_rate: int,
        drop_rate: float,
        memory_efficient: bool = False
    ) -> None:
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.add_module('denselayer%d' % (i + 1), layer)

    def forward(self, init_features: Tensor) -> Tensor:
        # each layer consumes the list of all previously produced feature maps
        features = [init_features]
        for name, layer in self.items():
            new_features = layer(features)
            features.append(new_features)
        return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    NOTE: unlike torchvision's reference implementation, :meth:`forward` here
    returns the final ReLU-activated feature map — global pooling and the
    ``classifier`` layer are commented out — so callers receive features,
    not class logits. ``classifier`` is kept only for checkpoint loading.
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
          but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """
    def __init__(
        self,
        growth_rate: int = 32,
        block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
        num_init_features: int = 64,
        bn_size: int = 4,
        drop_rate: float = 0,
        num_classes: int = 1000,
        memory_efficient: bool = False
    ) -> None:
        super(DenseNet, self).__init__()
        # First convolution. Module names ('conv0', 'norm0', ...) are the
        # state_dict keys expected by pretrained checkpoints — do not rename.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
                                padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Each denseblock, followed by a transition except after the last one
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # transition halves both channel count and spatial size
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        # Linear layer (unused by forward below; kept for checkpoint compatibility)
        self.classifier = nn.Linear(num_features, num_classes)
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # only the bias is zeroed; the weight keeps its default init
                nn.init.constant_(m.bias, 0)
    def forward(self, x: Tensor) -> Tensor:
        # Returns the pre-pooling feature map (see class docstring).
        features = self.features(x)
        out = F.relu(features, inplace=True)
        #out = F.adaptive_avg_pool2d(out, (1, 1))
        #out = torch.flatten(out, 1)
        #out = self.classifier(out)
        return out
def _load_state_dict(model: nn.Module, model_url: str, progress: bool) -> None:
    """Download a DenseNet checkpoint and load it into *model*, renaming
    legacy keys on the fly.

    '.'s are no longer allowed in module names, but the previous _DenseLayer
    had keys like 'norm.1', 'relu.1', 'conv.1', 'norm.2', ... which still
    appear in the checkpoints in model_urls; they are rewritten to the
    modern 'norm1'/'conv2' form before loading.
    """
    legacy_key_re = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    for old_key in list(state_dict.keys()):
        match = legacy_key_re.match(old_key)
        if match is None:
            continue
        # drop the dot between e.g. 'norm' and '1'
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    model.load_state_dict(state_dict)
def _densenet(
    arch: str,
    growth_rate: int,
    block_config: Tuple[int, int, int, int],
    num_init_features: int,
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> DenseNet:
    """Build a DenseNet variant; optionally load the ImageNet weights
    registered for *arch* in model_urls."""
    net = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if pretrained:
        _load_state_dict(net, model_urls[arch], progress)
    return net
def densenet121(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    The required minimum input size of the model is 29x29.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool): If True, uses checkpointing — much more memory
            efficient but slower (see https://arxiv.org/pdf/1707.06990.pdf).
            Default: False.
    """
    return _densenet(
        'densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs)
def densenet161(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    The required minimum input size of the model is 29x29.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool): If True, uses checkpointing — much more memory
            efficient but slower (see https://arxiv.org/pdf/1707.06990.pdf).
            Default: False.
    """
    return _densenet(
        'densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs)
def densenet169(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    The required minimum input size of the model is 29x29.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool): If True, uses checkpointing — much more memory
            efficient but slower (see https://arxiv.org/pdf/1707.06990.pdf).
            Default: False.
    """
    return _densenet(
        'densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, **kwargs)
def densenet201(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
    The required minimum input size of the model is 29x29.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        memory_efficient (bool): If True, uses checkpointing — much more memory
            efficient but slower (see https://arxiv.org/pdf/1707.06990.pdf).
            Default: False.
    """
    return _densenet(
        'densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs)
| 12,797 | 39.628571 | 105 | py |
DiffMIC | DiffMIC-main/pretraining/tools.py | import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
def partition_batch(ls, size):
    """Split *ls* into consecutive buckets of at most *size* elements."""
    buckets = []
    start = 0
    while start < len(ls):
        buckets.append(ls[start:start + size])
        start += size
    return buckets
def make_sure_in_range(val, min_val, max_val):
    """Clamp *val* to [min_val, max_val]; the exceeded limit is returned."""
    if val < min_val:
        return min_val
    return max_val if val > max_val else val
def crop(original_img, crop_shape, crop_position, method="center",
         in_place=False, background_val="min"):
    """
    Take a crop from a 2-D numpy image.
    Stays in numpy on purpose: original_img is large and should not be moved
    into PyTorch/GPU memory just for cropping.
    :param original_img: 2-D numpy array
    :param crop_shape: (height, width) of the crop window
    :param crop_position: crop anchor; meaning depends on *method*
    :param method: "center" (anchor is the window centre) or "upper_left"
    :param in_place: if True, flag the window with 1.0 inside original_img
        and return None; otherwise return the cropped content
    :param background_val: fill for out-of-bounds area ("min" uses
        original_img.min())
    """
    img_h, img_w = original_img.shape
    anchor_x, anchor_y = crop_position
    span_x, span_y = crop_shape
    # locate the four corners of the crop window
    if method == "center":
        min_x = int(np.round(anchor_x - span_x / 2))
        max_x = int(np.round(anchor_x + span_x / 2))
        min_y = int(np.round(anchor_y - span_y / 2))
        max_y = int(np.round(anchor_y + span_y / 2))
    elif method == "upper_left":
        min_x = int(np.round(anchor_x))
        max_x = int(np.round(anchor_x + span_x))
        min_y = int(np.round(anchor_y))
        max_y = int(np.round(anchor_y + span_y))
    # clip the window to the image bounds
    min_x = make_sure_in_range(min_x, 0, img_h)
    max_x = make_sure_in_range(max_x, 0, img_h)
    min_y = make_sure_in_range(min_y, 0, img_w)
    max_y = make_sure_in_range(max_y, 0, img_w)
    if in_place:
        # flag the effective pixels directly in the source image
        original_img[min_x:max_x, min_y:max_y] = 1.0
    else:
        # background defaults to the (normalized) minimum of the image
        if background_val == "min":
            out = np.ones(crop_shape) * np.min(original_img)
        else:
            out = np.ones(crop_shape) * background_val
        kept_x = max_x - min_x
        kept_y = max_y - min_y
        # out-of-bounds crops keep their content anchored bottom-right
        out[crop_shape[0] - kept_x:, crop_shape[1] - kept_y:] = \
            original_img[min_x:max_x, min_y:max_y]
        return out
def get_crop_mask(loc, crop_shape, image_shape, method, indicator=True):
    """
    Build a 2-D mask of *image_shape* flagging every cropped region with 1.0.
    :param loc: iterable of crop positions
    :param crop_shape: (h, w) of each crop window
    :param image_shape: shape of the mask to build
    :param method: anchor interpretation, forwarded to crop()
    :param indicator: if True, mark each anchor point with the sentinel 999.0
        (overwritten with 1.0 when the anchor lies inside its own window)
    """
    mask = np.zeros(image_shape)
    for crop_loc in loc:
        if indicator:
            # sentinel marking the anchor point itself
            mask[int(crop_loc[0]), int(crop_loc[1])] = 999.0
        # flag the cropped region with 1.0, writing into mask in place
        crop(mask, crop_shape, crop_loc, method=method, in_place=True)
    return mask
def crop_pytorch(original_img_pytorch, crop_shape, crop_position, out,
                 method="center", background_val="min"):
    """
    Take a crop from a 2-D PyTorch tensor, writing the result into *out*.
    :param original_img_pytorch: (H, W) tensor
    :param crop_shape: (h, w) integer tuple
    :param crop_position: crop anchor; meaning depends on *method*
    :param out: (h, w) tensor the crop is written into (modified in place)
    :param method: "center" or "upper_left"
    :param background_val: fill for out-of-bounds area ("min" uses the
        tensor minimum)
    """
    img_h, img_w = original_img_pytorch.shape
    anchor_x, anchor_y = crop_position
    span_x, span_y = crop_shape
    # locate the four corners of the crop window
    if method == "center":
        min_x = int(np.round(anchor_x - span_x / 2))
        max_x = int(np.round(anchor_x + span_x / 2))
        min_y = int(np.round(anchor_y - span_y / 2))
        max_y = int(np.round(anchor_y + span_y / 2))
    elif method == "upper_left":
        min_x = int(np.round(anchor_x))
        max_x = int(np.round(anchor_x + span_x))
        min_y = int(np.round(anchor_y))
        max_y = int(np.round(anchor_y + span_y))
    # clip the window to the image bounds
    min_x = make_sure_in_range(min_x, 0, img_h)
    max_x = make_sure_in_range(max_x, 0, img_h)
    min_y = make_sure_in_range(min_y, 0, img_w)
    max_y = make_sure_in_range(max_y, 0, img_w)
    # fill background, then paste the in-bounds content bottom-right aligned
    if background_val == "min":
        out[:, :] = original_img_pytorch.min()
    else:
        out[:, :] = background_val
    kept_x = max_x - min_x
    kept_y = max_y - min_y
    out[crop_shape[0] - kept_x:, crop_shape[1] - kept_y:] = \
        original_img_pytorch[min_x:max_x, min_y:max_y]
def get_max_window(input_image, window_shape, pooling_logic="avg"):
    """
    Function that makes a sliding window of size window_shape over the
    input_image and return the UPPER_LEFT corner index with max sum
    :param input_image: N*C*H*W
    :param window_shape: h*w
    :param pooling_logic: "avg" (window mean), "std" (window std-dev) or
        "avg_entropy" (mean binary entropy; assumes values lie in (0, 1),
        otherwise the log produces NaN/inf — TODO confirm against callers)
    :return: N*C*2 tensor
    """
    N, C, H, W = input_image.size()
    if pooling_logic == "avg":
        # use average pooling to locate the window sums
        pool_map = torch.nn.functional.avg_pool2d(input_image, window_shape, stride=1)
    elif pooling_logic in ["std", "avg_entropy"]:
        # create sliding windows: (N, C, h*w, num_windows)
        output_size = (H - window_shape[0] + 1, W - window_shape[1] + 1)
        sliding_windows = F.unfold(input_image, kernel_size=window_shape).view(N,C, window_shape[0]*window_shape[1], -1)
        # apply aggregation function on each sliding windows
        if pooling_logic == "std":
            agg_res = sliding_windows.std(dim=2, keepdim=False)
        elif pooling_logic == "avg_entropy":
            # per-pixel binary entropy -p*log(p) - (1-p)*log(1-p)
            agg_res = -sliding_windows*torch.log(sliding_windows)-(1-sliding_windows)*torch.log(1-sliding_windows)
            agg_res = agg_res.mean(dim=2, keepdim=False)
        # merge back into an (N, C, H', W') score map
        pool_map = F.fold(agg_res, kernel_size=(1, 1), output_size=output_size)
    _, _, _, W_map = pool_map.size()
    # transform to linear and get the index of the max val locations
    _, max_linear_idx = torch.max(pool_map.view(N, C, -1), -1)
    # convert back to 2d index (integer division by the map width)
    #max_idx_x = max_linear_idx // W_map
    max_idx_x = torch.div(max_linear_idx, W_map, rounding_mode='trunc')
    max_idx_y = max_linear_idx - max_idx_x * W_map
    # put together the 2d index
    upper_left_points = torch.cat([max_idx_x.unsqueeze(-1), max_idx_y.unsqueeze(-1)], dim=-1)
    return upper_left_points
def generate_mask_uplft(input_image, window_shape, upper_left_points, gpu_number):
    """
    Build a multiplicative mask that zeroes out, for each (N, C) map, the
    window of size *window_shape* anchored at the given upper-left corner.
    :param input_image: (N, C, H, W) tensor (only shape is used here)
    :param window_shape: (h, w) of the window to blank out
    :param upper_left_points: (N, C, 2) tensor of upper-left corners
    :param gpu_number: CUDA device index, or None to stay on CPU
    :return: (N, C, H, W) float mask: 0.0 inside each window, 1.0 outside
    """
    N, C, H, W = input_image.size()
    window_h, window_w = window_shape
    # per-(N, C) window bounds, half-open: [min, max)
    mask_x_min = upper_left_points[:, :, 0]
    mask_x_max = upper_left_points[:, :, 0] + window_h
    mask_y_min = upper_left_points[:, :, 1]
    mask_y_max = upper_left_points[:, :, 1] + window_w
    # coordinate grids broadcast against the per-map bounds.
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4),
    # so plain tensors are used here.
    mask_x = torch.arange(0, H).view(-1, 1).repeat(N, C, 1, W)
    mask_y = torch.arange(0, W).view(1, -1).repeat(N, C, H, 1)
    if gpu_number is not None:
        device = torch.device("cuda:{}".format(gpu_number))
        mask_x = mask_x.to(device)
        mask_y = mask_y.to(device)
    x_gt_min = mask_x.float() >= mask_x_min.unsqueeze(-1).unsqueeze(-1).float()
    x_ls_max = mask_x.float() < mask_x_max.unsqueeze(-1).unsqueeze(-1).float()
    y_gt_min = mask_y.float() >= mask_y_min.unsqueeze(-1).unsqueeze(-1).float()
    y_ls_max = mask_y.float() < mask_y_max.unsqueeze(-1).unsqueeze(-1).float()
    # logical AND via multiplication, then invert: 1 outside, 0 inside
    selected_x = x_gt_min * x_ls_max
    selected_y = y_gt_min * y_ls_max
    selected = selected_x * selected_y
    mask = 1 - selected.float()
    return mask
DiffMIC | DiffMIC-main/dataloader/loading.py | import os, torch, cv2, random
import numpy as np
from torch.utils.data import Dataset, Sampler
import torchvision.transforms as transforms
from scipy.ndimage.morphology import binary_erosion
import torchvision.transforms.functional as TF
from PIL import Image, ImageOps
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from skimage import filters
import numpy as np
import imageio
import dataloader.transforms as trans
import json, numbers
from glob import glob
import pickle
class BUDataset(Dataset):
    """Breast-ultrasound image classification dataset (name suggests BUSI-style
    data — TODO confirm).
    *data_list* is the path of a pickle file holding a list of dicts, each
    with keys 'img_root' (image path) and 'label' (integer class).
    Train mode adds horizontal-flip and rotation augmentation; both modes
    center-square crop, resize to 224x224 and normalize with ImageNet stats.
    """
    def __init__(self, data_list, train=True):
        # spatial size fed to the network
        self.trainsize = (224,224)
        self.train = train
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # files — only load trusted data lists.
        with open(data_list, "rb") as f:
            tr_dl = pickle.load(f)
        self.data_list = tr_dl
        self.size = len(self.data_list)
        #print(self.size)
        if train:
            # training pipeline with light geometric augmentation
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                trans.RandomHorizontalFlip(),
                #trans.RandomVerticalFlip(),
                trans.RandomRotation(30),
                #trans.adjust_light(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            # deterministic evaluation pipeline
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    def __getitem__(self, index):
        """Return (transformed RGB image tensor, int label) for record *index*."""
        data_pac = self.data_list[index]
        img_path = data_pac['img_root']
        #cl_img, cr_img, ml_img, mr_img = None
        img = Image.open(img_path).convert('RGB')
        img_torch = self.transform_center(img)
        label = int(data_pac['label'])
        return img_torch, label
    def __len__(self):
        return self.size
class APTOSDataset(Dataset):
    """APTOS image classification dataset (presumably APTOS blindness-detection
    fundus photographs — TODO confirm).
    *data_list* is the path of a pickle file holding a list of dicts, each
    with keys 'img_root' (image path) and 'label' (integer class).
    Train mode adds horizontal/vertical flips and rotation; both modes
    center-square crop, resize to 224x224 and normalize with ImageNet stats.
    """
    def __init__(self, data_list, train=True):
        # spatial size fed to the network
        self.trainsize = (224,224)
        self.train = train
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # files — only load trusted data lists.
        with open(data_list, "rb") as f:
            tr_dl = pickle.load(f)
        self.data_list = tr_dl
        self.size = len(self.data_list)
        #print(self.size)
        if train:
            # training pipeline: both flips enabled here (unlike BUDataset)
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                trans.RandomHorizontalFlip(),
                trans.RandomVerticalFlip(),
                trans.RandomRotation(30),
                #trans.adjust_light(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            # deterministic evaluation pipeline
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        #self.depths_transform = transforms.Compose([transforms.Resize((self.trainsize, self.trainsize)),transforms.ToTensor()])
    def __getitem__(self, index):
        """Return (transformed RGB image tensor, int label) for record *index*."""
        data_pac = self.data_list[index]
        img_path = data_pac['img_root']
        #cl_img, cr_img, ml_img, mr_img = None
        img = Image.open(img_path).convert('RGB')
        img_torch = self.transform_center(img)
        label = int(data_pac['label'])
        return img_torch, label
    def __len__(self):
        return self.size
class ISICDataset(Dataset):
    """ISIC image classification dataset (presumably ISIC skin-lesion
    images — TODO confirm).
    *data_list* is the path of a pickle file holding a list of dicts, each
    with keys 'img_root' (image path) and 'label' (integer class).
    Train mode adds horizontal-flip and rotation augmentation; both modes
    center-square crop, resize to 224x224 and normalize with ImageNet stats.
    """
    def __init__(self, data_list, train=True):
        # spatial size fed to the network
        self.trainsize = (224,224)
        self.train = train
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # files — only load trusted data lists.
        with open(data_list, "rb") as f:
            tr_dl = pickle.load(f)
        self.data_list = tr_dl
        self.size = len(self.data_list)
        if train:
            # training pipeline with light geometric augmentation
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                trans.RandomHorizontalFlip(),
                #trans.RandomVerticalFlip(),
                trans.RandomRotation(30),
                #trans.adjust_light(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            # deterministic evaluation pipeline
            self.transform_center = transforms.Compose([
                trans.CropCenterSquare(),
                transforms.Resize(self.trainsize),
                #trans.CenterCrop(self.trainsize),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        #self.depths_transform = transforms.Compose([transforms.Resize((self.trainsize, self.trainsize)),transforms.ToTensor()])
    def __getitem__(self, index):
        """Return (transformed RGB image tensor, int label) for record *index*."""
        data_pac = self.data_list[index]
        img_path = data_pac['img_root']
        #cl_img, cr_img, ml_img, mr_img = None
        img = Image.open(img_path).convert('RGB')
        img_torch = self.transform_center(img)
        label = int(data_pac['label'])
        return img_torch, label
    def __len__(self):
        return self.size
| 5,506 | 31.976048 | 128 | py |
DiffMIC | DiffMIC-main/dataloader/functional.py | import math
import random
from PIL import Image, ImageEnhance, ImageOps
try:
import accimage
except ImportError:
accimage = None
import collections
import numbers
import types
import warnings
import cv2
import numpy as np
from PIL import Image
# Mapping from torchvision-style padding-mode names to OpenCV border flags.
_cv2_pad_to_str = {
    'constant': cv2.BORDER_CONSTANT,
    'edge': cv2.BORDER_REPLICATE,
    'reflect': cv2.BORDER_REFLECT_101,
    'symmetric': cv2.BORDER_REFLECT
}
# Mapping from PIL-style interpolation names to OpenCV interpolation flags.
_cv2_interpolation_to_str = {
    'nearest': cv2.INTER_NEAREST,
    'bilinear': cv2.INTER_LINEAR,
    'area': cv2.INTER_AREA,
    'bicubic': cv2.INTER_CUBIC,
    'lanczos': cv2.INTER_LANCZOS4
}
# Reverse lookup: OpenCV flag -> interpolation name.
_cv2_interpolation_from_str = {v: k for k, v in _cv2_interpolation_to_str.items()}
def _is_pil_image(img):
    """Return True if *img* is a PIL image (or an accimage image when the
    accimage backend is available)."""
    accepted = (Image.Image, accimage.Image) if accimage is not None else Image.Image
    return isinstance(img, accepted)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3, 4})
def resize(img, size, interpolation=cv2.INTER_LINEAR):
    r"""Resize the input numpy ndarray to the given size.
    Args:
        img (numpy ndarray): Image to be resized (H x W or H x W x C).
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number
            maintaining the aspect ratio.
        interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``
    Returns:
        numpy ndarray: Resized image.
    Raises:
        TypeError: if *img* is not a numpy image or *size* is malformed.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))
    h, w = img.shape[0], img.shape[1]
    if isinstance(size, int):
        # match the smaller edge to `size`, keeping aspect ratio
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
    else:
        ow, oh = size[1], size[0]
    output = cv2.resize(img, dsize=(ow, oh), interpolation=interpolation)
    # cv2.resize drops a trailing singleton channel axis; restore it.
    # Fix: guard on ndim — the old `img.shape[2]` raised IndexError for 2-D
    # (H x W) grayscale input, which _is_numpy_image explicitly accepts.
    if img.ndim > 2 and img.shape[2] == 1:
        return output[:, :, np.newaxis]
    else:
        return output
def scale(*args, **kwargs):
    """Deprecated alias kept for backward compatibility; use resize() instead."""
    warnings.warn("The use of the transforms.Scale transform is deprecated, "
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)
def pad(img, padding, fill=0, padding_mode='constant'):
    r"""Pad the given numpy ndarray on all sides with specified padding mode and fill value.
    Args:
        img (numpy ndarray): image to be padded.
        padding (int or tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill: Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
            - constant: pads with a constant value, this value is specified with fill
            - edge: pads with the last value on the edge of the image
            - reflect: pads with reflection of image (without repeating the last value on the edge)
                padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                will result in [3, 2, 1, 2, 3, 4, 3, 2]
            - symmetric: pads with reflection of image (repeating the last value on the edge)
                padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                will result in [2, 1, 1, 2, 3, 4, 4, 3]
    Returns:
        Numpy image: padded image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
    if not isinstance(padding, (numbers.Number, tuple, list)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if not isinstance(padding_mode, str):
        raise TypeError('Got inappropriate padding_mode arg')
    # Fix: `collections.Sequence` was removed in Python 3.10; the ABC lives in
    # collections.abc (this file already uses collections.abc.Iterable above).
    if isinstance(padding, collections.abc.Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))
    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
        'Padding mode should be either constant, edge, reflect or symmetric'
    if isinstance(padding, int):
        pad_left = pad_right = pad_top = pad_bottom = padding
    if isinstance(padding, collections.abc.Sequence) and len(padding) == 2:
        pad_left = pad_right = padding[0]
        pad_top = pad_bottom = padding[1]
    if isinstance(padding, collections.abc.Sequence) and len(padding) == 4:
        pad_left = padding[0]
        pad_top = padding[1]
        pad_right = padding[2]
        pad_bottom = padding[3]
    # cv2.copyMakeBorder drops a trailing singleton channel axis; restore it.
    if img.shape[2] == 1:
        return cv2.copyMakeBorder(img,
                                  top=pad_top,
                                  bottom=pad_bottom,
                                  left=pad_left,
                                  right=pad_right,
                                  borderType=_cv2_pad_to_str[padding_mode],
                                  value=fill)[:, :, np.newaxis]
    else:
        return cv2.copyMakeBorder(img,
                                  top=pad_top,
                                  bottom=pad_bottom,
                                  left=pad_left,
                                  right=pad_right,
                                  borderType=_cv2_pad_to_str[padding_mode],
                                  value=fill)
def crop(img, i, j, h, w):
    """Crop a numpy image: rows i:i+h, columns j:j+w, all channels kept.
    Args:
        img (numpy ndarray): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.
    Returns:
        numpy ndarray: Cropped image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy image. Got {}'.format(type(img)))
    rows = slice(i, i + h)
    cols = slice(j, j + w)
    return img[rows, cols, :]
def crop3d(img, i, j, k, d, h, w):
    """Crop a 4-D numpy volume: depth i:i+d, rows j:j+h, cols k:k+w.
    Args:
        img (numpy ndarray): volume to be cropped (D, H, W, C).
    Returns:
        numpy ndarray: Cropped volume.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy image. Got {}'.format(type(img)))
    depth = slice(i, i + d)
    rows = slice(j, j + h)
    cols = slice(k, k + w)
    return img[depth, rows, cols, :]
def center_crop(img, output_size):
    """Crop the central region of a 3-D (H, W, C) or 4-D (D, H, W, C) image.
    An integer *output_size* is expanded to a square (or cube) of that size.
    """
    if len(img.shape) == 3:
        if isinstance(output_size, numbers.Number):
            output_size = (int(output_size), int(output_size))
        th, tw = output_size
        h, w = img.shape[0:2]
        top = int(round((h - th) / 2.))
        left = int(round((w - tw) / 2.))
        return crop(img, top, left, th, tw)
    elif len(img.shape) == 4:
        d, h, w = img.shape[:3]
        if isinstance(output_size, numbers.Number):
            output_size = (int(output_size), int(output_size), int(output_size))
        td, th, tw = output_size
        front = int(round((d - td) / 2.))
        top = int(round((h - th) / 2.))
        left = int(round((w - tw) / 2.))
        return crop3d(img, front, top, left, td, th, tw)
def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):
    """Crop region (i, j, h, w) out of *img*, then resize the crop to *size*.
    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
    Args:
        img (numpy ndarray): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.
        size (sequence or int): Desired output size, as in ``resize``.
        interpolation (int, optional): OpenCV interpolation flag.
    Returns:
        numpy ndarray: Cropped and resized image.
    """
    assert _is_numpy_image(img), 'img should be numpy image'
    region = crop(img, i, j, h, w)
    return resize(region, size, interpolation=interpolation)
def flip(im, axis):
    """
    axis: str
        z - flip along Depth (z-axis)
        y - flip along Height (y-axis)
        x - flip along Width (x-axis)
    """
    if len(im.shape) == 4:  # D, H, W, C
        axis = {"z": 0, "y": 1, "x": 2}[axis]
    elif len(im.shape) == 3:  # H, W, C
        # NOTE(review): here "x" maps to axis 0 (Height) and "y" to axis 1
        # (Width), which contradicts both the docstring above and the 4-D
        # branch — confirm against callers before relying on these names.
        axis = {"x": 0, "y": 1}[axis]
    im = np.flip(im, axis)
    return im
def five_crop(img, size):
    """Return the four corner crops and the center crop of a numpy image.
    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.
    Args:
        size (sequence or int): (h, w) of each crop; an int gives a square.
    Returns:
        tuple: (tl, tr, bl, br, center) crops.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
    img_h, img_w = img.shape[0:2]
    crop_h, crop_w = size
    if crop_w > img_w or crop_h > img_h:
        raise ValueError("Requested crop size {} is bigger than input size {}".format(size, (img_h, img_w)))
    corners = (
        crop(img, 0, 0, crop_h, crop_w),
        crop(img, 0, img_w - crop_w, crop_h, crop_w),
        crop(img, img_h - crop_h, 0, crop_h, crop_w),
        crop(img, img_h - crop_h, img_w - crop_w, crop_h, crop_w),
    )
    return corners + (center_crop(img, (crop_h, crop_w)),)
def ten_crop(img, size, vertical_flip=False):
    r"""Return five_crop of *img* plus five_crop of its flipped version
    (horizontal flip by default, vertical when *vertical_flip* is True).
    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.
    Args:
        size (sequence or int): (h, w) of each crop; an int gives a square.
        vertical_flip (bool): Use vertical flipping instead of horizontal.
    Returns:
        tuple: (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip,
        center_flip) — the five crops of the original image followed by the
        five crops of the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
    original_crops = five_crop(img, size)
    flipped = vflip(img) if vertical_flip else hflip(img)
    return original_crops + five_crop(flipped, size)
def adjust_brightness(img, brightness_factor):
    """Scale pixel intensities of a uint8 image via a lookup table.
    Args:
        img (numpy ndarray): image to be adjusted.
        brightness_factor (float): non-negative multiplier; 0 gives a black
            image, 1 the original, 2 doubles the brightness.
    Returns:
        numpy ndarray: brightness-adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0) does the
    # same thing but is a bit slower than the LUT.
    lut = np.clip(np.arange(0, 256) * brightness_factor, 0, 255).astype('uint8')
    mapped = cv2.LUT(img, lut)
    # cv2.LUT drops a trailing singleton channel axis; restore it.
    return mapped[:, :, np.newaxis] if img.shape[2] == 1 else mapped
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.
    Returns:
        numpy ndarray: Contrast adjusted image.
    """
    # much faster to use the LUT construction than anything else I've tried
    # it's because you have to change dtypes multiple times
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    # input is RGB: reference mean is taken on the grayscale conversion
    if img.ndim > 2 and img.shape[2] == 3:
        mean_value = round(cv2.mean(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))[0])
    elif img.ndim == 2:
        # grayscale input
        mean_value = round(cv2.mean(img)[0])
    else:
        # multichannel input
        mean_value = round(np.mean(img))
    # LUT: scale every intensity about the image mean, clipped to uint8 range
    table = np.array([(i - mean_value) * contrast_factor + mean_value for i in range(0, 256)]).clip(0,
                                                                                                    255).astype('uint8')
    # enhancer = ImageEnhance.Contrast(img)
    # img = enhancer.enhance(contrast_factor)
    # single-channel results get their trailing channel axis restored
    if img.ndim == 2 or img.shape[2] == 1:
        return cv2.LUT(img, table)[:, :, np.newaxis]
    else:
        return cv2.LUT(img, table)
def adjust_saturation(img, saturation_factor):
    """Adjust colour saturation of an image.
    Args:
        img (numpy ndarray): image to be adjusted.
        saturation_factor (float): 0 gives a black-and-white image, 1 the
            original, 2 doubles the saturation.
    Returns:
        numpy ndarray: saturation-adjusted image.
    """
    # Delegates to PIL's ImageEnhance (~10ms slower than staying in numpy).
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    enhanced = ImageEnhance.Color(Image.fromarray(img)).enhance(saturation_factor)
    return np.array(enhanced)
def adjust_hue(img, hue_factor):
    """Adjust hue of an image.
    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.
    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.
    See `Hue`_ for more details.
    .. _Hue: https://en.wikipedia.org/wiki/Hue
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        hue_factor (float): How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.
    Returns:
        numpy ndarray: Hue adjusted image.
    Raises:
        ValueError: if *hue_factor* is outside [-0.5, 0.5].
        TypeError: if *img* is not a numpy image.
    """
    # After testing, found that OpenCV calculates the Hue in a call to
    # cv2.cvtColor(..., cv2.COLOR_BGR2HSV) differently from PIL
    # This function takes 160ms! should be avoided
    if not (-0.5 <= hue_factor <= 0.5):
        # Fix: the old message was '...'.format(hue_factor) with no
        # placeholder, so the offending value never appeared in the error.
        raise ValueError('hue_factor {} is not in [-0.5, 0.5].'.format(hue_factor))
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    img = Image.fromarray(img)
    input_mode = img.mode
    # single-channel / binary modes carry no hue information
    if input_mode in {'L', '1', 'I', 'F'}:
        return np.array(img)
    h, s, v = img.convert('HSV').split()
    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition take cares of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')
    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return np.array(img)
def adjust_gamma(img, gamma, gain=1):
    r"""Perform gamma correction (Power Law Transform) on an image.

    Intensities in RGB mode are adjusted according to:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the
            equation. Values above 1 darken the shadows; values below 1
            brighten dark regions.
        gain (float): The constant multiplier.

    Returns:
        numpy ndarray: Gamma-corrected image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')
    # Precompute a 256-entry lookup table so the correction is a single
    # cv2.LUT pass (idea from
    # https://stackoverflow.com/questions/33322488/how-to-change-image-illumination-in-opencv-python/41061351).
    lut = np.array([((level / 255.0) ** gamma) * 255 * gain
                    for level in range(256)]).astype('uint8')
    mapped = cv2.LUT(img, lut)
    # LUT on a single-channel image drops the channel axis; restore it.
    return mapped[:, :, np.newaxis] if img.shape[2] == 1 else mapped
def rotate(img, angle, resample=False, expand=False, center=None):
    """Rotate the image by angle.

    Args:
        img (numpy ndarray): numpy ndarray to be rotated.
        angle (float or int): In degrees degrees counter clockwise order.
        resample: kept for signature compatibility with the PIL-based API;
            NOTE(review): this OpenCV implementation never reads it.
        expand: kept for signature compatibility with the PIL-based API;
            NOTE(review): ignored here — the output always keeps the input
            size, so rotated corners may be cropped.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.

    Returns:
        numpy ndarray: Rotated image with the same height/width as the input.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    rows, cols = img.shape[0:2]
    if center is None:
        # Geometric center in (x, y) pixel coordinates.
        center = (cols / 2, rows / 2)
    # 2x3 rotation matrix about `center`, scale factor fixed at 1.
    M = cv2.getRotationMatrix2D(center, angle, 1)
    if img.shape[2] == 1:
        # warpAffine returns a 2-D array for single-channel input
        # (hence the slice below); restore the channel axis.
        return cv2.warpAffine(img, M, (cols, rows))[:, :, np.newaxis]
    else:
        return cv2.warpAffine(img, M, (cols, rows))
def _get_affine_matrix(center, angle, translate, scale, shear):
# Helper method to compute matrix for affine transformation
# We need compute affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# RSS(a, scale, shear) = [ cos(a)*scale -sin(a + shear)*scale 0]
# [ sin(a)*scale cos(a + shear)*scale 0]
# [ 0 0 1]
angle = math.radians(angle)
shear = math.radians(shear)
# scale = 1.0 / scale
T = np.array([[1, 0, translate[0]], [0, 1, translate[1]], [0, 0, 1]])
C = np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]])
RSS = np.array([[math.cos(angle) * scale, -math.sin(angle + shear) * scale, 0],
[math.sin(angle) * scale, math.cos(angle + shear) * scale, 0], [0, 0, 1]])
matrix = T @ C @ RSS @ np.linalg.inv(C)
return matrix[:2, :]
def affine(img, angle, translate, scale, shear, interpolation=cv2.INTER_LINEAR, mode=cv2.BORDER_CONSTANT, fillcolor=0):
    """Apply affine transformation on the image keeping image center invariant

    Args:
        img (numpy ndarray): numpy ndarray to be transformed.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
        interpolation (``cv2.INTER_NEAREST`` or ``cv2.INTER_LINEAR`` or ``cv2.INTER_AREA`` or ``cv2.INTER_CUBIC``):
            An optional resampling filter.
            If omitted, it is set to ``cv2.INTER_LINEAR`` (bilinear interpolation).
            (The original docstring claimed ``cv2.INTER_CUBIC``; the actual default is ``cv2.INTER_LINEAR``.)
        mode (``cv2.BORDER_CONSTANT`` or ``cv2.BORDER_REPLICATE`` or ``cv2.BORDER_REFLECT`` or ``cv2.BORDER_REFLECT_101``):
            Method for filling in border regions.
            Defaults to cv2.BORDER_CONSTANT, meaning areas outside the image are filled with a value (fillcolor, default 0)
        fillcolor (int): Optional fill color for the area outside the transform in the output image. Default: 0
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"
    assert scale > 0.0, "Argument scale should be positive"
    # (rows, cols); reversed below because warpAffine expects (width, height).
    output_size = img.shape[0:2]
    # Pixel-center convention (w/2 + 0.5, h/2 + 0.5) keeps the image center fixed.
    center = (img.shape[1] * 0.5 + 0.5, img.shape[0] * 0.5 + 0.5)
    matrix = _get_affine_matrix(center, angle, translate, scale, shear)
    if img.shape[2] == 1:
        # warpAffine returns a 2-D array for single-channel input; restore the channel axis.
        return cv2.warpAffine(img, matrix, output_size[::-1], interpolation, borderMode=mode,
                              borderValue=fillcolor)[:, :, np.newaxis]
    else:
        return cv2.warpAffine(img, matrix, output_size[::-1], interpolation, borderMode=mode, borderValue=fillcolor)
def to_grayscale(img, num_output_channels: int = 1):
    """Convert image to grayscale version of image.

    Args:
        img (numpy ndarray): Image to be converted to grayscale.
        num_output_channels: int
            if 1 : returned image is single channel
            if 3 : returned image is 3 channel with r = g = b
            NOTE(review): any other value silently returns the input
            unchanged — consider raising ValueError instead.

    Returns:
        numpy ndarray: Grayscale version of the image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
    if num_output_channels == 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
    elif num_output_channels == 3:
        # much faster than doing cvtColor to go back to gray
        # (broadcast_to returns a read-only view, not a copy)
        img = np.broadcast_to(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis], img.shape)
    # NOTE(review): the trailing "| 23,553 | ..." tokens below look like
    # dataset-join metadata fused onto the source line — not valid Python;
    # needs manual cleanup at the data/merge level, left byte-identical here.
    return img | 23,553 | 41.516245 | 122 | py |
mbtr | mbtr-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root importable so autodoc can find the `mbtr` package.
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mbtr'
copyright = '2020, Lorenzo Nespoli'
author = 'Lorenzo Nespoli'
# The full version, including alpha/beta/rc tags
release = '0.1.3'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc pulls API docs from docstrings; mathjax renders LaTeX math in HTML.
extensions = ['sphinx.ext.autodoc','sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for LaTeX / PDF output ------------------------------------------
# xelatex handles unicode in the sources better than the pdflatex default.
latex_engine = 'xelatex'
latex_elements = {
}
# Render URLs as footnotes in the PDF output.
latex_show_urls = 'footnote'
# Entry-point document for the doc tree.
master_doc = 'index'
| 2,038 | 31.365079 | 79 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/train_extraadam.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable, grad
import torch.optim as optim
import time
import torchvision
import torchvision.transforms as transforms
import numpy as np
import argparse
import os
import json
import csv
import shutil
import sys
import models
import utils
from optim import ExtraAdam
parser = argparse.ArgumentParser()
parser.add_argument('output')
parser.add_argument('--model', choices=('resnet', 'dcgan'), default='resnet')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-bs' ,'--batch-size', default=64, type=int)
parser.add_argument('--num-iter', default=500000, type=int)
parser.add_argument('-lrd', '--learning-rate-dis', default=5e-4, type=float)
parser.add_argument('-lrg', '--learning-rate-gen', default=5e-5, type=float)
parser.add_argument('-b1' ,'--beta1', default=0.5, type=float)
parser.add_argument('-b2' ,'--beta2', default=0.9, type=float)
parser.add_argument('-ema', default=0.9999, type=float)
parser.add_argument('-nz' ,'--num-latent', default=128, type=int)
parser.add_argument('-nfd' ,'--num-filters-dis', default=128, type=int)
parser.add_argument('-nfg' ,'--num-filters-gen', default=128, type=int)
parser.add_argument('-gp', '--gradient-penalty', default=10, type=float)
parser.add_argument('-m', '--mode', choices=('gan','ns-gan', 'wgan'), default='wgan')
parser.add_argument('-c', '--clip', default=0.01, type=float)
parser.add_argument('-d', '--distribution', choices=('normal', 'uniform'), default='normal')
parser.add_argument('--batchnorm-dis', action='store_true')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--inception-score', action='store_true')
parser.add_argument('--default', action='store_true')
args = parser.parse_args()
CUDA = args.cuda
MODEL = args.model
GRADIENT_PENALTY = args.gradient_penalty
OUTPUT_PATH = args.output
TENSORBOARD_FLAG = args.tensorboard
INCEPTION_SCORE_FLAG = args.inception_score
if args.default:
if args.model == 'resnet' and args.gradient_penalty != 0:
config = "config/default_resnet_wgangp_extraadam.json"
elif args.model == 'dcgan' and args.gradient_penalty != 0:
config = "config/default_dcgan_wgangp_extraadam.json"
elif args.model == 'dcgan' and args.gradient_penalty == 0:
config = "config/default_dcgan_wgan_extraadam.json"
else:
raise ValueError("Not default config available for this.")
with open(config) as f:
data = json.load(f)
args = argparse.Namespace(**data)
BATCH_SIZE = args.batch_size
N_ITER = args.num_iter
LEARNING_RATE_G = args.learning_rate_gen # It is really important to set different learning rates for the discriminator and generator
LEARNING_RATE_D = args.learning_rate_dis
BETA_1 = args.beta1
BETA_2 = args.beta2
BETA_EMA = args.ema
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
N_FILTERS_D = args.num_filters_dis
MODE = args.mode
CLIP = args.clip
DISTRIBUTION = args.distribution
BATCH_NORM_G = True
BATCH_NORM_D = args.batchnorm_dis
N_SAMPLES = 50000
RESOLUTION = 32
N_CHANNEL = 3
START_EPOCH = 0
EVAL_FREQ = 10000
SEED = args.seed
torch.manual_seed(SEED)
np.random.seed(SEED)
n_gen_update = 0
n_dis_update = 0
total_time = 0
if GRADIENT_PENALTY:
OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s-gp'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('extra_adam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
else:
OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('extra_adam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
if TENSORBOARD_FLAG:
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir=os.path.join(OUTPUT_PATH, 'tensorboard'))
writer.add_text('config', json.dumps(vars(args), indent=2, sort_keys=True))
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, num_workers=1)
print 'Init....'
if not os.path.exists(os.path.join(OUTPUT_PATH, 'checkpoints')):
os.makedirs(os.path.join(OUTPUT_PATH, 'checkpoints'))
if not os.path.exists(os.path.join(OUTPUT_PATH, 'gen')):
os.makedirs(os.path.join(OUTPUT_PATH, 'gen'))
if INCEPTION_SCORE_FLAG:
import tflib
import tflib.inception_score
    def get_inception_score():
        """Sample N_SAMPLES images from the current generator and score them
        with the TF inception code; callers read element [0] of the result."""
        all_samples = []
        # NOTE(review): latents are always drawn from randn here, even when
        # DISTRIBUTION == 'uniform' (training uses utils.sample) — confirm.
        samples = torch.randn(N_SAMPLES, N_LATENT)
        # Generate in chunks of 100 to bound GPU memory.
        for i in xrange(0, N_SAMPLES, 100):
            samples_100 = samples[i:i+100].cuda(0)
            all_samples.append(gen(samples_100).cpu().data.numpy())
        all_samples = np.concatenate(all_samples, axis=0)
        # Map generator output from [-1, 1] to [0, 255] integer pixel values.
        all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
        # NCHW -> NHWC, as expected by the TF inception graph.
        all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
        return tflib.inception_score.get_inception_score(list(all_samples))
inception_f = open(os.path.join(OUTPUT_PATH, 'inception.csv'), 'ab')
inception_writter = csv.writer(inception_f)
if MODEL == "resnet":
gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
dis = models.ResNet32Discriminator(N_CHANNEL, 1, N_FILTERS_D, BATCH_NORM_D)
elif MODEL == "dcgan":
gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
dis = models.DCGAN32Discriminator(N_CHANNEL, 1, N_FILTERS_D, batchnorm=BATCH_NORM_D)
if CUDA:
gen = gen.cuda(0)
dis = dis.cuda(0)
gen.apply(lambda x: utils.weight_init(x, mode='normal'))
dis.apply(lambda x: utils.weight_init(x, mode='normal'))
dis_optimizer = ExtraAdam(dis.parameters(), lr=LEARNING_RATE_D, betas=(BETA_1, BETA_2))
gen_optimizer = ExtraAdam(gen.parameters(), lr=LEARNING_RATE_G, betas=(BETA_1, BETA_2))
with open(os.path.join(OUTPUT_PATH, 'config.json'), 'wb') as f:
json.dump(vars(args), f)
dataiter = iter(testloader)
examples, labels = dataiter.next()
torchvision.utils.save_image(utils.unormalize(examples), os.path.join(OUTPUT_PATH, 'examples.png'), 10)
z_examples = utils.sample(DISTRIBUTION, (100, N_LATENT))
if CUDA:
z_examples = z_examples.cuda(0)
gen_param_avg = []
gen_param_ema = []
for param in gen.parameters():
gen_param_avg.append(param.data.clone())
gen_param_ema.append(param.data.clone())
f = open(os.path.join(OUTPUT_PATH, 'results.csv'), 'ab')
f_writter = csv.writer(f)
print 'Training...'
n_iteration_t = 0
gen_inception_score = 0
# Main extragradient training loop. Iterations alternate strictly:
# odd n_iteration_t  -> extrapolation() (lookahead half-step for both players),
# even n_iteration_t -> step()          (real update from the lookahead point).
while n_gen_update < N_ITER:
    t = time.time()
    avg_loss_G = 0
    avg_loss_D = 0
    avg_penalty = 0
    num_samples = 0
    penalty = Variable(torch.Tensor([0.]))
    if CUDA:
        penalty = penalty.cuda(0)
    for i, data in enumerate(trainloader):
        _t = time.time()
        x_true, _ = data
        x_true = Variable(x_true)
        z = Variable(utils.sample(DISTRIBUTION, (len(x_true), N_LATENT)))
        if CUDA:
            x_true = x_true.cuda(0)
            z = z.cuda(0)
        x_gen = gen(z)
        p_true, p_gen = dis(x_true), dis(x_gen)
        # Zero-sum formulation: the discriminator minimizes the negated
        # generator loss (plus optional gradient penalty).
        gen_loss = utils.compute_gan_loss(p_true, p_gen, mode=MODE)
        dis_loss = - gen_loss.clone()
        if GRADIENT_PENALTY:
            penalty = dis.get_penalty(x_true.data, x_gen.data)
            dis_loss += GRADIENT_PENALTY*penalty
        # Freeze the generator while backpropagating the discriminator loss.
        for p in gen.parameters():
            p.requires_grad = False
        dis_optimizer.zero_grad()
        # retain_graph: gen_loss.backward() below reuses the same graph.
        dis_loss.backward(retain_graph=True)
        if (n_iteration_t+1)%2 != 0:
            dis_optimizer.extrapolation()
        else:
            dis_optimizer.step()
        # Now freeze the discriminator and update the generator.
        for p in gen.parameters():
            p.requires_grad = True
        for p in dis.parameters():
            p.requires_grad = False
        gen_optimizer.zero_grad()
        gen_loss.backward()
        if (n_iteration_t+1)%2 != 0:
            gen_optimizer.extrapolation()
        else:
            n_gen_update += 1
            gen_optimizer.step()
            # Maintain running-average and EMA copies of the generator
            # weights; both are checkpointed for evaluation.
            for j, param in enumerate(gen.parameters()):
                gen_param_avg[j] = gen_param_avg[j]*n_gen_update/(n_gen_update+1.) + param.data.clone()/(n_gen_update+1.)
                gen_param_ema[j] = gen_param_ema[j]*BETA_EMA+ param.data.clone()*(1-BETA_EMA)
        for p in dis.parameters():
            p.requires_grad = True
        # Plain WGAN (no gradient penalty): enforce the Lipschitz constraint
        # by clipping the discriminator weights.
        if MODE =='wgan' and not GRADIENT_PENALTY:
            for p in dis.parameters():
                p.data.clamp_(-CLIP, CLIP)
        total_time += time.time() - _t
        # Only accumulate stats / checkpoint on real update iterations,
        # never on extrapolation (lookahead) iterations.
        if (n_iteration_t+1)%2 == 0:
            avg_loss_D += dis_loss.item()*len(x_true)
            avg_loss_G += gen_loss.item()*len(x_true)
            avg_penalty += penalty.item()*len(x_true)
            num_samples += len(x_true)
            if n_gen_update%EVAL_FREQ == 1:
                if INCEPTION_SCORE_FLAG:
                    gen_inception_score = get_inception_score()[0]
                    inception_writter.writerow((n_gen_update, gen_inception_score, total_time))
                    inception_f.flush()
                    if TENSORBOARD_FLAG:
                        writer.add_scalar('inception_score', gen_inception_score, n_gen_update)
                torch.save({'args': vars(args), 'n_gen_update': n_gen_update, 'total_time': total_time, 'state_gen': gen.state_dict(), 'gen_param_avg': gen_param_avg, 'gen_param_ema': gen_param_ema}, os.path.join(OUTPUT_PATH, "checkpoints/%i.state"%n_gen_update))
        n_iteration_t += 1
    # Per-epoch reporting: sample-weighted averages over update iterations.
    avg_loss_G /= num_samples
    avg_loss_D /= num_samples
    avg_penalty /= num_samples
    print 'Iter: %i, Loss Generator: %.4f, Loss Discriminator: %.4f, Penalty: %.2e, IS: %.2f, Time: %.4f'%(n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, gen_inception_score, time.time() - t)
    f_writter.writerow((n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, time.time() - t))
    f.flush()
    # Dump a grid of samples from the same fixed latent codes every epoch.
    x_gen = gen(z_examples)
    x = utils.unormalize(x_gen)
    torchvision.utils.save_image(x.data, os.path.join(OUTPUT_PATH, 'gen/%i.png' % n_gen_update), 10)
    if TENSORBOARD_FLAG:
        writer.add_scalar('loss_G', avg_loss_G, n_gen_update)
        writer.add_scalar('loss_D', avg_loss_D, n_gen_update)
        writer.add_scalar('penalty', avg_penalty, n_gen_update)
        x = torchvision.utils.make_grid(x.data, 10)
        writer.add_image('gen', x.data, n_gen_update)
| 11,901 | 37.895425 | 263 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/train_optimisticadam.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable, grad
import torch.optim as optim
import time
import torchvision
import torchvision.transforms as transforms
import numpy as np
import argparse
import os
import json
import csv
import shutil
import sys
import models
import utils
from optim import OptimisticAdam
parser = argparse.ArgumentParser()
parser.add_argument('output')
parser.add_argument('--model', choices=('resnet', 'dcgan'), default='resnet')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-bs' ,'--batch-size', default=64, type=int)
parser.add_argument('--num-iter', default=500000, type=int)
parser.add_argument('-lrd', '--learning-rate-dis', default=5e-4, type=float)
parser.add_argument('-lrg', '--learning-rate-gen', default=5e-5, type=float)
parser.add_argument('-b1' ,'--beta1', default=0.5, type=float)
parser.add_argument('-b2' ,'--beta2', default=0.9, type=float)
parser.add_argument('-ema', default=0.9999, type=float)
parser.add_argument('-nz' ,'--num-latent', default=128, type=int)
parser.add_argument('-nfd' ,'--num-filters-dis', default=128, type=int)
parser.add_argument('-nfg' ,'--num-filters-gen', default=128, type=int)
parser.add_argument('-gp', '--gradient-penalty', default=10, type=float)
parser.add_argument('-m', '--mode', choices=('gan','ns-gan', 'wgan'), default='wgan')
parser.add_argument('-c', '--clip', default=0.01, type=float)
parser.add_argument('-d', '--distribution', choices=('normal', 'uniform'), default='normal')
parser.add_argument('--batchnorm-dis', action='store_true')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--inception-score', action='store_true')
parser.add_argument('--default', action='store_true')
args = parser.parse_args()
CUDA = args.cuda
MODEL = args.model
GRADIENT_PENALTY = args.gradient_penalty
OUTPUT_PATH = args.output
TENSORBOARD_FLAG = args.tensorboard
INCEPTION_SCORE_FLAG = args.inception_score
if args.default:
if args.model == 'resnet' and args.gradient_penalty != 0:
config = "config/default_resnet_wgangp_optimisticadam.json"
elif args.model == 'dcgan' and args.gradient_penalty != 0:
config = "config/default_dcgan_wgangp_optimisticadam.json"
elif args.model == 'dcgan' and args.gradient_penalty == 0:
config = "config/default_dcgan_wgan_optimisticadam.json"
else:
raise ValueError("Not default config available for this.")
with open(config) as f:
data = json.load(f)
args = argparse.Namespace(**data)
BATCH_SIZE = args.batch_size
N_ITER = args.num_iter
LEARNING_RATE_G = args.learning_rate_gen # It is really important to set different learning rates for the discriminator and generator
LEARNING_RATE_D = args.learning_rate_dis
BETA_1 = args.beta1
BETA_2 = args.beta2
BETA_EMA = args.ema
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
N_FILTERS_D = args.num_filters_dis
MODE = args.mode
CLIP = args.clip
DISTRIBUTION = args.distribution
BATCH_NORM_G = True
BATCH_NORM_D = args.batchnorm_dis
N_SAMPLES = 50000
RESOLUTION = 32
N_CHANNEL = 3
START_EPOCH = 0
EVAL_FREQ = 10000
SEED = args.seed
torch.manual_seed(SEED)
np.random.seed(SEED)
n_gen_update = 0
n_dis_update = 0
total_time = 0
if GRADIENT_PENALTY:
OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s-gp'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('optimisticadam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
else:
OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('optimisticadam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
if TENSORBOARD_FLAG:
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir=os.path.join(OUTPUT_PATH, 'tensorboard'))
writer.add_text('config', json.dumps(vars(args), indent=2, sort_keys=True))
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, num_workers=1)
print 'Init....'
if not os.path.exists(os.path.join(OUTPUT_PATH, 'checkpoints')):
os.makedirs(os.path.join(OUTPUT_PATH, 'checkpoints'))
if not os.path.exists(os.path.join(OUTPUT_PATH, 'gen')):
os.makedirs(os.path.join(OUTPUT_PATH, 'gen'))
if INCEPTION_SCORE_FLAG:
import tflib
import tflib.inception_score
    def get_inception_score():
        """Sample N_SAMPLES images from the current generator and score them
        with the TF inception code; callers read element [0] of the result."""
        all_samples = []
        # NOTE(review): latents are always drawn from randn here, even when
        # DISTRIBUTION == 'uniform' (training uses utils.sample) — confirm.
        samples = torch.randn(N_SAMPLES, N_LATENT)
        # Generate in chunks of 100 to bound GPU memory.
        for i in xrange(0, N_SAMPLES, 100):
            samples_100 = samples[i:i+100].cuda(0)
            all_samples.append(gen(samples_100).cpu().data.numpy())
        all_samples = np.concatenate(all_samples, axis=0)
        # Map generator output from [-1, 1] to [0, 255] integer pixel values.
        all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
        # NCHW -> NHWC, as expected by the TF inception graph.
        all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
        return tflib.inception_score.get_inception_score(list(all_samples))
inception_f = open(os.path.join(OUTPUT_PATH, 'inception.csv'), 'ab')
inception_writter = csv.writer(inception_f)
if MODEL == "resnet":
gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
dis = models.ResNet32Discriminator(N_CHANNEL, 1, N_FILTERS_D, BATCH_NORM_D)
elif MODEL == "dcgan":
gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
dis = models.DCGAN32Discriminator(N_CHANNEL, 1, N_FILTERS_D, batchnorm=BATCH_NORM_D)
if CUDA:
gen = gen.cuda(0)
dis = dis.cuda(0)
gen.apply(lambda x: utils.weight_init(x, mode='normal'))
dis.apply(lambda x: utils.weight_init(x, mode='normal'))
dis_optimizer = OptimisticAdam(dis.parameters(), lr=LEARNING_RATE_D, betas=(BETA_1, BETA_2))
gen_optimizer = OptimisticAdam(gen.parameters(), lr=LEARNING_RATE_G, betas=(BETA_1, BETA_2))
with open(os.path.join(OUTPUT_PATH, 'config.json'), 'wb') as f:
json.dump(vars(args), f)
dataiter = iter(testloader)
examples, labels = dataiter.next()
torchvision.utils.save_image(utils.unormalize(examples), os.path.join(OUTPUT_PATH, 'examples.png'), 10)
z_examples = utils.sample(DISTRIBUTION, (100, N_LATENT))
if CUDA:
z_examples = z_examples.cuda(0)
gen_param_avg = []
gen_param_ema = []
for param in gen.parameters():
gen_param_avg.append(param.data.clone())
gen_param_ema.append(param.data.clone())
f = open(os.path.join(OUTPUT_PATH, 'results.csv'), 'ab')
f_writter = csv.writer(f)
print 'Training...'
n_iteration_t = 0
gen_inception_score = 0
# Main training loop. OptimisticAdam applies its lookahead correction inside
# the optimizer itself, so (unlike the extragradient script) both players call
# step() on every iteration and every iteration is a real update.
while n_gen_update < N_ITER:
    t = time.time()
    avg_loss_G = 0
    avg_loss_D = 0
    avg_penalty = 0
    num_samples = 0
    penalty = Variable(torch.Tensor([0.]))
    if CUDA:
        penalty = penalty.cuda(0)
    for i, data in enumerate(trainloader):
        _t = time.time()
        x_true, _ = data
        x_true = Variable(x_true)
        z = Variable(utils.sample(DISTRIBUTION, (len(x_true), N_LATENT)))
        if CUDA:
            x_true = x_true.cuda(0)
            z = z.cuda(0)
        x_gen = gen(z)
        p_true, p_gen = dis(x_true), dis(x_gen)
        # Zero-sum formulation: the discriminator minimizes the negated
        # generator loss (plus optional gradient penalty).
        gen_loss = utils.compute_gan_loss(p_true, p_gen, mode=MODE)
        dis_loss = - gen_loss.clone()
        if GRADIENT_PENALTY:
            penalty = dis.get_penalty(x_true.data, x_gen.data)
            dis_loss += GRADIENT_PENALTY*penalty
        # Freeze the generator while backpropagating the discriminator loss.
        for p in gen.parameters():
            p.requires_grad = False
        dis_optimizer.zero_grad()
        # retain_graph: gen_loss.backward() below reuses the same graph.
        dis_loss.backward(retain_graph=True)
        dis_optimizer.step()
        # Plain WGAN (no gradient penalty): enforce the Lipschitz constraint
        # by clipping the discriminator weights.
        if MODE =='wgan' and not GRADIENT_PENALTY:
            for p in dis.parameters():
                p.data.clamp_(-CLIP, CLIP)
        # Now freeze the discriminator and update the generator.
        for p in gen.parameters():
            p.requires_grad = True
        for p in dis.parameters():
            p.requires_grad = False
        gen_optimizer.zero_grad()
        gen_loss.backward()
        gen_optimizer.step()
        # NOTE(review): this rebinds the dataloader index `i`; harmless since
        # `i` is not used afterwards, but worth renaming for clarity.
        for i, p in enumerate(dis.parameters()):
            p.requires_grad = True
        n_gen_update += 1
        # Maintain running-average and EMA copies of the generator weights;
        # both are checkpointed for evaluation.
        for j, param in enumerate(gen.parameters()):
            gen_param_avg[j] = gen_param_avg[j]*n_gen_update/(n_gen_update+1.) + param.data.clone()/(n_gen_update+1.)
            gen_param_ema[j] = gen_param_ema[j]*BETA_EMA+ param.data.clone()*(1-BETA_EMA)
        total_time += time.time() - _t
        avg_loss_D += dis_loss.item()*len(x_true)
        avg_loss_G += gen_loss.item()*len(x_true)
        avg_penalty += penalty.item()*len(x_true)
        num_samples += len(x_true)
        if n_gen_update%EVAL_FREQ == 1:
            if INCEPTION_SCORE_FLAG:
                gen_inception_score = get_inception_score()[0]
                inception_writter.writerow((n_gen_update, gen_inception_score, total_time))
                inception_f.flush()
                if TENSORBOARD_FLAG:
                    writer.add_scalar('inception_score', gen_inception_score, n_gen_update)
            torch.save({'args': vars(args), 'n_gen_update': n_gen_update, 'total_time': total_time, 'state_gen': gen.state_dict(), 'gen_param_avg': gen_param_avg, 'gen_param_ema': gen_param_ema}, os.path.join(OUTPUT_PATH, "checkpoints/%i.state"%n_gen_update))
        n_iteration_t += 1
    # Per-epoch reporting: sample-weighted averages over the epoch.
    avg_loss_G /= num_samples
    avg_loss_D /= num_samples
    avg_penalty /= num_samples
    print 'Iter: %i, Loss Generator: %.4f, Loss Discriminator: %.4f, Penalty: %.2e, IS: %.2f, Time: %.4f'%(n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, gen_inception_score, time.time() - t)
    f_writter.writerow((n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, time.time() - t))
    f.flush()
    # Dump a grid of samples from the same fixed latent codes every epoch.
    x_gen = gen(z_examples)
    x = utils.unormalize(x_gen)
    torchvision.utils.save_image(x.data, os.path.join(OUTPUT_PATH, 'gen/%i.png' % n_gen_update), 10)
    if TENSORBOARD_FLAG:
        writer.add_scalar('loss_G', avg_loss_G, n_gen_update)
        writer.add_scalar('loss_D', avg_loss_D, n_gen_update)
        writer.add_scalar('penalty', avg_penalty, n_gen_update)
        x = torchvision.utils.make_grid(x.data, 10)
        writer.add_image('gen', x.data, n_gen_update)
| 11,654 | 38.508475 | 259 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/eval_fid.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import models
import argparse
import os
import torch
from torch.autograd import Variable
import numpy as np
import csv
import glob
parser = argparse.ArgumentParser()
parser.add_argument('input')
args = parser.parse_args()
INPUT_PATH = args.input
CUDA = True
BATCH_SIZE = 100
N_CHANNEL = 3
RESOLUTION = 32
NUM_SAMPLES = 50000
DEVICE = 'cpu'
checkpoint = torch.load(INPUT_PATH, map_location=DEVICE)
args = argparse.Namespace(**checkpoint['args'])
MODEL = args.model
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
BATCH_NORM_G = True
print "Init..."
import tflib.fid as fid
import tensorflow as tf
stats_path = 'tflib/data/fid_stats_cifar10_train.npz'
inception_path = fid.check_or_download_inception('tflib/model')
f = np.load(stats_path)
mu_real, sigma_real = f['mu'][:], f['sigma'][:]
f.close()
fid.create_inception_graph(inception_path) # load the graph into the current TF graph
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def get_fid_score():
    """Sample NUM_SAMPLES images from the global generator `gen` and return
    their FID against the precomputed CIFAR-10 training statistics.

    Relies on module-level globals: gen, CUDA, NUM_SAMPLES, BATCH_SIZE,
    N_LATENT, N_CHANNEL, RESOLUTION, fid, tf, config, mu_real, sigma_real.
    """
    all_samples = []
    samples = torch.randn(NUM_SAMPLES, N_LATENT)
    # range() instead of the Python-2-only xrange(): only NUM_SAMPLES/BATCH_SIZE
    # (= 500) indices are produced, so the cost is negligible and the function
    # no longer breaks under Python 3.
    for i in range(0, NUM_SAMPLES, BATCH_SIZE):
        samples_100 = samples[i:i+BATCH_SIZE]
        if CUDA:
            samples_100 = samples_100.cuda(0)
        all_samples.append(gen(samples_100).cpu().data.numpy())
    all_samples = np.concatenate(all_samples, axis=0)
    # Map generator output from [-1, 1] to integer pixel values in [0, 255].
    all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
    # NCHW -> NHWC, as expected by the TF Inception graph.
    all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        mu_gen, sigma_gen = fid.calculate_activation_statistics(all_samples, sess, batch_size=BATCH_SIZE)
    fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
    return fid_value
# Rebuild the generator architecture recorded in the checkpoint.
# NOTE(review): if MODEL is neither 'resnet' nor 'dcgan', `gen` is never bound
# and the load_state_dict below raises NameError — confirm checkpoints only
# ever carry these two values.
if MODEL == "resnet":
    gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
elif MODEL == "dcgan":
    gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
print "Eval..."
# 1) FID of the raw (last-iterate) generator weights.
gen.load_state_dict(checkpoint['state_gen'])
if CUDA:
    gen.cuda(0)
inception_score = get_fid_score()
# 2) FID of the uniform-average of generator iterates stored in the checkpoint.
for j, param in enumerate(gen.parameters()):
    param.data = checkpoint['gen_param_avg'][j]
if CUDA:
    gen = gen.cuda(0)
inception_score_avg = get_fid_score()
# 3) FID of the exponential moving average of generator iterates.
for j, param in enumerate(gen.parameters()):
    param.data = checkpoint['gen_param_ema'][j]
if CUDA:
    gen = gen.cuda(0)
inception_score_ema = get_fid_score()
# Despite the variable names, these are FID values (lower is better).
print 'IS: %.2f, IS Avg: %.2f, IS EMA: %.2f'%(inception_score, inception_score_avg, inception_score_ema)
| 3,742 | 33.027273 | 105 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/utils.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.autograd as autograd
import torch.nn as nn
import numpy as np
def clip_weights(params, clip=0.01):
    """Clamp every parameter into [-clip, clip] in place (WGAN weight clipping).

    Clamps `p.data` rather than `p` itself: an in-place op on a leaf tensor
    that requires grad raises a RuntimeError in autograd, and clipping via
    `.data` matches how the training scripts in this repo do it.
    """
    for p in params:
        p.data.clamp_(-clip, clip)
def unormalize(x):
    """Map values from the generator's tanh range [-1, 1] back to [0, 1]."""
    return 0.5 * x + 0.5
def sample(name, size):
    """Draw a latent tensor of shape `size` from the named distribution.

    Args:
        name: 'normal' (standard Gaussian) or 'uniform' (U[0, 1)).
        size: shape of the tensor to draw.

    Raises:
        ValueError: if `name` is not a known distribution (the message now
            names the offending value instead of a bare ValueError()).
    """
    if name == 'normal':
        return torch.zeros(size).normal_()
    elif name == 'uniform':
        return torch.zeros(size).uniform_()
    else:
        raise ValueError("unknown distribution: %r" % (name,))
def weight_init(m, mode='normal'):
    """Initialize a single module in place; meant for `net.apply(weight_init)`.

    Only Linear / Conv2d / ConvTranspose2d layers are touched; every other
    module type is ignored.

    Modes:
        'normal'     -- N(0, 0.02) weights, zero bias (DCGAN-style).
        'kaimingu'   -- Kaiming-uniform weights, zero bias.
        'orthogonal' -- orthogonal weights with gain 0.8 (bias left untouched,
                        as in the original code).
    """
    if isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
        if mode == 'normal':
            nn.init.normal_(m.weight.data, 0.0, 0.02)
            # Layers built with bias=False have m.bias is None; guard so
            # apply() does not crash on them.
            if m.bias is not None:
                nn.init.constant_(m.bias.data, 0.)
        elif mode == 'kaimingu':
            nn.init.kaiming_uniform_(m.weight.data)
            if m.bias is not None:
                nn.init.constant_(m.bias.data, 0.)
        elif mode == 'orthogonal':
            nn.init.orthogonal_(m.weight.data, 0.8)
def compute_gan_loss(p_true, p_gen, mode='gan', gen_flag=False):
    """Return the GAN objective built from discriminator logits.

    `p_true` / `p_gen` are the discriminator outputs on real and generated
    samples. `mode` selects the objective ('gan', 'gan++', 'ns-gan', 'wgan');
    `gen_flag` switches 'ns-gan' to the non-saturating generator form. Any
    other combination raises NotImplementedError.
    """
    def _log_sigmoid(p):
        # Numerically stable log(sigmoid(p)).
        return p.clamp(max=0) - torch.log(1 + torch.exp(-p.abs()))

    def _softplus(p):
        # Numerically stable log(1 + exp(p)).
        return p.clamp(min=0) + torch.log(1 + torch.exp(-p.abs()))

    if mode == 'ns-gan' and gen_flag:
        return _log_sigmoid(p_true).mean() - _log_sigmoid(p_gen).mean()
    if mode in ('gan', 'gan++'):
        return _log_sigmoid(p_true).mean() - _softplus(p_gen).mean()
    if mode == 'wgan':
        return p_true.mean() - p_gen.mean()
    raise NotImplementedError()
| 2,680 | 39.014925 | 152 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/train_pastextraadam.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable, grad
import torch.optim as optim
import time
import torchvision
import torchvision.transforms as transforms
import numpy as np
import argparse
import os
import json
import csv
import shutil
import sys
import models
import utils
from optim import ExtraAdam
# ---- CLI for the Past-ExtraAdam CIFAR-10 GAN training script. ---------------
parser = argparse.ArgumentParser()
parser.add_argument('output')
parser.add_argument('--model', choices=('resnet', 'dcgan'), default='resnet')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-bs' ,'--batch-size', default=64, type=int)
parser.add_argument('--num-iter', default=500000, type=int)
parser.add_argument('-lrd', '--learning-rate-dis', default=2e-4, type=float)
parser.add_argument('-lrg', '--learning-rate-gen', default=2e-5, type=float)
parser.add_argument('-b1' ,'--beta1', default=0.5, type=float)
parser.add_argument('-b2' ,'--beta2', default=0.9, type=float)
parser.add_argument('-ema', default=0.9999, type=float)
parser.add_argument('-nz' ,'--num-latent', default=128, type=int)
parser.add_argument('-nfd' ,'--num-filters-dis', default=128, type=int)
parser.add_argument('-nfg' ,'--num-filters-gen', default=128, type=int)
parser.add_argument('-gp', '--gradient-penalty', default=10, type=float)
parser.add_argument('-m', '--mode', choices=('gan','ns-gan', 'wgan'), default='wgan')
parser.add_argument('-c', '--clip', default=0.01, type=float)
parser.add_argument('-d', '--distribution', choices=('normal', 'uniform'), default='normal')
parser.add_argument('--batchnorm-dis', action='store_true')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--inception-score', action='store_true')
parser.add_argument('--default', action='store_true')
args = parser.parse_args()
# Capture the flags that must survive the --default override below.
CUDA = args.cuda
MODEL = args.model
GRADIENT_PENALTY = args.gradient_penalty
OUTPUT_PATH = args.output
TENSORBOARD_FLAG = args.tensorboard
INCEPTION_SCORE_FLAG = args.inception_score
# --default replaces the parsed CLI arguments wholesale with a curated JSON
# config matching the chosen model / penalty combination.
if args.default:
    if args.model == 'resnet' and args.gradient_penalty != 0:
        config = "config/default_resnet_wgangp_pastextraadam.json"
    elif args.model == 'dcgan' and args.gradient_penalty != 0:
        config = "config/default_dcgan_wgangp_pastextraadam.json"
    elif args.model == 'dcgan' and args.gradient_penalty == 0:
        config = "config/default_dcgan_wgan_pastextraadam.json"
    else:
        raise ValueError("Not default config available for this.")
    with open(config) as f:
        data = json.load(f)
    args = argparse.Namespace(**data)
BATCH_SIZE = args.batch_size
N_ITER = args.num_iter
LEARNING_RATE_G = args.learning_rate_gen # It is really important to set different learning rates for the discriminator and generator
LEARNING_RATE_D = args.learning_rate_dis
BETA_1 = args.beta1
BETA_2 = args.beta2
BETA_EMA = args.ema
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
N_FILTERS_D = args.num_filters_dis
MODE = args.mode
CLIP = args.clip
DISTRIBUTION = args.distribution
BATCH_NORM_G = True
BATCH_NORM_D = args.batchnorm_dis
N_SAMPLES = 50000
RESOLUTION = 32
N_CHANNEL = 3
START_EPOCH = 0
EVAL_FREQ = 10000
SEED = args.seed
# Seed both torch and numpy for reproducibility of data order and init.
torch.manual_seed(SEED)
np.random.seed(SEED)
n_gen_update = 0
n_dis_update = 0
total_time = 0
# Results land under a path encoding model/objective/optimizer/lrs/seed/time.
if GRADIENT_PENALTY:
    OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s-gp'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('pastextraadam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
else:
    OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s'%(MODEL, MODE), '%s/lrd=%.1e_lrg=%.1e/s%i/%i'%('pastextraadam', LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
if TENSORBOARD_FLAG:
    from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=os.path.join(OUTPUT_PATH, 'tensorboard'))
    writer.add_text('config', json.dumps(vars(args), indent=2, sort_keys=True))
# CIFAR-10, normalized to [-1, 1] to match the generator's tanh output.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, num_workers=1)
print 'Init....'
if not os.path.exists(os.path.join(OUTPUT_PATH, 'checkpoints')):
    os.makedirs(os.path.join(OUTPUT_PATH, 'checkpoints'))
if not os.path.exists(os.path.join(OUTPUT_PATH, 'gen')):
    os.makedirs(os.path.join(OUTPUT_PATH, 'gen'))
# Optional TF-based Inception Score evaluation (imported lazily so torch-only
# runs do not require TensorFlow).
if INCEPTION_SCORE_FLAG:
    import tflib
    import tflib.inception_score
    def get_inception_score():
        # Sample N_SAMPLES images from the global `gen` in chunks of 100 and
        # score them with the TF Inception network.
        # NOTE(review): xrange is Python-2 only.
        all_samples = []
        samples = torch.randn(N_SAMPLES, N_LATENT)
        for i in xrange(0, N_SAMPLES, 100):
            samples_100 = samples[i:i+100].cuda(0)
            all_samples.append(gen(samples_100).cpu().data.numpy())
        all_samples = np.concatenate(all_samples, axis=0)
        # [-1, 1] -> [0, 255] integer pixels, then NCHW -> NHWC for TF.
        all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
        all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
        return tflib.inception_score.get_inception_score(list(all_samples))
    inception_f = open(os.path.join(OUTPUT_PATH, 'inception.csv'), 'ab')
    inception_writter = csv.writer(inception_f)
# Build generator/discriminator pair for the chosen architecture.
if MODEL == "resnet":
    gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
    dis = models.ResNet32Discriminator(N_CHANNEL, 1, N_FILTERS_D, BATCH_NORM_D)
elif MODEL == "dcgan":
    gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
    dis = models.DCGAN32Discriminator(N_CHANNEL, 1, N_FILTERS_D, batchnorm=BATCH_NORM_D)
if CUDA:
    gen = gen.cuda(0)
    dis = dis.cuda(0)
# DCGAN-style N(0, 0.02) init on every conv/linear layer.
gen.apply(lambda x: utils.weight_init(x, mode='normal'))
dis.apply(lambda x: utils.weight_init(x, mode='normal'))
# Both players use ExtraAdam (extragradient variant of Adam).
dis_optimizer = ExtraAdam(dis.parameters(), lr=LEARNING_RATE_D, betas=(BETA_1, BETA_2))
gen_optimizer = ExtraAdam(gen.parameters(), lr=LEARNING_RATE_G, betas=(BETA_1, BETA_2))
with open(os.path.join(OUTPUT_PATH, 'config.json'), 'wb') as f:
    json.dump(vars(args), f)
# Save a reference grid of real test images and fix one latent batch so the
# sample grids written every epoch are comparable across iterations.
dataiter = iter(testloader)
examples, labels = dataiter.next()
torchvision.utils.save_image(utils.unormalize(examples), os.path.join(OUTPUT_PATH, 'examples.png'), 10)
z_examples = utils.sample(DISTRIBUTION, (100, N_LATENT))
if CUDA:
    z_examples = z_examples.cuda(0)
# Running uniform average and EMA of the generator parameters (evaluated
# separately at checkpoint time).
gen_param_avg = []
gen_param_ema = []
for param in gen.parameters():
    gen_param_avg.append(param.data.clone())
    gen_param_ema.append(param.data.clone())
f = open(os.path.join(OUTPUT_PATH, 'results.csv'), 'ab')
f_writter = csv.writer(f)
print 'Training...'
n_iteration_t = 0
gen_inception_score = 0
# Main training loop: each pass over the trainloader is one "epoch"; the
# stopping criterion counts generator updates, not epochs.
while n_gen_update < N_ITER:
    t = time.time()
    avg_loss_G = 0
    avg_loss_D = 0
    avg_penalty = 0
    num_samples = 0
    penalty = Variable(torch.Tensor([0.]))
    if CUDA:
        penalty = penalty.cuda(0)
    for i, data in enumerate(trainloader):
        _t = time.time()
        x_true, _ = data
        x_true = Variable(x_true)
        z = Variable(utils.sample(DISTRIBUTION, (len(x_true), N_LATENT)))
        if CUDA:
            x_true = x_true.cuda(0)
            z = z.cuda(0)
        # Past-extragradient: both optimizers first move to an extrapolated
        # point; the step() calls below then update from the original point
        # using gradients evaluated at the extrapolation.
        dis_optimizer.extrapolation()
        gen_optimizer.extrapolation()
        # Plain WGAN (no gradient penalty) enforces the Lipschitz constraint
        # by clipping discriminator weights — after extrapolation too.
        if MODE =='wgan' and not GRADIENT_PENALTY:
            for p in dis.parameters():
                p.data.clamp_(-CLIP, CLIP)
        x_gen = gen(z)
        p_true, p_gen = dis(x_true), dis(x_gen)
        # Zero-sum pair: discriminator minimizes the negated generator loss
        # (plus optional gradient penalty).
        gen_loss = utils.compute_gan_loss(p_true, p_gen, mode=MODE)
        dis_loss = - gen_loss.clone()
        if GRADIENT_PENALTY:
            penalty = dis.get_penalty(x_true.data, x_gen.data)
            dis_loss += GRADIENT_PENALTY*penalty
        # Freeze the generator while stepping the discriminator so its grads
        # are not polluted; retain_graph keeps the graph alive for gen_loss.
        for p in gen.parameters():
            p.requires_grad = False
        dis_optimizer.zero_grad()
        dis_loss.backward(retain_graph=True)
        dis_optimizer.step()
        if MODE =='wgan' and not GRADIENT_PENALTY:
            for p in dis.parameters():
                p.data.clamp_(-CLIP, CLIP)
        # Now freeze the discriminator and step the generator.
        for p in gen.parameters():
            p.requires_grad = True
        for p in dis.parameters():
            p.requires_grad = False
        gen_optimizer.zero_grad()
        gen_loss.backward()
        gen_optimizer.step()
        for i, p in enumerate(dis.parameters()):
            p.requires_grad = True
        n_gen_update += 1
        # Maintain uniform average and EMA of the generator iterates.
        for j, param in enumerate(gen.parameters()):
            gen_param_avg[j] = gen_param_avg[j]*n_gen_update/(n_gen_update+1.) + param.data.clone()/(n_gen_update+1.)
            gen_param_ema[j] = gen_param_ema[j]*BETA_EMA+ param.data.clone()*(1-BETA_EMA)
        total_time += time.time() - _t
        avg_loss_D += dis_loss.item()*len(x_true)
        avg_loss_G += gen_loss.item()*len(x_true)
        avg_penalty += penalty.item()*len(x_true)
        num_samples += len(x_true)
        # Periodic evaluation + checkpoint (every EVAL_FREQ generator updates).
        if n_gen_update%EVAL_FREQ == 1:
            if INCEPTION_SCORE_FLAG:
                gen_inception_score = get_inception_score()[0]
                inception_writter.writerow((n_gen_update, gen_inception_score, total_time))
                inception_f.flush()
                if TENSORBOARD_FLAG:
                    writer.add_scalar('inception_score', gen_inception_score, n_gen_update)
            torch.save({'args': vars(args), 'n_gen_update': n_gen_update, 'total_time': total_time, 'state_gen': gen.state_dict(), 'gen_param_avg': gen_param_avg, 'gen_param_ema': gen_param_ema}, os.path.join(OUTPUT_PATH, "checkpoints/%i.state"%n_gen_update))
        n_iteration_t += 1
    # Per-epoch reporting: sample-weighted loss averages, CSV log, image grid.
    avg_loss_G /= num_samples
    avg_loss_D /= num_samples
    avg_penalty /= num_samples
    print 'Iter: %i, Loss Generator: %.4f, Loss Discriminator: %.4f, Penalty: %.2e, IS: %.2f, Time: %.4f'%(n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, gen_inception_score, time.time() - t)
    f_writter.writerow((n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, time.time() - t))
    f.flush()
    x_gen = gen(z_examples)
    x = utils.unormalize(x_gen)
    torchvision.utils.save_image(x.data, os.path.join(OUTPUT_PATH, 'gen/%i.png' % n_gen_update), 10)
    if TENSORBOARD_FLAG:
        writer.add_scalar('loss_G', avg_loss_G, n_gen_update)
        writer.add_scalar('loss_D', avg_loss_D, n_gen_update)
        writer.add_scalar('penalty', avg_penalty, n_gen_update)
        x = torchvision.utils.make_grid(x.data, 10)
        writer.add_image('gen', x.data, n_gen_update)
| 11,844 | 38.352159 | 259 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/train_adam.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable, grad
import torch.optim as optim
import time
import torchvision
import torchvision.transforms as transforms
import numpy as np
import argparse
import os
import json
import csv
import shutil
import sys
import models
import utils
# ---- CLI for the (alternating) Adam CIFAR-10 GAN training script. -----------
parser = argparse.ArgumentParser()
parser.add_argument('output')
parser.add_argument('--model', choices=('resnet', 'dcgan'), default='resnet')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-bs' ,'--batch-size', default=64, type=int)
parser.add_argument('--num-iter', default=500000, type=int)
parser.add_argument('-lrd', '--learning-rate-dis', default=2e-4, type=float)
parser.add_argument('-lrg', '--learning-rate-gen', default=2e-5, type=float)
parser.add_argument('-b1' ,'--beta1', default=0.5, type=float)
parser.add_argument('-b2' ,'--beta2', default=0.9, type=float)
parser.add_argument('-ema', default=0.9999, type=float)
parser.add_argument('-nz' ,'--num-latent', default=128, type=int)
parser.add_argument('-nfd' ,'--num-filters-dis', default=128, type=int)
parser.add_argument('-nfg' ,'--num-filters-gen', default=128, type=int)
parser.add_argument('-gp', '--gradient-penalty', default=10, type=float)
parser.add_argument('-m', '--mode', choices=('gan','ns-gan', 'wgan'), default='wgan')
parser.add_argument('-c', '--clip', default=0.01, type=float)
parser.add_argument('-d', '--distribution', choices=('normal', 'uniform'), default='normal')
parser.add_argument('--batchnorm-dis', action='store_true')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--inception-score', action='store_true')
parser.add_argument('--default', action='store_true')
# Number of discriminator updates per generator update (classic k:1 schedule).
parser.add_argument('-u', '--update-frequency', default=5, type=int)
args = parser.parse_args()
CUDA = args.cuda
MODEL = args.model
GRADIENT_PENALTY = args.gradient_penalty
OUTPUT_PATH = args.output
TENSORBOARD_FLAG = args.tensorboard
INCEPTION_SCORE_FLAG = args.inception_score
UPDATE_FREQUENCY = args.update_frequency
# --default replaces the parsed CLI arguments wholesale with a curated JSON config.
if args.default:
    # NOTE(review): this try/except cannot fire — the if/else only formats
    # strings; a missing config file surfaces at open() below instead.
    try:
        if args.gradient_penalty == 0:
            config = "config/default_%s_wgan_adam%i_.json"%(args.model, UPDATE_FREQUENCY)
        else:
            config = "config/default_%s_wgangp_adam%i.json"%(args.model, UPDATE_FREQUENCY)
    except:
        raise ValueError("Not default config available for this.")
    with open(config) as f:
        data = json.load(f)
    args = argparse.Namespace(**data)
BATCH_SIZE = args.batch_size
N_ITER = args.num_iter
LEARNING_RATE_G = args.learning_rate_gen # It is really important to set different learning rates for the discriminator and generator
LEARNING_RATE_D = args.learning_rate_dis
BETA_1 = args.beta1
BETA_2 = args.beta2
BETA_EMA = args.ema
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
N_FILTERS_D = args.num_filters_dis
MODE = args.mode
CLIP = args.clip
DISTRIBUTION = args.distribution
BATCH_NORM_G = True
BATCH_NORM_D = args.batchnorm_dis
N_SAMPLES = 50000
RESOLUTION = 32
N_CHANNEL = 3
START_EPOCH = 0
EVAL_FREQ = 10000
SEED = args.seed
torch.manual_seed(SEED)
np.random.seed(SEED)
n_gen_update = 0
n_dis_update = 0
total_time = 0
# Results land under a path encoding model/objective/optimizer/lrs/seed/time.
if GRADIENT_PENALTY:
    OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s-gp'%(MODEL, MODE), '%s_%i/lrd=%.1e_lrg=%.1e/s%i/%i'%('adam', UPDATE_FREQUENCY, LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
else:
    OUTPUT_PATH = os.path.join(OUTPUT_PATH, '%s_%s'%(MODEL, MODE), '%s_%i/lrd=%.1e_lrg=%.1e/s%i/%i'%('adam', UPDATE_FREQUENCY, LEARNING_RATE_D, LEARNING_RATE_G, SEED, int(time.time())))
if TENSORBOARD_FLAG:
    from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=os.path.join(OUTPUT_PATH, 'tensorboard'))
    writer.add_text('config', json.dumps(vars(args), indent=2, sort_keys=True))
# CIFAR-10, normalized to [-1, 1] to match the generator's tanh output.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, num_workers=1)
print 'Init....'
if not os.path.exists(os.path.join(OUTPUT_PATH, 'checkpoints')):
    os.makedirs(os.path.join(OUTPUT_PATH, 'checkpoints'))
if not os.path.exists(os.path.join(OUTPUT_PATH, 'gen')):
    os.makedirs(os.path.join(OUTPUT_PATH, 'gen'))
# Optional TF-based Inception Score evaluation (imported lazily so torch-only
# runs do not require TensorFlow).
if INCEPTION_SCORE_FLAG:
    import tflib
    import tflib.inception_score
    def get_inception_score():
        # Sample N_SAMPLES images from the global `gen` in chunks of 100 and
        # score them with the TF Inception network.
        # NOTE(review): xrange is Python-2 only.
        all_samples = []
        samples = torch.randn(N_SAMPLES, N_LATENT)
        for i in xrange(0, N_SAMPLES, 100):
            samples_100 = samples[i:i+100].cuda(0)
            all_samples.append(gen(samples_100).cpu().data.numpy())
        all_samples = np.concatenate(all_samples, axis=0)
        # [-1, 1] -> [0, 255] integer pixels, then NCHW -> NHWC for TF.
        all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
        all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
        return tflib.inception_score.get_inception_score(list(all_samples))
    inception_f = open(os.path.join(OUTPUT_PATH, 'inception.csv'), 'ab')
    inception_writter = csv.writer(inception_f)
# Build generator/discriminator pair for the chosen architecture.
if MODEL == "resnet":
    gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
    dis = models.ResNet32Discriminator(N_CHANNEL, 1, N_FILTERS_D, BATCH_NORM_D)
elif MODEL == "dcgan":
    gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
    dis = models.DCGAN32Discriminator(N_CHANNEL, 1, N_FILTERS_D, batchnorm=BATCH_NORM_D)
if CUDA:
    gen = gen.cuda(0)
    dis = dis.cuda(0)
# DCGAN-style N(0, 0.02) init on every conv/linear layer.
gen.apply(lambda x: utils.weight_init(x, mode='normal'))
dis.apply(lambda x: utils.weight_init(x, mode='normal'))
# Plain alternating Adam for both players (baseline vs the extragradient runs).
dis_optimizer = optim.Adam(dis.parameters(), lr=LEARNING_RATE_D, betas=(BETA_1, BETA_2))
gen_optimizer = optim.Adam(gen.parameters(), lr=LEARNING_RATE_G, betas=(BETA_1, BETA_2))
with open(os.path.join(OUTPUT_PATH, 'config.json'), 'wb') as f:
    json.dump(vars(args), f)
# Save a reference grid of real test images and fix one latent batch so the
# sample grids written every epoch are comparable across iterations.
dataiter = iter(testloader)
examples, labels = dataiter.next()
torchvision.utils.save_image(utils.unormalize(examples), os.path.join(OUTPUT_PATH, 'examples.png'), 10)
z_examples = utils.sample(DISTRIBUTION, (100, N_LATENT))
if CUDA:
    z_examples = z_examples.cuda(0)
# Running uniform average and EMA of the generator parameters.
gen_param_avg = []
gen_param_ema = []
for param in gen.parameters():
    gen_param_avg.append(param.data.clone())
    gen_param_ema.append(param.data.clone())
f = open(os.path.join(OUTPUT_PATH, 'results.csv'), 'ab')
f_writter = csv.writer(f)
print 'Training...'
n_iteration_t = 0
gen_inception_score = 0
# Main training loop: UPDATE_FREQUENCY-1 discriminator steps per generator
# step (every UPDATE_FREQUENCY-th minibatch updates the generator; with
# UPDATE_FREQUENCY == 1 both players update on every minibatch).
while n_gen_update < N_ITER:
    t = time.time()
    avg_loss_G = 0
    avg_loss_D = 0
    avg_penalty = 0
    d_samples = 0
    g_samples = 0
    penalty = Variable(torch.Tensor([0.]))
    if CUDA:
        penalty = penalty.cuda(0)
    for i, data in enumerate(trainloader):
        _t = time.time()
        x_true, _ = data
        x_true = Variable(x_true)
        z = Variable(utils.sample(DISTRIBUTION, (len(x_true), N_LATENT)))
        if CUDA:
            x_true = x_true.cuda(0)
            z = z.cuda(0)
        x_gen = gen(z)
        p_true, p_gen = dis(x_true), dis(x_gen)
        # ---- Discriminator step (skipped on generator-update iterations
        # unless UPDATE_FREQUENCY == 1). ----
        if UPDATE_FREQUENCY==1 or (n_iteration_t+1)%UPDATE_FREQUENCY != 0:
            for p in gen.parameters():
                p.requires_grad = False
            dis_optimizer.zero_grad()
            dis_loss = - utils.compute_gan_loss(p_true, p_gen, mode=MODE)
            if GRADIENT_PENALTY:
                penalty = dis.get_penalty(x_true.data, x_gen.data)
            loss = dis_loss + GRADIENT_PENALTY*penalty
            # When both players update on the same minibatch the graph must
            # survive for the generator backward below.
            if UPDATE_FREQUENCY == 1:
                loss.backward(retain_graph=True)
            else:
                loss.backward()
            dis_optimizer.step()
            # Plain WGAN enforces the Lipschitz constraint by weight clipping.
            if MODE =='wgan' and not GRADIENT_PENALTY:
                for p in dis.parameters():
                    p.data.clamp_(-CLIP, CLIP)
            n_dis_update += 1
            avg_loss_D += dis_loss.item()*len(x_true)
            avg_penalty += penalty.item()*len(x_true)
            d_samples += len(x_true)
            for p in gen.parameters():
                p.requires_grad = True
            total_time += time.time() - _t
        # ---- Generator step (every UPDATE_FREQUENCY-th minibatch). ----
        if UPDATE_FREQUENCY==1 or (n_iteration_t+1)%UPDATE_FREQUENCY == 0:
            for p in dis.parameters():
                p.requires_grad = False
            gen_optimizer.zero_grad()
            loss = utils.compute_gan_loss(p_true, p_gen, mode=MODE, gen_flag=True)
            loss.backward()
            gen_optimizer.step()
            avg_loss_G += loss.item()*len(x_true)
            n_gen_update += 1
            # Maintain uniform average and EMA of the generator iterates.
            for j, param in enumerate(gen.parameters()):
                gen_param_avg[j] = gen_param_avg[j]*n_gen_update/(n_gen_update+1.) + param.data.clone()/(n_gen_update+1.)
                gen_param_ema[j] = gen_param_ema[j]*BETA_EMA+ param.data.clone()*(1-BETA_EMA)
            g_samples += len(x_true)
            for p in dis.parameters():
                p.requires_grad = True
            total_time += time.time() - _t
            # Periodic evaluation + checkpoint (every EVAL_FREQ gen updates).
            if n_gen_update%EVAL_FREQ == 1:
                if INCEPTION_SCORE_FLAG:
                    gen_inception_score = get_inception_score()[0]
                    inception_writter.writerow((n_gen_update, gen_inception_score, total_time))
                    inception_f.flush()
                    if TENSORBOARD_FLAG:
                        writer.add_scalar('inception_score', gen_inception_score, n_gen_update)
                torch.save({'args': vars(args), 'n_gen_update': n_gen_update, 'total_time': total_time, 'state_gen': gen.state_dict(), 'gen_param_avg': gen_param_avg, 'gen_param_ema': gen_param_ema}, os.path.join(OUTPUT_PATH, "checkpoints/%i.state"%n_gen_update))
        n_iteration_t += 1
    # Per-epoch reporting: sample-weighted loss averages, CSV log, image grid.
    avg_loss_G /= g_samples
    avg_loss_D /= d_samples
    avg_penalty /= d_samples
    print 'Iter: %i, Loss Generator: %.4f, Loss Discriminator: %.4f, Penalty: %.2e, IS: %.2f, Time: %.4f'%(n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, gen_inception_score, time.time() - t)
    f_writter.writerow((n_gen_update, avg_loss_G, avg_loss_D, avg_penalty, time.time() - t))
    f.flush()
    x_gen = gen(z_examples)
    x = utils.unormalize(x_gen)
    torchvision.utils.save_image(x.data, os.path.join(OUTPUT_PATH, 'gen/%i.png' % n_gen_update), 10)
    if TENSORBOARD_FLAG:
        writer.add_scalar('loss_G', avg_loss_G, n_gen_update)
        writer.add_scalar('loss_D', avg_loss_D, n_gen_update)
        writer.add_scalar('penalty', avg_penalty, n_gen_update)
        x = torchvision.utils.make_grid(x.data, 10)
        writer.add_image('gen', x.data, n_gen_update)
| 12,152 | 37.097179 | 263 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/eval_inception_score.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import models
import argparse
import os
import torch
from torch.autograd import Variable
import numpy as np
import csv
import glob
# ---- CLI: single positional argument, the checkpoint file to evaluate. ------
parser = argparse.ArgumentParser()
parser.add_argument('input')
args = parser.parse_args()
INPUT_PATH = args.input
# Evaluation constants (CIFAR-10 sized generator output).
CUDA = True
BATCH_SIZE = 1000
N_CHANNEL = 3
RESOLUTION = 32
NUM_SAMPLES = 50000
DEVICE = 'cpu'
# Load the checkpoint on CPU, then rebuild the training-time args namespace
# that was stored inside it so the generator can be reconstructed identically.
checkpoint = torch.load(INPUT_PATH, map_location=DEVICE)
args = argparse.Namespace(**checkpoint['args'])
MODEL = args.model
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
BATCH_NORM_G = True
print "Init..."
# TF-based Inception Score machinery, imported after the torch-only setup.
import tflib
import tflib.inception_score
def get_inception_score():
    """Sample NUM_SAMPLES images from the global generator `gen` and return
    their Inception Score as computed by tflib.inception_score.

    Relies on module-level globals: gen, CUDA, NUM_SAMPLES, BATCH_SIZE,
    N_LATENT, N_CHANNEL, RESOLUTION, tflib.
    """
    all_samples = []
    samples = torch.randn(NUM_SAMPLES, N_LATENT)
    # range() instead of the Python-2-only xrange(): only NUM_SAMPLES/BATCH_SIZE
    # (= 50) indices are produced, so the cost is negligible and the function
    # no longer breaks under Python 3.
    for i in range(0, NUM_SAMPLES, BATCH_SIZE):
        samples_100 = samples[i:i+BATCH_SIZE]
        if CUDA:
            samples_100 = samples_100.cuda(0)
        all_samples.append(gen(samples_100).cpu().data.numpy())
    all_samples = np.concatenate(all_samples, axis=0)
    # Map generator output from [-1, 1] to integer pixel values in [0, 255].
    all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
    # NCHW -> NHWC, as expected by the TF Inception graph.
    all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
    return tflib.inception_score.get_inception_score(list(all_samples))
# Rebuild the generator architecture recorded in the checkpoint.
# NOTE(review): if MODEL is neither 'resnet' nor 'dcgan', `gen` is never bound
# and the load_state_dict below raises NameError — confirm checkpoints only
# ever carry these two values.
if MODEL == "resnet":
    gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
elif MODEL == "dcgan":
    gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
print "Eval..."
# 1) IS of the raw (last-iterate) generator weights.
gen.load_state_dict(checkpoint['state_gen'])
if CUDA:
    gen.cuda(0)
inception_score = get_inception_score()[0]
# 2) IS of the uniform-average of generator iterates stored in the checkpoint.
for j, param in enumerate(gen.parameters()):
    param.data = checkpoint['gen_param_avg'][j]
if CUDA:
    gen = gen.cuda(0)
inception_score_avg = get_inception_score()[0]
# 3) IS of the exponential moving average of generator iterates.
for j, param in enumerate(gen.parameters()):
    param.data = checkpoint['gen_param_ema'][j]
if CUDA:
    gen = gen.cuda(0)
inception_score_ema = get_inception_score()[0]
print 'IS: %.2f, IS Avg: %.2f, IS EMA: %.2f'%(inception_score, inception_score_avg, inception_score_ema)
| 3,176 | 32.797872 | 104 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/models/discriminator.py | # MIT License
# Copyright (c) 2017 Ishaan Gulrajani
# Copyright (c) 2017 Marvin Cao
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from torch.autograd import Variable, grad
import torch.nn as nn
import torch
class Discriminator(nn.Module):
    """Abstract GAN discriminator.

    Subclasses implement forward(); this base class contributes the WGAN-GP
    gradient penalty of Gulrajani et al. (2017).
    """
    def __init__(self):
        super(Discriminator, self).__init__()

    def forward(self):
        raise NotImplementedError()

    def get_penalty(self, x_true, x_gen):
        """Return E[(||grad_x D(x_hat)|| - 1)^2] over random interpolates
        x_hat between real (`x_true`) and generated (`x_gen`) samples.

        Inputs are expected to be detached tensors (callers pass .data).
        """
        x_true = x_true.view_as(x_gen)
        # One interpolation coefficient per sample, broadcast over the
        # remaining dimensions.
        alpha = torch.rand((len(x_true),)+(1,)*(x_true.dim()-1))
        if x_true.is_cuda:
            alpha = alpha.cuda(x_true.get_device())
        x_penalty = Variable(alpha*x_true + (1-alpha)*x_gen, requires_grad=True)
        p_penalty = self.forward(x_penalty)
        # torch.ones_like already allocates on p_penalty's device, so the
        # conditional .cuda() transfer the original carried here was redundant.
        gradients = grad(p_penalty, x_penalty, grad_outputs=torch.ones_like(p_penalty), create_graph=True, retain_graph=True, only_inputs=True)[0]
        # Per-sample gradient norm, penalized for deviating from 1.
        penalty = ((gradients.view(len(x_true), -1).norm(2, 1) - 1)**2).mean()
        return penalty
| 2,109 | 43.893617 | 222 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/models/resnet.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad, Variable
from .discriminator import Discriminator
class ResBlock(nn.Module):
    """Residual block with optional x2 up/down resampling.

    Args:
        num_filters: number of input and output channels.
        resample: 'up' (x2 upsample), 'down' (x2 downsample) or None
            (keep spatial size; identity shortcut).
        batchnorm: insert BatchNorm2d before each activation.
        inplace: use in-place ReLU.

    Raises:
        ValueError: if ``resample`` is not 'up', 'down' or None.
    """

    def __init__(self, num_filters, resample=None, batchnorm=True, inplace=False):
        super(ResBlock, self).__init__()
        if resample == 'up':
            conv_list = [nn.ConvTranspose2d(num_filters, num_filters, 4, stride=2, padding=1),
                         nn.Conv2d(num_filters, num_filters, 3, padding=1)]
            self.conv_shortcut = nn.ConvTranspose2d(num_filters, num_filters, 1, stride=2, output_padding=1)
        elif resample == 'down':
            conv_list = [nn.Conv2d(num_filters, num_filters, 3, padding=1),
                         nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)]
            self.conv_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2)
        elif resample is None:  # idiom fix: `is None`, not `== None`
            conv_list = [nn.Conv2d(num_filters, num_filters, 3, padding=1),
                         nn.Conv2d(num_filters, num_filters, 3, padding=1)]
            self.conv_shortcut = None
        else:
            raise ValueError('Invalid resample value.')

        # (BN?) -> ReLU -> conv, repeated for each conv in the main path.
        layers = []
        for conv in conv_list:
            if batchnorm:
                layers.append(nn.BatchNorm2d(num_filters))
            layers.append(nn.ReLU(inplace))
            layers.append(conv)
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Return shortcut(x) + block(x)."""
        shortcut = x
        if self.conv_shortcut is not None:  # idiom fix: `is not None`
            shortcut = self.conv_shortcut(x)
        return shortcut + self.block(x)
class ResNet32Generator(nn.Module):
    """ResNet generator for 32x32 RGB images.

    Maps a latent vector of size ``n_in`` to a (3, 32, 32) image in
    [-1, 1]: linear projection to a 4x4 feature map, three upsampling
    residual blocks, then a final conv + tanh.
    """

    def __init__(self, n_in, n_out, num_filters=128, batchnorm=True):
        super(ResNet32Generator, self).__init__()
        self.num_filters = num_filters
        self.input = nn.Linear(n_in, 4 * 4 * num_filters)
        layers = [ResBlock(num_filters, resample='up', batchnorm=batchnorm, inplace=True)
                  for _ in range(3)]
        if batchnorm:
            layers.append(nn.BatchNorm2d(num_filters))
        layers.extend([nn.ReLU(True),
                       nn.Conv2d(num_filters, 3, 3, padding=1),
                       nn.Tanh()])
        self.network = nn.Sequential(*layers)

    def forward(self, z):
        """Generate a batch of images from latent batch ``z``."""
        feats = self.input(z).view(len(z), self.num_filters, 4, 4)
        return self.network(feats)
class ResNet32Discriminator(Discriminator):
    """ResNet critic for 32x32 RGB images.

    A strided residual stem is followed by one downsampling and two
    constant-size residual blocks; features are global-average pooled
    and mapped to a single scalar score per sample.
    """

    def __init__(self, n_in, n_out, num_filters=128, batchnorm=False):
        super(ResNet32Discriminator, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(3, num_filters, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1),
        )
        self.shortcut1 = nn.Conv2d(3, num_filters, 1, stride=2)
        self.network = nn.Sequential(
            ResBlock(num_filters, resample='down', batchnorm=batchnorm),
            ResBlock(num_filters, resample=None, batchnorm=batchnorm),
            ResBlock(num_filters, resample=None, batchnorm=batchnorm),
            nn.ReLU(),
        )
        self.output = nn.Linear(num_filters, 1)

    def forward(self, x):
        """Return a (batch, 1) critic score for image batch ``x``."""
        stem = self.shortcut1(x) + self.block1(x)
        pooled = self.network(stem).mean(-1).mean(-1)  # global average pool
        return self.output(pooled)
| 4,785 | 41.732143 | 109 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/models/dcgan.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
class DCGAN32Generator(nn.Module):
    """DCGAN generator for 32x32 images.

    Projects the latent vector to a 4x4 feature map, upsamples it three
    times with transposed convolutions, and squashes the output to
    [-1, 1] with tanh.

    Args:
        n_in: latent dimension.
        n_out: number of output channels.
        n_filters: base channel width.
        activation: hidden-layer activation (default: F.relu).
        batchnorm: apply BatchNorm after each hidden layer.
    """

    def __init__(self, n_in, n_out, n_filters=128, activation=F.relu, batchnorm=True):
        super(DCGAN32Generator, self).__init__()
        self.n_in = n_in
        self.n_filters = n_filters
        self.activation = activation
        self.batchnorm = batchnorm

        self.deconv1 = nn.Linear(n_in, n_filters * 4 * 4 * 4)
        self.deconv1_bn = nn.BatchNorm1d(n_filters * 4 * 4 * 4)
        self.deconv2 = nn.ConvTranspose2d(n_filters * 4, n_filters * 2, 4, 2, 1)
        self.deconv2_bn = nn.BatchNorm2d(n_filters * 2)
        self.deconv3 = nn.ConvTranspose2d(n_filters * 2, n_filters, 4, 2, 1)
        self.deconv3_bn = nn.BatchNorm2d(n_filters)
        self.deconv5 = nn.ConvTranspose2d(n_filters, n_out, 4, 2, 1)

    def forward(self, z):
        """Generate a (batch, n_out, 32, 32) image batch from latent ``z``."""
        x = self.deconv1(z)
        if self.batchnorm:
            x = self.deconv1_bn(x)
        x = self.activation(x).view(-1, self.n_filters * 4, 4, 4)
        x = self.deconv2(x)
        if self.batchnorm:
            x = self.deconv2_bn(x)
        x = self.activation(x)
        x = self.deconv3(x)
        if self.batchnorm:
            x = self.deconv3_bn(x)
        x = self.activation(x)
        # Fix: F.tanh is deprecated (warns / removed in modern torch);
        # use the Tensor method instead.
        x = self.deconv5(x).tanh()
        return x
class DCGAN32Discriminator(Discriminator):
    """DCGAN critic for 32x32 images.

    Three strided convolutions (32 -> 16 -> 8 -> 4 spatially) followed
    by a linear layer producing one scalar score per sample.
    """

    def __init__(self, n_in, n_out, n_filters=128, activation=F.leaky_relu, batchnorm=True):
        super(DCGAN32Discriminator, self).__init__()
        self.n_filters = n_filters
        self.activation = activation
        self.batchnorm = batchnorm

        self.conv1 = nn.Conv2d(n_in, n_filters, 4, 2, 1)
        self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(n_filters * 2)
        self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(n_filters * 4)
        self.conv5 = nn.Linear(n_filters * 4 * 4 * 4, 1)

    def forward(self, x):
        """Return a (batch, 1) critic score for image batch ``x``."""
        h = self.activation(self.conv1(x))
        h = self.conv2(h)
        if self.batchnorm:
            h = self.conv2_bn(h)
        h = self.activation(h)
        h = self.conv3(h)
        if self.batchnorm:
            h = self.conv3_bn(h)
        h = self.activation(h).view(-1, self.n_filters * 4 * 4 * 4)
        return self.conv5(h)
| 3,533 | 35.43299 | 92 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/tflib/inception_score.py | # From https://github.com/openai/improved-gan/blob/master/inception_score/model.py
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import math
import sys
import torch
import torch.autograd as autograd
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Local cache directory for the pretrained Inception snapshot.
MODEL_DIR = 'tflib/model'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# Set by _init_inception() to the softmax output tensor of the loaded graph.
softmax = None
config = tf.ConfigProto()
# Allocate GPU memory on demand instead of reserving it all at session start.
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.9
# Call this function with list of images. Each of elements should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
    """Compute the Inception Score of ``images``.

    Args:
        images: list of HxWxC numpy arrays with pixel values in [0, 255]
            (the asserts below enforce list / ndarray / 3-D / value range).
        splits: number of disjoint splits the score is averaged over.

    Returns:
        Tuple ``(mean, std)`` of the per-split scores
        ``exp(mean KL(p(y|x) || p(y)))``.
    """
    assert(type(images) == list)
    assert(type(images[0]) == np.ndarray)
    assert(len(images[0].shape) == 3)
    assert(np.max(images[0]) > 10)      # values look like 0-255, not 0-1
    assert(np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 100    # inference batch size
    with tf.Session(config=config) as sess:
        preds = []
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in range(n_batches):
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            # Feed raw images into the graph's ExpandDims input; `softmax`
            # is the class-probability tensor built by _init_inception().
            pred = sess.run(softmax, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        for i in range(splits):
            part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
            # Per-image KL(p(y|x) || p(y)), then averaged over the split.
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return np.mean(scores), np.std(scores)
# This function is called automatically.
def _init_inception():
    """Download (if needed) and load the Inception graph; set global ``softmax``.

    Rewrites the static batch dimension of every op output to None so the
    graph accepts arbitrary minibatch sizes, then rebuilds the softmax
    classifier on top of the pool3 features.
    """
    global softmax
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(MODEL_DIR, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # urlretrieve reporthook: render a download-progress percentage.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
    with tf.gfile.FastGFile(os.path.join(
            MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    # Works with an arbitrary minibatch size.
    with tf.Session(config=config) as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        ops = pool3.graph.get_operations()
        for op_idx, op in enumerate(ops):
            for o in op.outputs:
                shape = o.get_shape()
                shape = [s.value for s in shape]
                new_shape = []
                for j, s in enumerate(shape):
                    if s == 1 and j == 0:
                        # Batch dimension: make it dynamic.
                        new_shape.append(None)
                    else:
                        new_shape.append(s)
                # NOTE(review): pokes a private attribute of the TF 1.x Tensor;
                # tied to that API generation.
                o._shape = tf.TensorShape(new_shape)
        w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
        logits = tf.matmul(tf.squeeze(pool3), w)
        softmax = tf.nn.softmax(logits)

# Initialize lazily the first time this module is imported.
if softmax is None:
    _init_inception()
| 3,708 | 34.32381 | 90 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/optim/extragradient.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import math
import torch
from torch.optim import Optimizer
required = object()
class Extragradient(Optimizer):
    """Base class for optimizers with extrapolation step.

    Usage per iteration: backward(); ``extrapolation()``; zero_grad();
    backward(); ``step()``. ``step()`` raises if ``extrapolation()`` was
    not called first.

    Arguments:
        params (iterable): an iterable of :class:`torch.Tensor` s or
            :class:`dict` s. Specifies what Tensors should be optimized.
        defaults: (dict): a dict containing default values of optimization
            options (used when a parameter group doesn't specify them).
    """
    def __init__(self, params, defaults):
        super(Extragradient, self).__init__(params, defaults)
        # Snapshot of parameters taken at the first extrapolation step.
        self.params_copy = []

    def update(self, p, group):
        # Subclasses return the parameter delta for p, or None if p has no grad.
        raise NotImplementedError

    def extrapolation(self):
        """Performs the extrapolation step and save a copy of the current parameters for the update step.
        """
        # Check if a copy of the parameters was already made.
        is_empty = len(self.params_copy) == 0
        for group in self.param_groups:
            for p in group['params']:
                u = self.update(p, group)
                if is_empty:
                    # Save the current parameters for the update step. Several extrapolation step can be made before each update but only the parameters before the first extrapolation step are saved.
                    # (A copy is stored for EVERY parameter, even grad-less
                    # ones, so indices stay aligned with step() below.)
                    self.params_copy.append(p.data.clone())
                if u is None:
                    continue
                # Update the current parameters
                p.data.add_(u)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        if len(self.params_copy) == 0:
            raise RuntimeError('Need to call extrapolation before calling step.')

        loss = None
        if closure is not None:
            loss = closure()

        # `i` walks parameters in the same nested-loop order as
        # extrapolation(), so it indexes params_copy correctly.
        i = -1
        for group in self.param_groups:
            for p in group['params']:
                i += 1
                u = self.update(p, group)
                if u is None:
                    continue
                # Update the parameters saved during the extrapolation step
                p.data = self.params_copy[i].add_(u)

        # Free the old parameters
        self.params_copy = []
        return loss
class ExtraSGD(Extragradient):
    """SGD with an extrapolation step (extra-gradient method), optionally
    with (Nesterov) momentum.

    Call pattern per iteration::

        loss_fn(model(input), target).backward()
        optimizer.extrapolation()
        optimizer.zero_grad()
        loss_fn(model(input), target).backward()
        optimizer.step()

    Args:
        params: iterable of parameters or parameter-group dicts.
        lr (float): learning rate (required).
        momentum (float, optional): momentum factor (default: 0).
        dampening (float, optional): dampening for momentum (default: 0).
        weight_decay (float, optional): L2 penalty coefficient (default: 0).
        nesterov (bool, optional): enables Nesterov momentum (default: False).
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(ExtraSGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Bug fix: the original called super(SGD, self) but `SGD` is not
        # defined in this module, so unpickling raised NameError.
        super(ExtraSGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def update(self, p, group):
        """Return the SGD delta ``-lr * d_p`` for ``p`` (None if no grad)."""
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']

        if p.grad is None:
            return None
        d_p = p.grad.data
        if weight_decay != 0:
            # d_p += weight_decay * p  (keyword form of the deprecated
            # add_(scalar, tensor) overload, removed in modern torch).
            d_p.add_(p.data, alpha=weight_decay)
        if momentum != 0:
            param_state = self.state[p]
            if 'momentum_buffer' not in param_state:
                buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                buf.mul_(momentum).add_(d_p)
            else:
                buf = param_state['momentum_buffer']
                buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
            if nesterov:
                d_p = d_p.add(buf, alpha=momentum)
            else:
                d_p = buf

        return -group['lr'] * d_p
class ExtraAdam(Extragradient):
    """Adam with an extrapolation step (extra-gradient method).

    Arguments:
        params: iterable of parameters or parameter-group dicts.
        lr (float, optional): learning rate (default: 1e-3).
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999)).
        eps (float, optional): term added to the denominator for numerical
            stability (default: 1e-8).
        weight_decay (float, optional): L2 penalty coefficient (default: 0).
        amsgrad (boolean, optional): use the AMSGrad variant from
            `On the Convergence of Adam and Beyond` (default: False).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(ExtraAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(ExtraAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def update(self, p, group):
        """Return the Adam delta for parameter ``p`` (None if it has no grad)."""
        if p.grad is None:
            return None
        grad = p.grad.data
        if grad.is_sparse:
            raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
        amsgrad = group['amsgrad']

        state = self.state[p]

        # State initialization
        if len(state) == 0:
            state['step'] = 0
            # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(p.data)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(p.data)
            if amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(p.data)

        exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
        if amsgrad:
            max_exp_avg_sq = state['max_exp_avg_sq']
        beta1, beta2 = group['betas']

        state['step'] += 1

        if group['weight_decay'] != 0:
            # Keyword form of the deprecated add(scalar, tensor) overload.
            grad = grad.add(p.data, alpha=group['weight_decay'])

        # Decay the first and second moment running average coefficient
        # (keyword forms of the deprecated positional-alpha overloads).
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
            # Use the max. for normalizing running avg. of gradient
            denom = max_exp_avg_sq.sqrt().add_(group['eps'])
        else:
            denom = exp_avg_sq.sqrt().add_(group['eps'])

        bias_correction1 = 1 - beta1 ** state['step']
        bias_correction2 = 1 - beta2 ** state['step']
        step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

        return -step_size * exp_avg / denom
| 10,809 | 39.335821 | 199 | py |
Variational-Inequality-GAN | Variational-Inequality-GAN-master/optim/omd.py | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# written by Hugo Berard (berard.hugo@gmail.com) while at Facebook.
import math
import torch
from torch.optim import Optimizer
# Sentinel marking "no default supplied" (mirrors torch.optim's internal marker).
required = object()


class OMD(Optimizer):
    """Optimistic Mirror Descent.

    Per-parameter update: ``p <- p - 2*lr*g_t + lr*g_{t-1}``, i.e. a
    gradient step with an optimistic correction using the previous
    gradient.

    Args:
        params: iterable of parameters or parameter-group dicts.
        lr (float): learning rate (required).
    """

    def __init__(self, params, lr=required):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        defaults = dict(lr=lr)
        super(OMD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(OMD, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimistic update; returns ``closure()`` if given."""
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                state = self.state[p]

                # State initialization: no previous gradient yet.
                if len(state) == 0:
                    state['previous_update'] = torch.zeros_like(d_p)

                # p <- p - 2*lr*g_t + lr*g_{t-1}  (keyword `alpha` replaces
                # the deprecated add_(scalar, tensor) overload).
                p.data.add_(d_p, alpha=-2 * group['lr'])
                p.data.add_(state['previous_update'], alpha=group['lr'])
                # Bug fix: store a copy. The original kept a reference to the
                # live gradient buffer, which backward() mutates in place, so
                # g_{t-1} could silently become g_t by the next step.
                state['previous_update'] = d_p.clone()

        return loss
class OptimisticAdam(Optimizer):
    """Optimistic Adam: Adam whose step subtracts twice the current Adam
    update and adds back the previous one (optimistic correction).

    Args:
        params: iterable of parameters or parameter-group dicts.
        lr, betas, eps, weight_decay, amsgrad: as in torch.optim.Adam.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(OptimisticAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(OptimisticAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform one optimistic Adam update; returns ``closure()`` if given."""
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    # Bug fix: the original `return None` aborted the entire
                    # step (skipping remaining parameters and dropping `loss`);
                    # a missing gradient should only skip this parameter.
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient
                # (keyword forms of the deprecated positional-alpha overloads).
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                # p <- p - 2 * step_size * m_hat / denom ...
                p.data.addcdiv_(exp_avg, denom, value=-2 * step_size)
                if state['step'] > 1:
                    # ... + lr * previous (bias-corrected) update.
                    p.data.addcdiv_(state['exp_avg_previous'],
                                    state['exp_avg_sq_previous'],
                                    value=group['lr'])
                state['exp_avg_previous'] = exp_avg.clone() / (bias_correction1)
                state['exp_avg_sq_previous'] = denom.clone() / math.sqrt(bias_correction2)
        return loss
| 5,917 | 39.534247 | 116 | py |
JamBot | JamBot-master/polyphonic_lstm_training.py | # Author: Jonas Wiesendanger wjonas@student.ethz.ch
from settings import *
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Activation
from keras.layers.embeddings import Embedding
from keras.optimizers import RMSprop, Adam
# from keras.utils import to_categorical
from keras.utils import np_utils
from keras.layers.wrappers import Bidirectional
from random import shuffle
import progressbar
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import numpy as np
import _pickle as pickle
import data_class
import chord_model
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Uncomment next block if you only want to use a fraction of the GPU memory:
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.4
#set_session(tf.Session(config=config))
# Path to the fully trained chord model for the chord embeddings:
chord_model_path = 'models/chords/1523433134-Shifted_True_Lr_1e-05_EmDim_10_opt_Adam_bi_False_lstmsize_512_trainsize_4_testsize_1_samples_per_bar8/model_Epoch10_4.pickle'
# Path where the polyphonic models are saved:
model_path = 'models/chords_mldy/'
model_filetype = '.pickle'
# --- Training hyperparameters ---
epochs = 100
train_set_size = 4
test_set_size = 1
test_step = 360 # Calculate error for test set every this many songs
verbose = False
show_plot = False
save_plot = True

lstm_size = 512
batch_size = 1          # stateful LSTM is trained one song at a time
learning_rate = 1e-06
step_size = 1           # timesteps per LSTM input
save_step = 1           # save the model every this many epochs
shuffle_train_set = True
bidirectional = False
embedding = False
optimizer = 'Adam'

# Build a descriptive model directory name from the configuration
# (`shifted`, `next_chord_feature`, etc. come from `settings` via the star
# import at the top of the file).
fd = {'shifted': shifted, 'next_chord_feature': next_chord_feature, 'chord_embed_method': chord_embed_method, 'counter': counter_feature, 'highcrop': high_crop, 'lowcrop':low_crop, 'lr': learning_rate, 'opt': optimizer,
      'bi': bidirectional, 'lstms': lstm_size, 'trainsize': train_set_size, 'testsize': test_set_size}
model_name = 'Shifted_%(shifted)s_NextChord_%(next_chord_feature)s_ChordEmbed_%(chord_embed_method)s_Counter_%(counter)s_Highcrop_%(highcrop)s_Lowcrop_%(lowcrop)s_Lr_%(lr)s_opt_%(opt)s_bi_%(bi)s_lstmsize_%(lstms)s_trainsize_%(trainsize)s_testsize_%(testsize)s' % fd
model_path = model_path + model_name + '/'
if not os.path.exists(model_path):
    os.makedirs(model_path)
print('loading data...')

# Get Train and test sets
train_set, test_set, chord_train_set, chord_test_set = data_class.get_ind_train_and_test_set(train_set_size, test_set_size)

# Width of the per-step chord feature appended to each piano-roll frame,
# depending on the chosen chord representation.
if chord_embed_method == 'embed':
    chord_dim = chord_embedding_dim
elif chord_embed_method == 'onehot':
    chord_dim = num_chords
elif chord_embed_method == 'int':
    chord_dim = 1
if next_chord_feature:
    # Current and upcoming chord are concatenated, doubling the width.
    chord_dim = chord_dim*2

# Load model for chord embeddings
chord_embed_model = chord_model.Embed_Chord_Model(chord_model_path)
# Build Melody Model: single stateful LSTM over one-hot notes + chord
# features (+ optional counter), sigmoid multi-label output over notes.
print('creating model...')
model = Sequential()
model.add(LSTM(lstm_size, batch_input_shape=(batch_size,step_size, new_num_notes+chord_dim+counter_size), stateful=True))
model.add(Dense(new_num_notes))
model.add(Activation('sigmoid'))

if optimizer == 'RMS': optimizer = RMSprop(lr=learning_rate)
if optimizer == 'Adam': optimizer = Adam(lr=learning_rate)

# NOTE(review): categorical_crossentropy over a sigmoid multi-label output
# is unusual; binary_crossentropy would be the conventional pairing — confirm
# this is intentional before changing it.
loss = 'categorical_crossentropy'
model.compile(optimizer, loss)

# initialize loss arrays (module-level accumulators used by test() and
# the training loop below)
total_test_loss_array = []
total_train_loss_array = []
total_test_loss = 0
total_train_loss = 0
# Test function
def test():
    """Evaluate the model on the test set; record, print and plot losses.

    Side effects: appends to the module-level loss arrays, saves the loss
    plot and pickles both arrays under ``model_path``. Relies on module
    globals (model, test_set, chord_test_set, batch_size, ...).
    """
    print('\nTesting:')
    total_test_loss = 0   # local accumulator (shadows the module-level name)
    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        X_test, Y_test = make_feature_vector(test_song, chord_test_set[i], chord_embed_method)
        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        # Stateful LSTM: clear hidden state between songs.
        model.reset_states()
        total_test_loss += loss
        bar.update(i)
    total_test_loss_array.append(total_test_loss/test_set_size)
    print('\nTotal test loss: ', total_test_loss/test_set_size)
    print('-'*50)
    plt.plot(total_test_loss_array, 'b-')
    plt.plot(total_train_loss_array, 'r-')
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path+'plot.png')
    pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb'))
# Make feature vectors with the notes and the chord information
def make_feature_vector(song, chords, chord_embed_method):
    """Build the (X, Y) training arrays for one song.

    X: one-hot piano-roll frames cropped to [low_crop, high_crop), with the
    current chord representation (and optionally the next chord and a 3-bit
    bar-position counter) appended to every frame, reshaped to
    (timesteps, 1, features) for the stateful LSTM.
    Y: the next-frame note vector (the note part of X shifted by one step).

    Uses module globals: next_chord_feature, counter_feature, fs,
    num_notes, low_crop, high_crop, chord_embed_model, num_chords.
    """
    # One chord spans fs*2 frames; with the next-chord feature the last
    # chord has no successor, so one chord's worth of frames is dropped.
    if next_chord_feature:
        X = np.array(data_class.make_one_hot_note_vector(song[:(((len(chords)-1)*fs*2)-1)], num_notes))
    else:
        X = np.array(data_class.make_one_hot_note_vector(song[:((len(chords)*fs*2)-1)], num_notes))
    X = X[:,low_crop:high_crop]
    if chord_embed_method == 'embed':
        X_chords = list(chord_embed_model.embed_chords_song(chords))
    elif chord_embed_method == 'onehot':
        X_chords = data_class.make_one_hot_vector(chords, num_chords)
    elif chord_embed_method == 'int':
        X_chords = [[x] for x in chords]
    X_chords_new = []
    # Target is the notes-only roll shifted by one frame (chords appended
    # to X below are inputs only).
    Y = X[1:]
    for j, _ in enumerate(X):
        ind = int(((j+1)/(fs*2)))          # chord index active at frame j
        if next_chord_feature:
            ind2 = int(((j+1)/(fs*2)))+1   # the upcoming chord as extra context
            X_chords_new.append(list(X_chords[ind])+list(X_chords[ind2]))
        else:
            X_chords_new.append(X_chords[ind])
    X_chords_new = np.array(X_chords_new)
    X = np.append(X, X_chords_new, axis=1)
    if counter_feature:
        # 3-bit binary counter marking the position within each 8-step bar.
        counter = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]]
        if next_chord_feature:
            counter = np.array(counter*(len(X_chords)-1))[:-1]
        else:
            counter = np.array(counter*len(X_chords))[:-1]
        X = np.append(X, counter, axis=1)
    # Drop the last frame so X[t] predicts Y[t] = notes at t+1.
    X = X[:-1]
    X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
    return X, Y
# Save Parameters to text file
with open(model_path + 'params.txt', "w") as text_file:
text_file.write("Chord Model: %s" % chord_model_path + '\n')
text_file.write("epochs: %s" % epochs + '\n')
text_file.write("train_set_size: %s" % train_set_size + '\n')
text_file.write("test_set_size: %s" % test_set_size + '\n')
text_file.write("lstm_size: %s" % lstm_size + '\n')
text_file.write("learning_rate: %s" % learning_rate + '\n')
text_file.write("save_step: %s" % save_step + '\n')
text_file.write("shuffle_train_set: %s" % shuffle_train_set + '\n')
text_file.write("test_step: %s" % test_step + '\n')
text_file.write("bidirectional: %s" % bidirectional + '\n')
text_file.write("num_chords: %s" % num_chords + '\n')
text_file.write("chord_n: %s" % chord_n + '\n')
# Train model: one song at a time (stateful LSTM), periodic evaluation and
# per-epoch checkpointing.
print('training model...')
for e in range(1, epochs+1):
    print('Epoch ', e, 'of ', epochs, 'Epochs\nTraining:')

    # Shuffle training set order
    if shuffle_train_set:
        # Zip the songs with their chords, shuffle together, unzip again
        # so the pairing stays intact.
        ziperoni = list(zip(train_set, chord_train_set))
        shuffle(ziperoni)
        train_set, chord_train_set = zip(*ziperoni)

    bar = progressbar.ProgressBar(maxval=train_set_size)

    # Train model with each song separately
    for i, song in enumerate(train_set):
        X, Y = make_feature_vector(song, chord_train_set[i], chord_embed_method)
        hist = model.fit(X, Y, batch_size=batch_size, shuffle=False, verbose=verbose)
        model.reset_states()
        bar.update(i)
        total_train_loss += hist.history['loss'][0]
        # Bug fix: `is 0` compares object identity (implementation-dependent
        # for ints, SyntaxWarning on modern Python); use `== 0`.
        if (i+1) % test_step == 0:
            total_train_loss = total_train_loss/test_step
            total_train_loss_array.append(total_train_loss)
            test()
            total_train_loss = 0

    if e % save_step == 0:
        print('saving model')
        model_save_path = model_path + 'model' + 'Epoch' + str(e) + model_filetype
        model.save(model_save_path)
| 8,195 | 34.025641 | 265 | py |
JamBot | JamBot-master/chord_model.py | from settings import *
from keras.models import load_model
import keras
import numpy as np
from numpy import array
import _pickle as pickle
from keras import backend as K
from data_processing import get_chord_dict
class Chord_Model:
    """Wraps a trained Keras chord LSTM for autoregressive chord generation.

    The stateful model is warmed up on a seed progression; ``predict_next``
    then extends ``self.song`` one chord at a time by sampling or argmax,
    optionally discouraging immediate chord repetition.

    Args:
        model_path: path to the saved Keras model.
        prediction_mode: 'sampling' or 'argmax'.
        first_chords: seed chord indices used to prime the LSTM state.
            (Fix: default is now an immutable tuple, not a shared mutable list.)
        resample: 'none', 'soft' (down-weight repeating the current chord)
            or 'hard' (forbid it).
        dim_factor: divisor applied to the repeat probability in 'soft' mode.
        temperature: softmax temperature for sampling.
    """

    def __init__(self,
                 model_path,
                 prediction_mode='sampling',
                 first_chords=(1, 3, 2, 1, 1, 3, 2, 1),
                 resample='none',
                 dim_factor=2,
                 temperature=1.0):
        print('loading chord model ...')
        self.model = keras.models.load_model(model_path)
        self.model.reset_states()
        # Function evaluating only the first (embedding) layer.
        self.embed_layer_output = K.function([self.model.layers[0].input], [self.model.layers[0].output])
        self.embed_model = keras.models.Model(inputs=self.model.input, outputs=self.model.get_layer(name="embedding").output)
        self.chord_to_index, self.index_to_chords = get_chord_dict()
        self.prediction_mode = prediction_mode
        self.temperature = temperature
        self.resample = resample
        self.dim_factor = dim_factor
        self.song = []
        # Warm the stateful LSTM on the seed progression; the final seed
        # chord becomes the current conditioning input.
        for chord in first_chords[:-1]:
            self.model.predict(array([[chord]]))
            self.song.append(chord)
        chord = first_chords[-1]
        self.song.append(chord)
        self.current_chord = array([[chord]])

    def predict_next(self):
        """Predict the next chord, append it to ``self.song`` and return it."""
        prediction = self.model.predict(self.current_chord)[0]
        if self.resample == 'hard':
            # Forbid repeating the current chord outright.
            prediction[self.current_chord] = 0
            prediction = prediction / np.sum(prediction)
        elif self.resample == 'soft':
            # Down-weight repeating the current chord.
            prediction[self.current_chord] /= self.dim_factor
            prediction = prediction / np.sum(prediction)
        # Temperature reshaping of the distribution.
        prediction = np.log(prediction) / self.temperature
        prediction = np.exp(prediction) / np.sum(np.exp(prediction))
        if self.prediction_mode == 'argmax':
            # Bug fix: the original retried np.argmax in a loop, which never
            # terminates when index 0 is the maximum (argmax of an unchanged
            # array is constant). Mask index 0 and take the argmax once;
            # the result is unchanged whenever the maximum was not index 0.
            # (Index 0 appears to be a reserved/padding chord — TODO confirm.)
            prediction[0] = 0
            next_chord = np.argmax(prediction)
        elif self.prediction_mode == 'sampling':
            while True:
                next_chord = np.random.choice(len(prediction), p=prediction)
                if next_chord != 0:
                    break
        self.song.append(next_chord)
        self.current_chord = np.array([next_chord])
        return self.current_chord[0]

    def embed_chord(self, chord):
        """Return the embedding vector for a single chord index."""
        return self.embed_layer_output([[[chord]]])[0][0][0]

    def embed_chords_song(self, chords):
        """Return the list of embedding vectors for a chord sequence."""
        embeded_chords = []
        for chord in chords:
            embeded_chords.append(self.embed_chord(chord))
        return embeded_chords
class Embed_Chord_Model:
    """Lightweight wrapper exposing only the embedding layer of a chord model."""

    def __init__(self, model_path):
        """Load a trained chord model and capture its embedding-layer output."""
        print('loading chord model ...')
        net = keras.models.load_model(model_path)
        net.reset_states()
        self.embed_layer_output = K.function([net.layers[0].input], [net.layers[0].output])
        self.chord_to_index, self.index_to_chords = get_chord_dict()

    def embed_chord(self, chord):
        """Return the embedding vector for one chord index."""
        return self.embed_layer_output([[[chord]]])[0][0][0]

    def embed_chords_song(self, chords):
        """Return the embedding vectors for every chord index in ``chords``."""
        return [self.embed_chord(c) for c in chords]
if __name__ == "__main__":
    # Quick smoke test: load a trained chord model and sample 16 chords.
    folder = 'models/chords/standart_lr_0.00003/'
    checkpoint = 'modelEpoch10'
    generator = Chord_Model(folder + checkpoint + '.h5', prediction_mode='sampling')
    for _ in range(16):
        generator.predict_next()
    print(generator.song)
| 4,054 | 27.356643 | 124 | py |
JamBot | JamBot-master/generation.py | from settings import *
from keras.models import load_model
import numpy as np
from numpy import array
import _pickle as pickle
import os
import data_processing
import chord_model
import midi_functions as mf
import data_class
# --- Trained checkpoints used for generation ---
chord_model_folder = 'models/chords/1523433134-Shifted_True_Lr_1e-05_EmDim_10_opt_Adam_bi_False_lstmsize_512_trainsize_4_testsize_1_samples_per_bar8/'
chord_model_name = 'model_Epoch10_4.pickle'
melody_model_folder = 'models/chords_mldy/Shifted_True_NextChord_True_ChordEmbed_embed_Counter_True_Highcrop_84_Lowcrop_24_Lr_1e-06_opt_Adam_bi_False_lstmsize_512_trainsize_4_testsize_1/'
melody_model_name = 'modelEpoch2.pickle'
midi_save_folder = 'predicted_midi/'
# Seed data (note-index rolls + chord indices) from the preprocessed dataset.
seed_path = 'data/' + shift_folder + 'indroll/'
seed_chord_path = 'data/' + shift_folder + 'chord_index/'
seed_name = 'Piano Concerto n2 op19 1mov.mid.pickle'
# Parameters for song generation:
BPM = 100
note_cap = 5  # upper bound on the expected number of simultaneous notes
chord_temperature = 1  # softmax temperature for chord sampling
# Params for seed:
# length of the predicted song in bars:
num_bars =64
# The first seed_length number of bars from the seed will be used:
seed_length = 4
#pred_song_length = 8*16-seed_length
with_seed = True
# Chord vocabulary mappings shared with training.
chord_to_index, index_to_chord = data_processing.get_chord_dict()
def sample_probability_vector(prob_vector, cap=None):
    """Sample an independent Bernoulli note-on vector from per-note probabilities.

    Args:
        prob_vector: 1-D array-like of per-note "on" probabilities in [0, 1],
            e.g. [0.1, 0.001, 0.5, 0.9].
        cap: maximum allowed sum of probabilities, limiting the expected number
            of simultaneous notes. Defaults to the module-level ``note_cap`` so
            existing callers keep their behavior.

    Returns:
        np.int8 array of the same length with 0/1 entries.
    """
    if cap is None:
        cap = note_cap  # module-level generation setting (backward compatible)
    prob_vector = np.asarray(prob_vector, dtype=float)
    sum_probas = prob_vector.sum()
    if sum_probas > cap:
        # Rescale so the expected number of active notes does not exceed cap.
        prob_vector = (prob_vector / sum_probas) * cap
    # Clip float drift so the Bernoulli draw never sees p outside [0, 1].
    prob_vector = np.clip(prob_vector, 0.0, 1.0)
    # One Bernoulli draw per note — vectorized equivalent of the original
    # per-note np.random.multinomial(1, [1 - p, p]) loop.
    return np.random.binomial(1, prob_vector).astype(np.int8)
def ind_to_onehot(ind, n_notes=None):
    """Convert per-timestep lists of active note indices to a piano-roll matrix.

    Args:
        ind: sequence of timesteps, each a sequence of active note indices.
        n_notes: width of the one-hot axis; defaults to the module-level
            ``num_notes`` so existing callers are unchanged.

    Returns:
        (len(ind), n_notes) float array with 1.0 at each active note.
    """
    if n_notes is None:
        n_notes = num_notes  # module-level setting (backward compatible)
    onehot = np.zeros((len(ind), n_notes))
    for i, step in enumerate(ind):
        # Set every active note of this timestep in one fancy-index write.
        onehot[i, list(step)] = 1
    return onehot
# --- Load the seed: first seed_length bars of note rolls + chord indices ---
sd = pickle.load(open(seed_path+seed_name, 'rb'))[:8*seed_length]
seed_chords = pickle.load(open(seed_chord_path+seed_name, 'rb'))[:seed_length]
# Crop the one-hot roll to the pitch range the melody model was trained on.
seed = ind_to_onehot(sd)[:,low_crop:high_crop]
print('loading polyphonic model ...')
melody_model = load_model(melody_model_folder+melody_model_name)
melody_model.reset_states()
# Chord model is primed with the seed chords, then samples the progression.
ch_model = chord_model.Chord_Model(
        chord_model_folder+chord_model_name,
        prediction_mode='sampling',
        first_chords=seed_chords,
        temperature=chord_temperature)
chords = []
# Extend the progression to cover the whole song (+2 for the lookahead below).
for i in range((num_bars+2)):
    ch_model.predict_next()
# Represent each chord per the configured embedding method from settings.
if chord_embed_method == 'embed':
    embedded_chords = ch_model.embed_chords_song(ch_model.song)
elif chord_embed_method == 'onehot':
    embedded_chords = data_class.make_one_hot_vector(ch_model.song, num_chords)
elif chord_embed_method == 'int':
    embedded_chords = [[x] for x in ch_model.song]
chords = []
# Upsample the per-bar chords to one feature vector per timestep (fs*2 steps/bar).
for j in range((len(ch_model.song)-2)*fs*2):
    ind = int(((j+1)/(fs*2)))
    if next_chord_feature:
        # Also append the *next* bar's chord so the model can anticipate changes.
        ind2 = int(((j+1)/(fs*2)))+1
        chords.append(list(embedded_chords[ind])+list(embedded_chords[ind2]))
    else:
        chords.append(embedded_chords[ind])
chords=np.array(chords)
if counter_feature:
    # 3-bit binary counter marking the position within each bar (8 steps).
    counter = [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]]
    counter = np.array(counter*(len(ch_model.song)-2))
    chords = np.append(chords, counter, axis=1)
# Concatenate note roll + chord features, shaped for stateful per-step predict.
seed = np.append(seed, chords[:seed.shape[0]], axis=1)
seed = np.reshape(seed, (seed.shape[0], 1, 1, seed.shape[1]))
next_step = None
# Feed the whole seed through the melody model to build up its state.
for step in seed:
    next_step = melody_model.predict(step)
notes = sample_probability_vector(next_step[0])
rest = []
rest.append(notes)
# Autoregressive generation: previous sampled notes + chord features in, notes out.
for chord in chords[seed.shape[0]:]:
    next_input = np.append(notes, chord, axis=0)
    next_input = np.reshape(next_input, (1, 1, next_input.shape[0]))
    next_step = melody_model.predict(next_input)
    notes = sample_probability_vector(next_step[0])
    rest.append(notes)
rest = np.array(rest)
# Pad the cropped pitch range back to the full piano-roll width.
rest = np.pad(rest, ((0,0),(low_crop,num_notes-high_crop)), mode='constant', constant_values=0)
ind = np.nonzero(rest)
#rest = np.reshape(rest, (rest.shape[1], rest.shape[0]))
#note_ind = mf.pianoroll_to_note_index(rest)
#print(ch_model.song)
instrument_names = ['Electric Guitar (jazz)', 'Acoustic Grand Piano',
                    'Bright Acoustic Piano', 'Electric Piano 1', 'Electric Piano 2', 'Drawbar Organ',
                    'Rock Organ', 'Church Organ', 'Reed Organ', 'Cello', 'Viola', 'Honky-tonk Piano', 'Glockenspiel',
                    'Percussive Organ', 'Accordion', 'Acoustic Guitar (nylon)', 'Acoustic Guitar (steel)', 'Electric Guitar (clean)',
                    'Electric Guitar (muted)', 'Overdriven Guitar', 'Distortion Guitar', 'Tremolo Strings', 'Pizzicato Strings',
                    'Orchestral Harp', 'String Ensemble 1', 'String Ensemble 2', 'SynthStrings 1', 'SynthStrings 2']
# Render the same generated roll once per instrument.
for instrument_name in instrument_names:
    mf.pianoroll_to_midi_continous(rest, midi_save_folder, instrument_name, instrument_name, BPM)
    # mf.pianoroll_to_midi(rest, 'test/midi/', instrument_name, instrument_name, BPM)
| 4,976 | 28.625 | 187 | py |
JamBot | JamBot-master/chord_lstm_training.py | # Author: Jonas Wiesendanger, Andres Konrad, Gino Brunner (brunnegi@ethz.ch)
from settings import *
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Activation
from keras.layers import Embedding
from keras.optimizers import RMSprop, Adam
import keras.utils
from keras.utils import np_utils
from keras.layers.wrappers import Bidirectional
from random import shuffle
import progressbar
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import _pickle as pickle
import os
import data_class
import time
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Uncomment next block if you only want to use a fraction of the GPU memory:
# config = tf.ConfigProto()
# config.gpu_options.visible_device_list = "2"
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
# set_session(tf.Session(config=config))
#Path where the models are saved:
model_path = 'models/chords/'
model_filetype = '.pickle'
# --- Training hyper-parameters ---
epochs = 20
train_set_size = 4
test_set_size = 1
test_step = 800 # Calculate error for test set every this many songs
verbose = False
show_plot = False
save_plot = True
lstm_size = 512
batch_size = 1  # stateful LSTM trained one song at a time
learning_rate = 0.00001
step_size = 1  # one chord in, next chord out
save_step = 10  # save a checkpoint every this many epochs
shuffle_train_set = True
bidirectional = False
optimizer = 'Adam'
# Encode the run configuration into a unique, timestamped model directory name.
fd = {'shifted': shifted, 'lr': learning_rate, 'emdim': chord_embedding_dim, 'opt': optimizer,
      'bi': bidirectional, 'lstms': lstm_size, 'trainsize': train_set_size, 'testsize': test_set_size, 'samples_per_bar': samples_per_bar}
t = str(int(round(time.time())))
model_name = t+ '-Shifted_%(shifted)s_Lr_%(lr)s_EmDim_%(emdim)s_opt_%(opt)s_bi_%(bi)s_lstmsize_%(lstms)s_trainsize_%(trainsize)s_testsize_%(testsize)s_samples_per_bar%(samples_per_bar)s' % fd
model_path = model_path + model_name + '/'
if not os.path.exists(model_path):
    os.makedirs(model_path)
print('loading data...')
train_set, test_set = data_class.get_chord_train_and_test_set(train_set_size, test_set_size)
print('creating model...')
# Stateful model: chord index -> embedding -> LSTM -> softmax over chord vocab.
model = Sequential()
model.add(Embedding(num_chords, chord_embedding_dim, input_length=step_size, name="embedding", batch_input_shape=(batch_size,step_size)))
# model.add(Embedding(num_chords, chord_embedding_dim, input_length=step_size))
# if bidirectional: model.add(Bidirectional(LSTM(lstm_size, stateful=True)))
# else: model.add(LSTM(lstm_size, stateful=True))
model.add(LSTM(lstm_size, stateful=True))
model.add(Dense(num_chords))
model.add(Activation('softmax'))
if optimizer == 'Adam':
    optimizer = Adam(lr=learning_rate)
elif optimizer == 'RMS':
    optimizer = RMSprop(lr=learning_rate)
loss = 'categorical_crossentropy'
print("compiling model")
model.compile(optimizer, loss)
# Loss histories, appended to by train()/test() below.
total_test_loss_array = []
total_train_loss_array = []
total_test_loss = 0
def test():
    """Evaluate the model on the held-out songs and refresh the loss plot.

    Reads the module-level ``model``, ``test_set`` and loss arrays; appends
    the average test loss, redraws the train/test curves and pickles both
    histories into the model directory.
    """
    print('\nTesting:')
    total_test_loss = 0
    bar = progressbar.ProgressBar(maxval=test_set_size, redirect_stdout=False)
    for i, test_song in enumerate(test_set):
        # Inputs are all chords but the last; targets are shifted by one step.
        X_test = test_song[:-1]
        Y_test = np_utils.to_categorical(test_song[1:], num_classes=num_chords)
        loss = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=verbose)
        model.reset_states()  # stateful LSTM: reset between independent songs
        total_test_loss += loss
        bar.update(i+1)
    total_test_loss_array.append(total_test_loss/test_set_size)
    print('\nTotal test loss: ', total_test_loss/test_set_size)
    print('-'*50)
    # NOTE(review): the figure is never cleared, so each call re-plots the full
    # curves on top of the previous ones — confirm this is intended.
    plt.plot(total_test_loss_array, 'b-', label='test loss')
    plt.plot(total_train_loss_array, 'r-', label='train loss')
    # plt.legend()
    plt.ylabel(model_path)
    # plt.axis([0, 50, 3, 5])
    plt.grid()
    if show_plot: plt.show()
    if save_plot: plt.savefig(model_path+'plot.png')
    # Persist the loss histories alongside the checkpoints.
    pickle.dump(total_test_loss_array,open(model_path+'total_test_loss_array.pickle', 'wb'))
    pickle.dump(total_train_loss_array,open(model_path+'total_train_loss_array.pickle', 'wb'))
def train():
    """Train the chord model song-by-song for ``epochs`` epochs.

    Uses the module-level ``model``, ``train_set`` and hyper-parameter
    globals; runs test() every ``test_step`` songs and saves a checkpoint
    every ``save_step`` epochs.
    """
    print('training model...')
    total_train_loss = 0
    for e in range(1, epochs+1):
        print('Epoch ', e, 'of ', epochs, 'Epochs\nTraining:')
        if shuffle_train_set:
            shuffle(train_set)
        bar = progressbar.ProgressBar(maxval=train_set_size)
        for i, song in enumerate(train_set):
            # Inputs are all chords but the last; targets are shifted by one.
            X = song[:-1]
            Y = np_utils.to_categorical(song[1:], num_classes=num_chords)
            hist = model.fit(X, Y, batch_size=batch_size, shuffle=False, verbose=verbose)
            model.reset_states()  # stateful LSTM: reset between independent songs
            bar.update(i+1)
            total_train_loss += hist.history['loss'][0]
            # BUG FIX: use '==' instead of 'is' for integer comparison; 'is'
            # tests object identity and only happens to work for CPython's
            # cached small ints (and warns on modern Pythons).
            if (i+1) % test_step == 0:
                total_train_loss = total_train_loss/test_step
                total_train_loss_array.append(total_train_loss)
                test()
                total_train_loss = 0
        if e % save_step == 0:
            print('saving model')
            model_save_path = model_path + 'model_' + 'Epoch' + str(e) + '_' + str(i+1) + model_filetype
            model.save(model_save_path)
def save_params():
    """Write this run's hyper-parameters to ``params.txt`` in the model folder."""
    with open(model_path + 'params.txt', "w") as fh:
        fh.write("epochs: %s" % epochs + '\n')
        fh.write("train_set_size: %s" % train_set_size + '\n')
        fh.write("test_set_size: %s" % test_set_size + '\n')
        fh.write("lstm_size: %s" % lstm_size + '\n')
        fh.write("embedding_dim: %s" % chord_embedding_dim + '\n')
        fh.write("learning_rate: %s" % learning_rate + '\n')
        fh.write("shuffle_train_set: %s" % shuffle_train_set + '\n')
        fh.write("test_step: %s" % test_step + '\n')
        fh.write("bidirectional: %s" % bidirectional + '\n')
        fh.write("num_chords: %s" % num_chords + '\n')
        fh.write("chord_n: %s" % chord_n + '\n')
    print("saving params")
# Record the run configuration, then start the full training loop.
save_params()
print("starting training..")
train()
| 5,989 | 35.975309 | 191 | py |
DSRE | DSRE-main/main.py | # coding:utf-8
import torch
import numpy as np
import json
import sys
import os
import argparse
import logging
import framework
import encoder
import model1
import model2
parser = argparse.ArgumentParser()
parser.add_argument('--pretrain_path', default='bert-base-uncased',
help='Pre-trained ckpt path / model name (hugginface)')
parser.add_argument('--ckpt', default='verified_nyt10_Passage_Level',
help='Checkpoint name')
parser.add_argument('--only_test', action='store_true',
help='Only run test')
parser.add_argument('--mask_entity', action='store_true',
help='Mask entity mentions')
parser.add_argument('--metric', default='auc', choices=['micro_f1', 'auc','p@10','p@30'],
help='Metric for picking up best checkpoint')
parser.add_argument('--train_file', default='nyt10/nyt10_train.txt', type=str,
help='Training data file')
parser.add_argument('--val_file', default='nyt10/nyt10_test.txt', type=str,
help='Validation data file')
parser.add_argument('--test_file', default='nyt10/nyt10_test.txt', type=str,
help='Test data file')
parser.add_argument('--rel2id_file', default='nyt10/nyt10_rel2id.json', type=str,
help='Relation to ID file')
parser.add_argument('--batch_size', default=16, type=int,
help='Batch size')
parser.add_argument('--lr', default=2e-5, type=float,
help='Learning rate')
parser.add_argument('--optim', default='adamw', type=str,
help='Optimizer')
parser.add_argument('--weight_decay', default=1e-5, type=float,
help='Weight decay')
parser.add_argument('--max_length', default=512, type=int,
help='Maximum sentence length')
parser.add_argument('--max_epoch', default=5, type=int,
help='Max number of training epochs')
parser.add_argument('--save_name', default='', type=str,
help='name for saving checkpoint')
parser.add_argument('--seed', default=772, type=int,
help='random seed')
parser.add_argument(
"--devs",
nargs="*",
type=int,
default=[0,1],
help='list of gpu ids on which model needs to be run'
)
args = parser.parse_args()
import os
import random
def seed_everything(seed=1234):
    """Seed every RNG used by this script for reproducible runs.

    Covers Python's ``random``, ``PYTHONHASHSEED``, NumPy and PyTorch,
    and pins cuDNN to its deterministic kernels.

    Args:
        seed: the seed applied to all generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # BUG FIX: seed *all* visible GPUs, not just the current one — the model
    # runs under nn.DataParallel across several devices.
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmark mode picks nondeterministic kernels
seed_everything(seed=args.seed)
# Some basic settings
root_path = '.'
sys.path.append(root_path)
if not os.path.exists('ckpt'):
    os.mkdir('ckpt')
ckpt = 'ckpt/{}.pth.tar'.format(args.ckpt)
print(ckpt)
# Fail fast if any of the data files is missing.
if not (os.path.exists(args.train_file) and os.path.exists(args.val_file) and os.path.exists(args.test_file) and os.path.exists(args.rel2id_file)):
    raise Exception('--train_file, --val_file, --test_file and --rel2id_file are not specified or files do not exist. Or specify --dataset')
logging.info('Arguments:')
for arg in vars(args):
    logging.info(' {}: {}'.format(arg, getattr(args, arg)))
rel2id = json.load(open(args.rel2id_file))
# Define the passage encoder
passage_encoder = encoder.PassageEncoder(
    pretrain_path=args.pretrain_path,
    batch_size = args.batch_size,
    mask_entity=args.mask_entity
)
# Define the model
# For NYT, use separate fully connected layers for NA and non-NA (a shared layer for all the non-NA labels)
if 'nyt' in args.test_file:
    model_ = model2.PassageAttention(passage_encoder, len(rel2id), rel2id)
else:
    model_ = model1.PassageAttention(passage_encoder, len(rel2id), rel2id)
# NOTE(review): args.optim is parsed above but 'adamw' is hard-coded here —
# confirm the CLI flag is supposed to be honored.
framework_ = framework.PassageRE(
    model=model_,
    train_path=args.train_file,
    val_path=args.val_file,
    test_path=args.test_file,
    ckpt=ckpt,
    batch_size=args.batch_size,
    max_epoch=args.max_epoch,
    lr=args.lr,
    weight_decay=args.weight_decay,
    opt='adamw',
    warmup_step = 30000 // args.batch_size,
    devices = args.devs)
if not args.only_test:
    framework_.train_model(args.metric)
# Test: reload the best checkpoint and evaluate on the test split.
framework_.load_state_dict(torch.load(ckpt, map_location='cuda:0')['state_dict'])
result = framework_.eval_model(framework_.test_loader)
# Print the result
#print(result)
#pred = result['pred']
#with open("prediction.txt", "w") as f:
#    for key in pred:
#        f.write(key)
#        preds = pred[key]['prediction']
#        for p in preds:
#            f.write("\t"+p)
#        f.write("\n")
print('Test set results for ckpt = ' +str(ckpt)+ ' are:')
print("AUC: %.4f" % result['auc'])
print("Average P@M: %.4f" % result['avg_p300'])
print("Micro F1: %.4f" % (result['max_micro_f1']))
print("Macro F1: %.4f" % (result['max_macro_f1']))
| 4,541 | 32.644444 | 147 | py |
DSRE | DSRE-main/encoder/passage_encoder.py | import logging
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
class PassageEncoder(nn.Module):
    """BERT encoder that packs a bag of sentences into one passage sequence.

    Each sentence of the bag is tokenized with its head/tail entities wrapped
    in reserved marker tokens, and the sentences are concatenated (separated
    by [SEP]) into a single input of at most ``max_length`` word pieces.
    """

    def __init__(self, pretrain_path, batch_size, blank_padding=True, mask_entity=False):
        """
        Args:
            pretrain_path: HuggingFace model name or checkpoint directory.
            batch_size: stored for callers; not used inside the encoder.
            blank_padding: pad token ids up to ``max_length`` with [PAD] (id 0).
            mask_entity: replace entity mentions with single mask markers.
        """
        super().__init__()
        self.blank_padding = blank_padding
        self.hidden_size = 768  # BERT-base hidden size
        self.batch_size = batch_size
        self.mask_entity = mask_entity
        self.max_length = 512  # BERT's positional-embedding limit
        logging.info('Loading BERT pre-trained checkpoint.')
        self.bert = BertModel.from_pretrained(pretrain_path)
        self.tokenizer = BertTokenizer.from_pretrained(pretrain_path)
        self.linear = nn.Linear(self.hidden_size, self.hidden_size)

    def forward(self, token, att_mask):
        """Return per-token hidden states, shape (B, L, hidden_size).

        NOTE(review): tuple-unpacking the BERT output assumes an older
        ``transformers`` API (<4.0 or ``return_dict=False``) — confirm the
        pinned library version.
        """
        hidden, _ = self.bert(token, attention_mask=att_mask)
        return hidden

    def tokenize(self, bag, data):
        """Tokenize a bag of sentence indices into one padded passage.

        Args:
            bag: list of indices into ``data`` (sentences sharing an entity pair).
            data: dataset items holding 'text' or 'token' plus 'h'/'t' entity spans.

        Returns:
            (indexed_tokens, att_mask): LongTensors of shape (1, max_length);
            the mask is 1 over real tokens and 0 over padding.
        """
        max_len = 0
        indexed_tokens = []
        for it, sent_id in enumerate(bag):
            item = data[sent_id]
            if 'text' in item:
                sentence = item['text']
                is_token = False
            else:
                sentence = item['token']
                is_token = True
            pos_head = item['h']['pos']
            pos_tail = item['t']['pos']
            # Order the two entity spans by position; remember if tail comes first.
            pos_min = pos_head
            pos_max = pos_tail
            if pos_head[0] > pos_tail[0]:
                pos_min = pos_tail
                pos_max = pos_head
                rev = True
            else:
                rev = False
            # Split the sentence around the two entity spans and tokenize each piece.
            if not is_token:
                sent0 = self.tokenizer.tokenize(sentence[:pos_min[0]])
                ent0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])
                sent1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])
                ent1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])
                sent2 = self.tokenizer.tokenize(sentence[pos_max[1]:])
            else:
                sent0 = self.tokenizer.tokenize(' '.join(sentence[:pos_min[0]]))
                ent0 = self.tokenizer.tokenize(' '.join(sentence[pos_min[0]:pos_min[1]]))
                sent1 = self.tokenizer.tokenize(' '.join(sentence[pos_min[1]:pos_max[0]]))
                ent1 = self.tokenizer.tokenize(' '.join(sentence[pos_max[0]:pos_max[1]]))
                sent2 = self.tokenizer.tokenize(' '.join(sentence[pos_max[1]:]))
                # (removed unused local: a joined copy of the sentence was
                # computed here and never read)
            # Replace or wrap the entities with reserved marker tokens;
            # [unused1]/[unused2] always bracket the head, [unused3]/[unused4]
            # the tail, regardless of their textual order.
            if self.mask_entity:
                ent0 = ['[unused5]'] if not rev else ['[unused6]']
                ent1 = ['[unused6]'] if not rev else ['[unused5]']
            else:
                ent0 = ['[unused1]'] + ent0 + ['[unused2]'] if not rev else ['[unused3]'] + ent0 + ['[unused4]']
                ent1 = ['[unused3]'] + ent1 + ['[unused4]'] if not rev else ['[unused1]'] + ent1 + ['[unused2]']
            # [CLS] starts the passage; every sentence ends with [SEP].
            if it == 0:
                re_tokens = ['[CLS]'] + sent0 + ent0 + sent1 + ent1 + sent2 + ['[SEP]']
            else:
                re_tokens = sent0 + ent0 + sent1 + ent1 + sent2 + ['[SEP]']
            curr_indexed_tokens = self.tokenizer.convert_tokens_to_ids(re_tokens)
            curr_len = len(curr_indexed_tokens)
            # Append whole sentences only while they fit within max_length.
            if max_len + curr_len <= self.max_length:
                indexed_tokens += curr_indexed_tokens
                max_len += curr_len
            else:
                if max_len == 0:
                    # First sentence alone is too long: hard-truncate it.
                    indexed_tokens = curr_indexed_tokens[:self.max_length]
                    max_len = len(indexed_tokens)
                break
        # Padding
        if self.blank_padding:
            while len(indexed_tokens) < self.max_length:
                indexed_tokens.append(0)  # 0 is id for [PAD]
            indexed_tokens = indexed_tokens[:self.max_length]
        indexed_tokens = torch.tensor(indexed_tokens).long().unsqueeze(0)  # (1, L)
        # Attention mask: 1 over the real tokens, 0 over padding.
        att_mask = torch.zeros(indexed_tokens.size()).long()  # (1, L)
        att_mask[0, :max_len] = 1
        return indexed_tokens, att_mask
| 4,027 | 40.958333 | 112 | py |
DSRE | DSRE-main/model1/passage_att.py | import torch
from torch import nn, optim
from torch.nn import functional as F
class PassageAttention(nn.Module):
    """Token-level attention head for passage-level relation extraction.

    Each relation embedding attends over the passage's token representations;
    the attended vector is scored through a shared linear layer + sigmoid,
    yielding an independent probability per relation (multi-label).
    """

    def __init__(self,
                 passage_encoder,
                 num_class,
                 rel2id):
        """
        Args:
            passage_encoder: encoder for the whole passage (bag of sentences);
                must expose ``hidden_size``.
            num_class: number of relation classes.
            rel2id: relation-name -> id mapping.
        """
        super().__init__()
        self.passage_encoder = passage_encoder
        self.embed_dim = self.passage_encoder.hidden_size
        self.num_class = num_class
        self.fc = nn.Linear(self.embed_dim, 1)
        self.relation_embeddings = nn.Parameter(torch.empty(self.num_class, self.embed_dim))
        nn.init.xavier_normal_(self.relation_embeddings)
        self.sigm = nn.Sigmoid()
        self.softmax = nn.Softmax(-1)
        self.rel2id = rel2id
        # Inverse mapping for decoding predictions.
        self.id2rel = {relid: rel for rel, relid in rel2id.items()}

    def _score(self, rep, batch_size):
        """Attend over tokens with one query per relation; return (B, N) scores."""
        att_mat = self.relation_embeddings.repeat(batch_size, 1, 1)           # (B, N, H)
        # (B, L, H) x (B, H, N) -> (B, L, N) -> (B, N, L)
        att_scores = torch.bmm(rep, att_mat.transpose(1, 2)).transpose(1, 2)
        att_scores = self.softmax(att_scores)                                 # normalize over tokens
        rel_logits = torch.bmm(att_scores, rep)                               # (B, N, H)
        return self.sigm(self.fc(rel_logits).squeeze(-1))                     # (B, N)

    def forward(self, token, mask, train=True):
        """
        Args:
            token: (B, L) token ids.
            mask: (B, L) attention mask, or None.
            train: when False, scoring runs under torch.no_grad().
        Return:
            (B, N) per-relation probabilities.
        """
        batch_size = token.shape[0]
        if mask is not None:
            rep = self.passage_encoder(token, mask)  # (B, L, H)
        else:
            rep = self.passage_encoder(token)
        # Refactor: the original duplicated the scoring code in both branches;
        # it now lives once in _score().
        if train:
            return self._score(rep, batch_size)
        with torch.no_grad():
            return self._score(rep, batch_size)
| 2,684 | 41.619048 | 163 | py |
DSRE | DSRE-main/model2/passage_att.py | import torch
from torch import nn, optim
from torch.nn import functional as F
import pdb
class PassageAttention(nn.Module):
    """Token-level attention head for passage-level RE (NYT variant).

    Like the model1 head, but with separate linear output layers for the NA
    class (``fc1``) and for all real relations (``fc2``, shared).
    """

    def __init__(self,
                 passage_encoder,
                 num_class,
                 rel2id):
        """
        Args:
            passage_encoder: encoder for the whole passage (bag of sentences);
                must expose ``hidden_size``.
            num_class: number of relation classes (index 0 is NA).
            rel2id: relation-name -> id mapping.
        """
        super().__init__()
        self.passage_encoder = passage_encoder
        self.embed_dim = self.passage_encoder.hidden_size
        self.num_class = num_class
        self.fc1 = nn.Linear(self.embed_dim, 1)  # scores the NA class
        self.fc2 = nn.Linear(self.embed_dim, 1)  # shared head for non-NA classes
        self.relation_embeddings = nn.Parameter(torch.empty(self.num_class, self.embed_dim))
        nn.init.xavier_normal_(self.relation_embeddings)
        self.sigm = nn.Sigmoid()
        self.softmax = nn.Softmax(-1)
        self.rel2id = rel2id
        # Inverse mapping for decoding predictions.
        self.id2rel = {relid: rel for rel, relid in rel2id.items()}

    def _score(self, rep, batch_size):
        """Attend over tokens with one query per relation; return (B, N) scores."""
        att_mat = self.relation_embeddings.repeat(batch_size, 1, 1)           # (B, N, H)
        # (B, L, H) x (B, H, N) -> (B, L, N) -> (B, N, L)
        att_scores = torch.bmm(rep, att_mat.transpose(1, 2)).transpose(1, 2)
        att_scores = self.softmax(att_scores)                                 # normalize over tokens
        rel_logits = torch.bmm(att_scores, rep)                               # (B, N, H)
        # Separate linear layers for NA (row 0) and the real relations.
        na_logits = self.fc1(rel_logits[:, 0, :])
        other_logits = self.fc2(rel_logits[:, 1:, :])
        final_logits = torch.cat((na_logits.unsqueeze(1), other_logits), 1).squeeze(-1)
        return self.sigm(final_logits)                                        # (B, N)

    def forward(self, token, mask, train=True):
        """
        Args:
            token: (B, L) token ids.
            mask: (B, L) attention mask, or None.
            train: when False, scoring runs under torch.no_grad().
        Return:
            (B, N) per-relation probabilities.
        """
        batch_size = token.shape[0]
        if mask is not None:
            rep = self.passage_encoder(token, mask)  # (B, L, H)
        else:
            rep = self.passage_encoder(token)
        # Refactor: the original duplicated the scoring code in both branches;
        # it now lives once in _score().
        if train:
            return self._score(rep, batch_size)
        with torch.no_grad():
            return self._score(rep, batch_size)
| 3,204 | 41.733333 | 163 | py |
DSRE | DSRE-main/framework/passage_re.py | import torch
from torch import nn, optim
from .data_loader import PassageRELoader
from .utils import AverageMeter
from tqdm import tqdm
import pdb
class PassageRE(nn.Module):
    """Training/evaluation harness for passage-level relation extraction.

    Builds the bag-level data loaders, wraps the model in nn.DataParallel,
    sets up the optimizer (+ optional warmup schedule) and exposes
    ``train_model`` / ``eval_model`` / ``load_state_dict``.
    """

    def __init__(self,
                 model,
                 train_path,
                 val_path,
                 test_path,
                 ckpt,
                 batch_size=16,
                 max_epoch=5,
                 lr=2e-5,
                 weight_decay=1e-5,
                 opt='adamw',
                 warmup_step=0,
                 devices=[0,1]):
        """
        Args:
            model: a PassageAttention model (exposes passage_encoder, rel2id).
            train_path/val_path/test_path: dataset files (None skips a loader).
            ckpt: path where the best checkpoint is saved.
            opt: 'sgd', 'adam' or 'adamw' (AdamW with weight-decay groups).
            warmup_step: >0 enables a linear warmup schedule.
            devices: GPU ids for DataParallel; devices[0] hosts the batch.
        """
        super().__init__()
        self.max_epoch = max_epoch
        # Load data path, rel2id, tokenizer, batch_size, shuffle, num_workers=1
        if train_path != None:
            self.train_loader = PassageRELoader(
                path = train_path,
                rel2id = model.rel2id,
                tokenizer = model.passage_encoder.tokenize,
                batch_size = batch_size,
                shuffle = True)
        if val_path != None:
            self.val_loader = PassageRELoader(
                path = val_path,
                rel2id = model.rel2id,
                tokenizer = model.passage_encoder.tokenize,
                batch_size = batch_size,
                shuffle = False)
        if test_path != None:
            self.test_loader = PassageRELoader(
                path = test_path,
                rel2id = model.rel2id,
                tokenizer = model.passage_encoder.tokenize,
                batch_size = batch_size,
                shuffle = False)
        # Model
        self.device = torch.device('cuda:{}'.format(devices[0]))
        self.model = nn.DataParallel(model, device_ids=devices)
        self.model.to(self.device)
        # Multi-label objective: one independent sigmoid per relation.
        self.criterion = torch.nn.BCELoss(reduction='sum')
        # Params and optimizer
        params = self.model.parameters()
        self.lr = lr
        if opt == 'sgd':
            self.optimizer = optim.SGD(params, lr, weight_decay=weight_decay)
        elif opt == 'adam':
            self.optimizer = optim.Adam(params, lr, weight_decay=weight_decay)
        elif opt == 'adamw':
            from transformers import AdamW
            params = list(self.named_parameters())
            # Standard BERT recipe: no weight decay on biases and LayerNorm.
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            grouped_params = [
                {
                    'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
                    'weight_decay': 0.01,
                    'lr': lr,
                    'ori_lr': lr
                },
                {
                    'params': [p for n, p in params if any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                    'lr': lr,
                    'ori_lr': lr
                }
            ]
            self.optimizer = AdamW(grouped_params, correct_bias=False)
        else:
            # BUG FIX: the message listed 'bert_adam', which is not an
            # accepted value; the actual option is 'adamw'.
            raise Exception("Invalid optimizer. Must be 'sgd' or 'adam' or 'adamw'.")
        # Warmup
        if warmup_step > 0:
            from transformers import get_linear_schedule_with_warmup
            training_steps = self.train_loader.dataset.__len__() // batch_size * self.max_epoch
            self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=warmup_step,
                                                             num_training_steps=training_steps)
        else:
            self.scheduler = None
        self.ckpt = ckpt

    def train_model(self, metric='auc'):
        """Train for ``max_epoch`` epochs, checkpointing on the best val ``metric``."""
        best_metric = 0
        for epoch in range(self.max_epoch):
            # Train
            self.train()
            print("=== Epoch %d train ===" % epoch)
            avg_loss = AverageMeter()
            avg_acc = AverageMeter()
            avg_pos_acc = AverageMeter()
            t = tqdm(self.train_loader)
            for iter, data in enumerate(t):
                self.optimizer.zero_grad()
                # Best-effort move of each batch field to the primary device
                # (non-tensor fields such as bag names are skipped).
                for i in range(len(data)):
                    try:
                        data[i] = data[i].to(self.device)
                    except:
                        pass
                label = data[0]
                bag_name = data[1]
                token, mask = data[2].squeeze(1), data[3].squeeze(1)
                rel_scores = self.model(token, mask)
                label = torch.stack(label).to(self.device)
                loss = self.criterion(rel_scores, label)
                # Multi-label prediction: every relation scored >= 0.5 counts.
                pred = (rel_scores >= 0.5)*torch.tensor([[1]*rel_scores.shape[1]]*rel_scores.shape[0]).to(self.device)
                acc = float((pred.view(-1) == label.view(-1)).long().sum().item())/label.view(-1).shape[0]
                pos_total = (label.view(-1) != 0).long().sum().item()
                pos_correct = ((pred.view(-1) == label.view(-1)).long()*(label.view(-1) != 0).long()).sum()
                if pos_total > 0:
                    pos_acc = float(pos_correct) / float(pos_total)
                else:
                    pos_acc = 0
                # Log
                avg_loss.update(loss.item(), 1)
                avg_acc.update(acc, 1)
                avg_pos_acc.update(pos_acc, 1)
                t.set_postfix(loss=avg_loss.avg, acc=avg_acc.avg, pos_acc=avg_pos_acc.avg)
                # Optimize
                loss.backward()
                self.optimizer.step()
                if self.scheduler is not None:
                    self.scheduler.step()
            # Val
            print("=== Epoch %d val ===" % epoch)
            result = self.eval_model(self.val_loader)
            print("AUC: %.4f" % result['auc'])
            print("Previous best %s on val set: %f" % (metric, best_metric))
            # BUG FIX: compare on the chosen metric. The original compared
            # result['auc'] while storing result[metric], silently breaking
            # checkpoint selection whenever metric != 'auc'.
            if result[metric] > best_metric:
                print("Best ckpt and saved.")
                torch.save({'state_dict': self.model.module.state_dict()}, self.ckpt)
                best_metric = result[metric]
        print("Best %s on val set: %f" % (metric, best_metric))

    def eval_model(self, eval_loader):
        """Score every bag in ``eval_loader`` and return the dataset's eval metrics."""
        self.model.eval()
        pred_result = []
        with torch.no_grad():
            t = tqdm(eval_loader)
            for iter, data in enumerate(t):
                for i in range(len(data)):
                    try:
                        data[i] = data[i].to(self.device)
                    except:
                        pass
                label = data[0]
                bag_name = data[1]
                token, mask = data[2].squeeze(1), data[3].squeeze(1)
                logits = self.model(token, mask, False)
                # Collect an (entity pair, relation, score) triple for every
                # non-NA relation of every bag; the dataset ranks them.
                for i in range(logits.shape[0]):
                    for relid in range(self.model.module.num_class):
                        if self.model.module.id2rel[relid] != 'NA':
                            pred_result.append({'entpair': bag_name[i][:2], 'relation': self.model.module.id2rel[relid], 'score': logits[i][relid].item()})
        result = eval_loader.dataset.eval(pred_result)
        return result

    def load_state_dict(self, state_dict):
        """Load weights into the wrapped (non-DataParallel) model."""
        self.model.module.load_state_dict(state_dict)
| 7,276 | 39.882022 | 154 | py |
DSRE | DSRE-main/framework/data_loader.py | import torch
import torch.utils.data as data
import os, random, json, logging
import numpy as np
import sklearn.metrics
class PassageREDataset(data.Dataset):
"""
Bag-level relation extraction dataset. Note that relation of NA should be named as 'NA'.
"""
def __init__(self, path, rel2id, tokenizer):
"""
Args:
path: path of the input file
rel2id: dictionary of relation->id mapping
tokenizer: function of tokenizing
"""
#seed = 42
#random.seed(seed)
#np.random.seed(seed)
#torch.manual_seed(seed)
#torch.cuda.manual_seed(seed)
#torch.backends.cudnn.deterministic = True
super().__init__()
self.tokenizer = tokenizer
self.rel2id = rel2id
self.num_classes = len(rel2id)
#self.bag_size = bag_size
self.id2rel = {}
for k,v in self.rel2id.items():
self.id2rel[v] = k
# Load the file
f = open(path)
self.data = []
for line in f:
line = line.rstrip()
if len(line) > 0:
self.data.append(eval(line))
f.close()
# Construct bag-level dataset (a bag contains instances sharing the same entity-pair)
self.weight = np.ones((len(self.rel2id)), dtype=np.float32)
self.bag_scope = []
self.rel_scope = []
self.name2id = {}
self.bag_name = []
self.facts = {}
self.bag2sents = []
for idx, item in enumerate(self.data):
fact = (item['h']['id'], item['t']['id'],item['relation'])
if item['relation'] != 'NA':
self.facts[fact] = 1
name = (item['h']['id'], item['t']['id'])
if 'text' in item:
sent = item['text'].lower().strip()
else:
sent = ' '.join(item['token']).lower().strip()
if name not in self.name2id:
self.name2id[name] = len(self.name2id)
self.bag_scope.append([])
self.rel_scope.append(set())
self.bag_name.append(name)
self.bag2sents.append(set())
if sent not in self.bag2sents[self.name2id[name]]:
self.bag_scope[self.name2id[name]].append(idx)
self.bag2sents[self.name2id[name]].add(sent)
rel_id = self.rel2id[item['relation']]
if rel_id not in self.rel_scope[self.name2id[name]]:
self.rel_scope[self.name2id[name]].add(rel_id)
self.weight[rel_id] += 1.0
self.weight = np.float32(1.0 / (self.weight ** 0.05))
self.weight = torch.from_numpy(self.weight)
    def __len__(self):
        # One dataset item per entity-pair bag, not per sentence.
        return len(self.bag_scope)
    def __getitem__(self, index):
        """Return [multi-hot label, bag name, token ids, attention mask] for one bag."""
        bag = self.bag_scope[index]
        # NOTE(review): this shuffles the stored bag *in place*, so the
        # sentence order inside self.bag_scope changes across epochs —
        # presumably to randomize which sentences survive the tokenizer's
        # max-length truncation; confirm the side effect is intended.
        random.shuffle(bag)
        rel = torch.LongTensor(list(self.rel_scope[index]))
        # Multi-hot label vector over all relation classes.
        onehot_rel = torch.zeros(self.num_classes)
        onehot_rel = onehot_rel.scatter_(0, rel, 1)
        # Tokenizer packs the whole bag into a single (1, L) passage.
        token, mask = self.tokenizer(bag, self.data)
        seqs = [[], []]
        seqs[0].append(token)
        seqs[1].append(mask)
        for i in range(len(seqs)):
            seqs[i] = torch.cat(seqs[i], 0) # (n, L), n is the size of bag
        return [onehot_rel, self.bag_name[index]] + seqs
def collate_bag_size_fn(data):
data = list(zip(*data))
label, bag_name = data[:2]
seqs = data[2:]
for i in range(len(seqs)):
seqs[i] = torch.stack(seqs[i], 0) # (batch, bag, L)
return [label, bag_name] + seqs
# def eval(self, pred_result):
# """
# Args:
# pred_result: a list with dict {'entpair': (head_id, tail_id), 'relation': rel, 'score': score}.
# Note that relation of NA should be excluded.
# Return:
# {'prec': narray[...], 'rec': narray[...], 'mean_prec': xx, 'f1': xx, 'auc': xx}
# prec (precision) and rec (recall) are in micro style.
# prec (precision) and rec (recall) are sorted in the decreasing order of the score.
# f1 is the max f1 score of those precison-recall points
# """
# sorted_pred_result = sorted(pred_result, key=lambda x: x['score'], reverse=True)
# prec = []
# rec = []
# correct = 0
# total = len(self.facts)
# P_10R = False # To check if recall has reached 0.1
# P_30R = False # To check if recall has reached 0.3
# p10_val = 0.0
# p30_val = 0.0
# for i, item in enumerate(sorted_pred_result):
# if (item['entpair'][0], item['entpair'][1], item['relation']) in self.facts:
# correct += 1
# prec_temp = float(correct) / float(i + 1)
# prec.append(prec_temp)
# rec_temp = float(correct) / float(total)
# rec.append(rec_temp)
# if not P_10R:
# if rec_temp >= 0.1:
# p10_val = prec_temp
# P_10R = True
# if not P_30R:
# if rec_temp >= 0.3:
# p30_val = prec_temp
# P_30R = True
# auc = np.around(sklearn.metrics.auc(x=rec, y=prec), 4)
# np_prec = np.array(prec)
# np_rec = np.array(rec)
# max_f1 = (2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).max()
# def prec_at_n(n):
# correct = 0
# for i, item in enumerate(sorted_pred_result[:n]):
# if (item['entpair'][0], item['entpair'][1], item['relation']) in self.facts:
# correct += 1
# return (correct / n)
# prec_at_all = prec_at_n(len(sorted_pred_result))
# prec_at_100 = prec_at_n(100)
# prec_at_200 = prec_at_n(200)
# prec_at_300 = prec_at_n(300)
# mean_prec = np_prec.mean()
# # return {'micro_p': np_prec, 'micro_r': np_rec, 'micro_p_mean': mean_prec, 'micro_f1': f1, 'auc': auc,
# # 'p@10': p10_val, 'p@30': p30_val}
# return {'prec': prec, 'rec': rec, 'auc': auc, 'p@all': prec_at_all, 'p@100': prec_at_100,
# 'p@200': prec_at_200, 'p@300': prec_at_300,'max_f1':max_f1, 'p@r10': p10_val, 'p@r30': p30_val}
def eval(self, pred_result, threshold=0.5):
"""
Args:
pred_result: a list with dict {'entpair': (head_id, tail_id), 'relation': rel, 'score': score}.
Note that relation of NA should be excluded.
Return:
{'prec': narray[...], 'rec': narray[...], 'mean_prec': xx, 'f1': xx, 'auc': xx}
prec (precision) and rec (recall) are in micro style.
prec (precision) and rec (recall) are sorted in the decreasing order of the score.
f1 is the max f1 score of those precison-recall points
"""
sorted_pred_result = sorted(pred_result, key=lambda x: x['score'], reverse=True)
prec = []
rec = []
correct = 0
total = len(self.facts)
entpair = {}
for i, item in enumerate(sorted_pred_result):
# Save entpair label and result for later calculating F1
idtf = item['entpair'][0] + '#' + item['entpair'][1]
if idtf not in entpair:
entpair[idtf] = {
'label': np.zeros((len(self.rel2id)), dtype=np.int),
'pred': np.zeros((len(self.rel2id)), dtype=np.int),
'score': np.zeros((len(self.rel2id)), dtype=np.float),
'prediction':[]
}
if (item['entpair'][0], item['entpair'][1], item['relation']) in self.facts:
correct += 1
entpair[idtf]['label'][self.rel2id[item['relation']]] = 1
if item['score'] >= threshold:
entpair[idtf]['pred'][self.rel2id[item['relation']]] = 1
if item['relation'] not in entpair[idtf]['prediction']:
entpair[idtf]['prediction'].append(item['relation'])
entpair[idtf]['score'][self.rel2id[item['relation']]] = item['score']
prec.append(float(correct) / float(i + 1))
rec.append(float(correct) / float(total))
auc = sklearn.metrics.auc(x=rec, y=prec)
np_prec = np.array(prec)
np_rec = np.array(rec)
with open('rec.npy', 'wb') as f:
np.save(f, np_rec)
with open('prec.npy', 'wb') as f:
np.save(f, np_prec)
max_micro_f1 = (2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).max()
best_threshold = sorted_pred_result[(2 * np_prec * np_rec / (np_prec + np_rec + 1e-20)).argmax()]['score']
mean_prec = np_prec.mean()
label_vec = []
pred_result_vec = []
score_vec = []
for ep in entpair:
label_vec.append(entpair[ep]['label'])
pred_result_vec.append(entpair[ep]['pred'])
score_vec.append(entpair[ep]['score'])
label_vec = np.stack(label_vec, 0)
pred_result_vec = np.stack(pred_result_vec, 0)
score_vec = np.stack(score_vec, 0)
micro_p = sklearn.metrics.precision_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='micro')
micro_r = sklearn.metrics.recall_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='micro')
micro_f1 = sklearn.metrics.f1_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='micro')
macro_p = sklearn.metrics.precision_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='macro')
macro_r = sklearn.metrics.recall_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='macro')
macro_f1 = sklearn.metrics.f1_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='macro')
pred_result_vec = score_vec >= best_threshold
max_macro_f1 = sklearn.metrics.f1_score(label_vec, pred_result_vec, labels=list(range(1, len(self.rel2id))), average='macro')
max_micro_f1_each_relation = {}
for rel in self.rel2id:
if rel != 'NA':
max_micro_f1_each_relation[rel] = sklearn.metrics.f1_score(label_vec, pred_result_vec, labels=[self.rel2id[rel]], average='micro')
return {'np_prec': np_prec, 'np_rec': np_rec, 'max_micro_f1': max_micro_f1, 'max_macro_f1': max_macro_f1, 'auc': auc, 'p@100': np_prec[99], 'p@200': np_prec[199], 'p@300': np_prec[299], 'avg_p300': (np_prec[99] + np_prec[199] + np_prec[299]) / 3, 'micro_f1': micro_f1, 'macro_f1': macro_f1, 'max_micro_f1_each_relation': max_micro_f1_each_relation, 'pred':entpair}
def PassageRELoader(path, rel2id, tokenizer, batch_size, shuffle, num_workers=1):
    """Build a DataLoader over ``PassageREDataset`` with bag-level collation."""
    dataset = PassageREDataset(path, rel2id, tokenizer)
    return data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        num_workers=num_workers,
        collate_fn=PassageREDataset.collate_bag_size_fn,
    )
| 11,370 | 43.592157 | 372 | py |
CmpLoss | CmpLoss-main/run_squad.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for question answering using a slightly adapted version of the 🤗 Trainer.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import json
import logging
import os
import sys
import datasets
import transformers
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from arguments import CmpTrainingArguments, DataArguments, ModelArguments, DATA_ARGS_NAME
from modeling import CmpQA
from trainer import QATrainer
from utils.qa import postprocess_squad_predictions
from utils.data import prepare_squad_features
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
# noinspection PyArgumentList
def main():
    """End-to-end SQuAD fine-tuning driver.

    Parses (model, data, training) arguments, loads and featurizes the
    datasets, builds the QA model and trainer, and runs training,
    evaluation and/or prediction as requested.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataArguments, CmpTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model_args: ModelArguments
    data_args: DataArguments
    training_args: CmpTrainingArguments
    # Update some training arguments
    if training_args.do_train:
        if 'large' in model_args.model_name_or_path:
            # For large models, try not to optimize the full model directly to avoid overfitting
            training_args.loss_target = 'first'
    # Run name encodes the three argument groups; output/logging dirs are nested under it.
    training_args.run_name = f"{model_args.abs}#{data_args.abs}#{training_args.abs}"
    training_args.output_dir = os.path.join(training_args.output_dir, training_args.run_name)
    training_args.logging_dir = os.path.join(training_args.logging_dir, training_args.run_name)
    # Setup logging
    log_level = training_args.get_process_log_level()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
        level=log_level,
    )
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    logger.info(f"Data parameters {data_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(set(os.listdir(training_args.output_dir)) - {DATA_ARGS_NAME}) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = datasets.load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        extension = None
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        raw_datasets = datasets.load_dataset(
            extension,
            data_files=data_files,
            field="data",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    if training_args.disable_dropout:
        config.attention_probs_dropout_prob = 0.
        config.hidden_dropout_prob = 0.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Use the Cmp-loss model only when cropping/dropping augmentation is enabled.
    if training_args.n_crop + training_args.n_drop > 0:
        model = CmpQA.from_pretrained(
            model_args,
            training_args,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        model = AutoModelForQuestionAnswering.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # Tokenizer check: this script requires a fast tokenizer.
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. Checkout the big table of models at"
            " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
            " this requirement"
        )
    # Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    else:
        column_names = raw_datasets["test"].column_names
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Training preprocessing
    train_dataset = None
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            # Select samples from Dataset. This will help to decrease processing time
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        # Create training features
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                function=prepare_squad_features,
                fn_kwargs={"tokenizer": tokenizer, "max_seq_len": max_seq_length,
                           "doc_stride": data_args.doc_stride, "pad_to_max_length": data_args.pad_to_max_length},
                batched=True,
                remove_columns=column_names,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                new_fingerprint=(f"cr-{tokenizer.name_or_path.replace('/', '_')}-{data_args.doc_stride}-"
                                 f"{max_seq_length}{'padded' if data_args.pad_to_max_length else ''}-train"),
                desc="Running tokenizer on train dataset",
            )
        if data_args.max_train_samples is not None:
            # Select samples from dataset again since Feature Creation might increase number of features
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
    # Validation preprocessing
    eval_examples, eval_dataset = None, None
    if training_args.do_eval or training_args.do_train:
        if "validation" not in raw_datasets:
            raise ValueError("Evaluation requires a validation dataset")
        eval_examples = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            # Select eval samples from dataset
            max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
            eval_examples = eval_examples.select(range(max_eval_samples))
        # Create validation features
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_examples.map(
                function=prepare_squad_features,
                fn_kwargs={"tokenizer": tokenizer, "max_seq_len": max_seq_length,
                           "doc_stride": data_args.doc_stride, "pad_to_max_length": data_args.pad_to_max_length},
                batched=True,
                remove_columns=column_names,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                new_fingerprint=(f"cr-{tokenizer.name_or_path.replace('/', '_')}-{data_args.doc_stride}-"
                                 f"{max_seq_length}{'padded' if data_args.pad_to_max_length else ''}-val"),
                desc="Running tokenizer on validation dataset",
            )
        if data_args.max_eval_samples is not None:
            # Select Samples from Dataset again since Feature Creation might increase samples size
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
    # Prediction preprocessing
    predict_examples, predict_dataset = None, None
    if training_args.do_predict:
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_examples = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            # We will select sample from whole data
            predict_examples = predict_examples.select(range(data_args.max_predict_samples))
        # Create prediction features
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            # BUG FIX: features must come from the *test* examples; the original
            # code mapped `eval_examples` (the validation split) here, so
            # --do_predict actually predicted on validation features.
            predict_dataset = predict_examples.map(
                function=prepare_squad_features,
                fn_kwargs={"tokenizer": tokenizer, "max_seq_len": max_seq_length,
                           "doc_stride": data_args.doc_stride, "pad_to_max_length": data_args.pad_to_max_length},
                batched=True,
                remove_columns=column_names,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                new_fingerprint=(f"cr-{tokenizer.name_or_path.replace('/', '_')}-{data_args.doc_stride}-"
                                 f"{max_seq_length}{'padded' if data_args.pad_to_max_length else ''}-test"),
                desc="Running tokenizer on prediction dataset",
            )
        if data_args.max_predict_samples is not None:
            # During Feature creation dataset samples might increase, we will select required samples again
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
    # Data collator
    # We have already padded to max length if the corresponding flag is True,
    # otherwise we need to pad in the data collator.
    data_collator = (
        default_data_collator if data_args.pad_to_max_length
        else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Post-processing:
    def post_processing_function(examples, features, predictions, output_dir=None, stage="eval"):
        # Post-processing: we match the start logits and end logits to answers in the original context.
        predictions = postprocess_squad_predictions(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=data_args.version_2_with_negative,
            n_best_size=data_args.n_best_size,
            max_answer_length=data_args.max_answer_length,
            null_score_diff_threshold=data_args.null_score_diff_threshold,
            output_dir=output_dir,
            prefix=stage,
        )
        # Format the result to the format the metric expects.
        if data_args.version_2_with_negative:
            formatted_predictions = [
                {"id": k, "prediction_text": v, "no_answer_probability": 0.0}
                for k, v in predictions.items()
            ]
        else:
            formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
        references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)
    metric = datasets.load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
    # Initialize our Trainer
    trainer = QATrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        tokenizer=tokenizer,
        data_collator=data_collator,
        post_process_function=post_processing_function,
        compute_metrics=lambda p: metric.compute(predictions=p.predictions, references=p.label_ids),
    )
    # Training
    if training_args.do_train:
        with open(os.path.join(training_args.output_dir, DATA_ARGS_NAME), 'w') as f:
            json.dump(data_args.to_dict(), f, ensure_ascii=False, indent=2)
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = len(train_dataset) if data_args.max_train_samples is None else data_args.max_train_samples
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = len(eval_dataset) if data_args.max_eval_samples is None else data_args.max_eval_samples
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        results = trainer.predict(predict_dataset, predict_examples)
        metrics = results.metrics
        max_predict_samples = (
            len(predict_dataset) if data_args.max_predict_samples is None else data_args.max_predict_samples
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Per-process entry point for TPU launch (``index`` is passed by the
    spawner and unused here)."""
    # For xla_spawn (TPUs)
    main()
# Run the driver only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 20,046 | 45.838785 | 119 | py |
CmpLoss | CmpLoss-main/run_glue.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import json
import logging
import os
import random
import sys
import datasets
import numpy as np
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from arguments import CmpTrainingArguments, DataArguments, ModelArguments, DATA_ARGS_NAME, task2criterion, task2keys
from modeling import CmpCls
from trainer import CmpTrainer
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
# noinspection PyArgumentList
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataArguments, CmpTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model_args: ModelArguments
data_args: DataArguments
training_args: CmpTrainingArguments
# Update some training arguments
if training_args.do_train:
if 'large' in model_args.model_name_or_path:
# For large models, try not to optimize the full model directly to avoid overfitting
training_args.loss_target = 'first'
if data_args.task_name is not None:
training_args.metric_for_best_model = task2criterion[data_args.task_name]
training_args.greater_is_better = training_args.metric_for_best_model not in ["loss", "eval_loss"]
training_args.run_name = f"{model_args.abs}#{data_args.abs}#{training_args.abs}"
training_args.output_dir = os.path.join(training_args.output_dir, training_args.run_name)
training_args.logging_dir = os.path.join(training_args.logging_dir, training_args.run_name)
# Setup logging
log_level = training_args.get_process_log_level()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
level=log_level,
)
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
logger.info(f"Data parameters {data_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(set(os.listdir(training_args.output_dir)) - {DATA_ARGS_NAME}) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = datasets.load_dataset(
"glue",
data_args.task_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = datasets.load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
raw_datasets = datasets.load_dataset(
"csv",
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Loading a dataset from local json files
raw_datasets = datasets.load_dataset(
"json",
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
label_list = None
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if training_args.disable_dropout:
config.attention_probs_dropout_prob = 0.
config.hidden_dropout_prob = 0.
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = CmpCls.from_pretrained(
model_args,
training_args,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the raw_datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task2keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {lid: label for label, lid in config.label2id.items()}
elif data_args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {lid: label for label, lid in config.label2id.items()}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_glue(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        # NOTE(review): _check_values_type is a private `datasets` API; may break on upgrade.
        raw_datasets._check_values_type()
        # Re-tokenize every split; new_fingerprint keys the cache on tokenizer + data args.
        raw_datasets = datasets.DatasetDict(
            {
                k: dataset.map(
                    function=preprocess_glue,
                    batched=True,
                    num_proc=data_args.preprocessing_num_workers,
                    load_from_cache_file=not data_args.overwrite_cache,
                    new_fingerprint=f"{tokenizer.name_or_path.replace('/', '_')}-{data_args.abs}-{k}",
                    desc="Running tokenizer on dataset",
                )
                for k, dataset in raw_datasets.items()
            }
        )
    # Select (and optionally truncate) the train / eval / predict splits.
    train_dataset = None
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
    eval_dataset = None
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        # MNLI has matched/mismatched validation sets; default to matched here.
        eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
    predict_dataset = None
    if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Get the metric function
    if data_args.task_name is not None:
        metric = datasets.load_metric("glue", data_args.task_name)
    else:
        metric = datasets.load_metric("accuracy")
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
    # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if
    # we already did the padding.
    data_collator = (
        default_data_collator if data_args.pad_to_max_length
        else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Initialize our Trainer
    trainer = CmpTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        # Persist the data arguments next to the checkpoints for reproducibility.
        with open(os.path.join(training_args.output_dir, DATA_ARGS_NAME), 'w') as f:
            json.dump(data_args.to_dict(), f, ensure_ascii=False, indent=2)
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model() # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = len(train_dataset) if data_args.max_train_samples is None else data_args.max_train_samples
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        eval_datasets = [eval_dataset]
        combined = None
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            eval_datasets.append(raw_datasets["validation_mismatched"])
            combined = {}
        # NOTE(review): the loop variable shadows the outer `eval_dataset`.
        for eval_dataset, task in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset)
            max_eval_samples = len(eval_dataset) if data_args.max_eval_samples is None else data_args.max_eval_samples
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
            if task == "mnli-mm":
                metrics = {k + "_mm": v for k, v in metrics.items()}
            if task is not None and "mnli" in task:
                combined.update(metrics)
            trainer.log_metrics("eval", metrics)
            # For MNLI, save matched+mismatched metrics merged into one report.
            trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        predict_datasets = [predict_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            predict_datasets.append(raw_datasets["test_mismatched"])
        for predict_dataset, task in zip(predict_datasets, tasks):
            # Removing the `label` columns because it contains -1 and Trainer won't like that.
            predict_dataset = predict_dataset.remove_columns("label")
            predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
            predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
            output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
            if trainer.is_world_process_zero():
                with open(output_predict_file, "w") as writer:
                    logger.info(f"***** Predict results {task} *****")
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if is_regression:
                            writer.write(f"{index}\t{item:3.3f}\n")
                        else:
                            item = label_list[item]
                            writer.write(f"{index}\t{item}\n")
    # Metadata for the auto-generated model card / Hub upload.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if data_args.task_name is not None:
        kwargs["language"] = "en"
        kwargs["dataset_tags"] = "glue"
        kwargs["dataset_args"] = data_args.task_name
        kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the process index supplied by xla_spawn; unused here.
    main()
if __name__ == "__main__":
    main()
| 22,570 | 44.690283 | 119 | py |
CmpLoss | CmpLoss-main/run_hotpot.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for question answering using a slightly adapted version of the 🤗 Trainer.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from torch.utils.data import Subset
import datasets
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from arguments import CmpTrainingArguments, DataArguments, ModelArguments, DATA_ARGS_NAME
from modeling import CmpQA
from trainer import QATrainer
from utils.qa import postprocess_hotpot_predictions
from utils.data import load_corpus, MultiDocQACollator, MultiDocQADataset
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
# noinspection PyArgumentList
def main():
    """End-to-end HotpotQA fine-tuning entry point: parse arguments, build
    tokenizer/model/datasets, then run train / eval / predict as requested."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataArguments, CmpTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model_args: ModelArguments
    data_args: DataArguments
    training_args: CmpTrainingArguments
    # Update some training arguments
    if training_args.do_train:
        if 'large' in model_args.model_name_or_path:
            # For large models, try not to optimize the full model directly to avoid overfitting
            training_args.loss_target = 'first'
        # Run name / output dirs encode the abbreviated model, data and training configs.
        training_args.run_name = f"{model_args.abs}#{data_args.abs}#{training_args.abs}"
        training_args.output_dir = os.path.join(training_args.output_dir, training_args.run_name)
        training_args.logging_dir = os.path.join(training_args.logging_dir, training_args.run_name)
    training_args.max_ans_len = data_args.max_answer_length
    training_args.n_top_span = data_args.n_best_size + 5
    # Setup logging
    log_level = training_args.get_process_log_level()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
        level=log_level,
    )
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    logger.info(f"Data parameters {data_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        # The saved data-args file alone does not count as a non-empty output dir.
        if last_checkpoint is None and len(set(os.listdir(training_args.output_dir)) - {DATA_ARGS_NAME}) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    if training_args.disable_dropout:
        config.attention_probs_dropout_prob = 0.
        config.hidden_dropout_prob = 0.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = CmpQA.from_pretrained(
        model_args,
        training_args,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Tokenizer check: this script requires a fast tokenizer.
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. Checkout the big table of models at"
            " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
            " this requirement"
        )
    if data_args.max_seq_length > tokenizer.model_max_length:
        # NOTE(review): the two f-strings below concatenate without a space, so the
        # logged message reads "...for themodel (...)".
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Get the datasets
    corpus = load_corpus(data_args.corpus_file, tokenizer, data_args.overwrite_cache)
    train_dataset = None
    if training_args.do_train:
        if not data_args.train_file:
            raise ValueError("--do_train requires a train file")
        train_dataset = MultiDocQADataset(
            data_args.train_file, tokenizer, corpus,
            max_seq_length, data_args.max_q_len, data_args.max_p_len, data_args.max_p_num,
            'train', training_args.n_crop, data_args.overwrite_cache
        )
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = Subset(train_dataset, list(range(max_train_samples)))
    eval_examples, eval_dataset = None, None
    # Eval data is also built during training (for in-training evaluation).
    if training_args.do_eval or training_args.do_train:
        if not data_args.validation_file:
            raise ValueError("--do_eval and --do_train requires a dev file")
        eval_dataset = MultiDocQADataset(
            data_args.validation_file, tokenizer, corpus,
            tokenizer.model_max_length, data_args.max_q_len, data_args.max_p_len, data_args.max_p_num,
            'validation', training_args.n_crop, data_args.overwrite_cache
        )
        eval_examples = eval_dataset.examples
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_examples = eval_examples[:max_eval_samples]
            eval_dataset = Subset(eval_dataset, list(range(max_eval_samples)))
    predict_examples, predict_dataset = None, None
    if training_args.do_predict:
        if not data_args.test_file:
            raise ValueError("--do_predict requires a test file")
        # Prediction uses the 'validation' pipeline with n_crop=0 (no cropped variants).
        predict_dataset = MultiDocQADataset(
            data_args.test_file, tokenizer, corpus,
            tokenizer.model_max_length, data_args.max_q_len, data_args.max_p_len, data_args.max_p_num,
            'validation', 0, data_args.overwrite_cache
        )
        predict_examples = predict_dataset.examples
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_examples = predict_examples[:max_predict_samples]
            predict_dataset = Subset(predict_dataset, list(range(max_predict_samples)))
    # Data collator
    data_collator = MultiDocQACollator(tokenizer.pad_token_id, pad_to_multiple_of=int(config.attention_window[0]))
    # Post-processing:
    def post_processing_function(examples, features, predictions, output_dir=None, stage="eval"):
        # Post-processing: we match the start logits and end logits to answers in the original context.
        predictions = postprocess_hotpot_predictions(
            examples=examples,
            features=features,
            predictions=predictions,
            n_best_size=data_args.n_best_size,
            output_dir=output_dir,
            prefix=stage,
        )
        # Format the result to the format the metric expects.
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
        references = [{"id": q_id, "answers": {"text": features.examples[q_id]['answers']['texts'], "answer_start": []}}
                      for q_id in features.q_ids]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)
    metric = datasets.load_metric("squad")
    # Initialize our Trainer
    trainer = QATrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        eval_examples=eval_examples,
        tokenizer=tokenizer,
        data_collator=data_collator,
        post_process_function=post_processing_function,
        compute_metrics=lambda p: metric.compute(predictions=p.predictions, references=p.label_ids),
    )
    # Training
    if training_args.do_train:
        # Persist the data arguments next to the checkpoints for reproducibility.
        with open(os.path.join(training_args.output_dir, DATA_ARGS_NAME), 'w') as f:
            json.dump(data_args.to_dict(), f, ensure_ascii=False, indent=2)
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model() # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = len(train_dataset) if data_args.max_train_samples is None else data_args.max_train_samples
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = len(eval_dataset) if data_args.max_eval_samples is None else data_args.max_eval_samples
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        results = trainer.predict(predict_dataset, predict_examples)
        metrics = results.metrics
        max_predict_samples = (
            len(predict_dataset) if data_args.max_predict_samples is None else data_args.max_predict_samples
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
    # Metadata for the auto-generated model card / Hub upload.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the process index supplied by xla_spawn; unused here.
    main()
if __name__ == "__main__":
    main()
| 13,817 | 42.866667 | 120 | py |
CmpLoss | CmpLoss-main/modeling.py | from dataclasses import dataclass
import json
import logging
import os
from typing import Any, Dict, Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import AutoModel, LongformerModel, RobertaModel, PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
)
from arguments import CmpTrainingArguments, ModelArguments, MODEL_ARGS_NAME
from utils.ranking import list_mle, pairwise_hinge
from utils.tensor import mask_where0
logger = logging.getLogger(__name__)
HEAD_WEIGHTS_NAME = 'head.bin'
def get_objective(effects: torch.Tensor, strategy: str = 'ultra') -> torch.Tensor:
    """Pick the comparison target ("objective") score for each row of `effects`.

    effects: (B, 1 + CMP) tensor of per-candidate scores (higher is better).
    strategy:
        'first' - midpoint of first and best-of-rest when the first wins, else best-of-rest
        'best'  - the best score in the row
        other   - zeros (the 'ultra' target)
    Returns a detached (B, 1) tensor.
    """
    with torch.no_grad():
        if strategy == 'first':
            first = effects[:, 0:1].detach()
            rest_best = effects[:, 1:].max(dim=1, keepdim=True)[0].detach().clone()
            midpoint = (first + rest_best) / 2
            return torch.where(first > rest_best, midpoint, rest_best)
        if strategy == 'best':
            return effects.max(dim=1, keepdim=True)[0].detach().clone()
        return effects.new_zeros((effects.size(0), 1))
def get_primary(losses: torch.Tensor, strategy: str = 'max') -> torch.Tensor:
    """Reduce per-candidate losses (B, 1 + CMP) to one primary loss per example (B,).

    strategy: 'avg' row mean, 'fst' first column, 'lst' last column,
    anything else (default 'max') row maximum.
    """
    if strategy == 'avg':
        return losses.mean(dim=1)
    if strategy == 'fst':
        return losses[:, 0]
    if strategy == 'lst':
        return losses[:, -1]
    return losses.max(dim=1)[0]
def get_rng_states():
    """Snapshot the torch RNG states (torch, torch.random, CUDA) for later replay.

    BUG FIX: the original unconditionally called torch.cuda.get_rng_state(), which
    raises on CPU-only machines; the CUDA slot is now None when CUDA is unavailable
    (set_rng_states tolerates a None CUDA state).
    Returns a (torch_state, random_state, cuda_state_or_None) tuple.
    """
    torch_rng_state = torch.get_rng_state()
    random_rng_state = torch.random.get_rng_state()
    cuda_rng_state = torch.cuda.get_rng_state() if torch.cuda.is_available() else None
    return torch_rng_state, random_rng_state, cuda_rng_state
def set_rng_states(torch_rng_state, random_rng_state, cuda_rng_state):
    """Restore RNG states captured by get_rng_states.

    BUG FIX: the original unconditionally called torch.cuda.set_rng_state(), which
    raises on CPU-only machines; the CUDA state is now only restored when it is
    not None and CUDA is available.
    """
    torch.set_rng_state(torch_rng_state)
    torch.random.set_rng_state(random_rng_state)
    if cuda_rng_state is not None and torch.cuda.is_available():
        torch.cuda.set_rng_state(cuda_rng_state)
@dataclass
class QAModelOutput(QuestionAnsweringModelOutput):
    # Extends the HF QA output with the top-K predicted answer-span start/end
    # token indices, shape (B, K); filled by CmpQA.forward.
    pred_starts: Optional[torch.LongTensor] = None
    pred_ends: Optional[torch.LongTensor] = None
class CmpBase(nn.Module):
    """Wrap a pretrained HF encoder, adding save/load of non-encoder head weights
    and helpers that toggle or rescale every nn.Dropout listed in `dropout_names`."""
    def __init__(
        self,
        encoder: PreTrainedModel,
        model_args: ModelArguments,
        training_args: Optional[CmpTrainingArguments] = None
    ):
        super(CmpBase, self).__init__()
        self.args = model_args
        self.training_args = training_args
        self.encoder = encoder
        self.config = self.encoder.config
        # Subclasses fill this with qualified names of their nn.Dropout submodules.
        self.dropout_names = []
    def _init_weights(self, module):
        """Initialize head weights the same way HF BERT does."""
        # copied from transformers/models/bert/modeling_bert.py
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    @classmethod
    def from_pretrained(
        cls,
        model_args: Optional[ModelArguments] = None,
        training_args: Optional[CmpTrainingArguments] = None,
        *args,
        **kwargs
    ):
        """Load the encoder via AutoModel, then restore saved ModelArguments and any
        extra head weights (HEAD_WEIGHTS_NAME) found in the checkpoint directory."""
        model_name_or_path = model_args.model_name_or_path
        encoder = AutoModel.from_pretrained(model_name_or_path, *args, **kwargs)
        model_args_path = os.path.join(model_name_or_path, MODEL_ARGS_NAME)
        # NOTE(review): model_args is already dereferenced above, so this
        # `model_args is None` branch can never run — confirm intended behavior.
        if model_args is None and os.path.exists(model_args_path):
            with open(model_args_path) as f:
                model_args_dict = json.load(f)
            model_args = ModelArguments(**model_args_dict)
        model = cls(encoder, model_args, training_args)
        head_weights_path = os.path.join(model_name_or_path, HEAD_WEIGHTS_NAME)
        if os.path.exists(head_weights_path):
            logger.info(f"loading extra weights from {head_weights_path}")
            model_dict = torch.load(head_weights_path, map_location="cpu")
            model.load_state_dict(model_dict, strict=False)
        return model
    def save_pretrained(self, save_directory: Union[str, os.PathLike], *args, **kwargs):
        """Save the encoder the HF way, plus non-encoder weights and model args."""
        self.encoder.save_pretrained(save_directory, *args, **kwargs)
        model_dict = self.state_dict()
        # Strip encoder parameters: they were already saved by save_pretrained above.
        encoder_parameter_keys = [k for k in model_dict.keys() if k.startswith('encoder')]
        for k in encoder_parameter_keys:
            model_dict.pop(k)
        if len(model_dict) > 0:
            torch.save(model_dict, os.path.join(save_directory, HEAD_WEIGHTS_NAME))
        with open(os.path.join(save_directory, MODEL_ARGS_NAME), 'w') as f:
            json.dump(self.args.to_dict(), f, ensure_ascii=False, indent=2)
    def floating_point_ops(self, input_dict: Dict[str, Union[torch.Tensor, Any]],
                           exclude_embeddings: bool = True) -> int:
        # Delegate FLOPs accounting to the underlying encoder (used by HF Trainer).
        return self.encoder.floating_point_ops(input_dict, exclude_embeddings)
    def activate_dropout(self):
        """Put every registered dropout module in train mode (dropout active)."""
        for name in self.dropout_names:
            module = self.get_submodule(name)
            module.train()
    def deactivate_dropout(self):
        """Put every registered dropout module in eval mode (dropout inactive)."""
        for name in self.dropout_names:
            module = self.get_submodule(name)
            module.eval()
    def power_dropout(self, power: int = 1):
        """Temporarily amplify each dropout: p -> 1 - (1 - p) ** power, and force
        the module into train mode. Originals are stashed for restore_dropout()."""
        for name in self.dropout_names:
            module = self.get_submodule(name)
            module.__setattr__('orig_p', module.p)
            if power != 1: # and not name.endswith('.self.dropout'):
                module.p = 1 - (1 - module.p) ** power
            module.__setattr__('orig_mode', module.training)
            module.train(True)
    def close_dropout(self):
        """Temporarily set every registered dropout's p to 0 (disable dropout);
        undo with restore_dropout()."""
        for name in self.dropout_names:
            module = self.get_submodule(name)
            module.__setattr__('orig_p', module.p)
            module.p = 0
            module.__setattr__('orig_mode', module.training)
    def restore_dropout(self):
        """Undo power_dropout()/close_dropout(): restore stashed p and train mode."""
        for name in self.dropout_names:
            module = self.get_submodule(name)
            module.p = module.orig_p
            module.__delattr__('orig_p')
            module.train(module.orig_mode)
            module.__delattr__('orig_mode')
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        global_attention_mask: Optional[torch.Tensor] = None,
        **kwargs
    ) -> BaseModelOutputWithPoolingAndCrossAttentions:
        """Dispatch to the encoder with the argument set its architecture expects."""
        # RoBERTa takes no token_type_ids; Longformer additionally takes a
        # global_attention_mask; BERT-style models take token_type_ids.
        if isinstance(self.encoder, RobertaModel):
            outputs = self.encoder(input_ids, attention_mask, **kwargs)
        elif isinstance(self.encoder, LongformerModel):
            outputs = self.encoder(input_ids, attention_mask, global_attention_mask, **kwargs)
        else:
            outputs = self.encoder(input_ids, attention_mask, token_type_ids, **kwargs)
        return outputs
class CmpQA(CmpBase):
    """Extractive QA head on CmpBase with comparative training: the loss compares
    the full input against n_drop dropout-perturbed replays and n_crop
    context-cropped variants of each example, and adds a ranking loss over them."""
    def __init__(
        self,
        encoder: PreTrainedModel,
        model_args: ModelArguments,
        training_args: Optional[CmpTrainingArguments] = None
    ):
        super(CmpQA, self).__init__(encoder, model_args, training_args)
        # Projects each token state to (start_logit, end_logit).
        self.answerer = nn.Linear(self.config.hidden_size, 2)
        # self.qa_dropout = nn.Dropout(self.config.hidden_dropout_prob)
        # Cached (L, L) valid-span mask, built lazily in get_span_mask.
        self.span_mask = None
        self.dropout_names = [name for name, module in self.named_modules() if isinstance(module, nn.Dropout)]
        self._init_weights(self.answerer)
    def get_span_mask(self, seq_len: int, device) -> torch.Tensor:
        """Return an (L, L) 0/1 mask of candidate spans: entry (i, j) is 1 iff
        0 <= j - i < max_ans_len. Rows 0-3 are zeroed except the single-token
        spans (1, 1) and (2, 2) — presumably reserved special positions
        (e.g. yes/no slots); confirm against the data pipeline."""
        if self.span_mask is not None and seq_len <= self.span_mask.size(0):
            return self.span_mask[:seq_len, :seq_len].to(device)
        self.span_mask = torch.tril(
            torch.triu(torch.ones((seq_len, seq_len), device=device), 0), self.training_args.max_ans_len - 1
        )
        self.span_mask[:4, :] = 0
        self.span_mask[1, 1] = 1
        self.span_mask[2, 2] = 1
        # self.span_mask[3, 3] = 1
        return self.span_mask
    def _forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        global_attention_mask: Optional[torch.Tensor] = None,
        answer_mask: Optional[torch.Tensor] = None,
        **kwargs
    ):
        """Single encoder pass: trim batch padding, encode, project to per-token
        start/end logits; trimmed positions are re-appended with logit -1000."""
        extra_padding_shape = None
        if attention_mask is not None:
            # Trim to the longest real sequence in the batch to save compute.
            max_seq_len = attention_mask.sum(dim=1).max().item()
            # Longformer expects the length to be a multiple of 512.
            if isinstance(self.encoder, LongformerModel) and max_seq_len % 512 != 0:
                max_seq_len = (max_seq_len // 512 + 1) * 512
            if max_seq_len < attention_mask.size(1):
                extra_padding_shape = attention_mask[:, max_seq_len:].shape
                attention_mask = attention_mask[:, :max_seq_len]
                if input_ids is not None:
                    input_ids = input_ids[:, :max_seq_len]
                if token_type_ids is not None:
                    token_type_ids = token_type_ids[:, :max_seq_len]
                if global_attention_mask is not None:
                    global_attention_mask = global_attention_mask[:, :max_seq_len]
                if answer_mask is not None:
                    answer_mask = answer_mask[:, :max_seq_len]
        encoder_outputs = super().forward(input_ids, attention_mask, token_type_ids, global_attention_mask,
                                          return_dict=True, **kwargs)
        # (B, L, H)
        seq_hidden_states = encoder_outputs.last_hidden_state
        # seq_hidden_states = self.qa_dropout(seq_hidden_states)
        # (B, L)
        start_logits, end_logits = self.answerer(seq_hidden_states).split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        if answer_mask is not None:
            # mask_where0 presumably suppresses logits where answer_mask == 0
            # (see utils.tensor) — confirm its masking value.
            start_logits = mask_where0(start_logits, answer_mask)
            end_logits = mask_where0(end_logits, answer_mask)
        if extra_padding_shape is not None:
            # Re-pad logits to the original length with a very low score.
            start_logits = torch.cat([start_logits, start_logits.new_full(extra_padding_shape, -1000)], dim=-1)
            end_logits = torch.cat([end_logits, end_logits.new_full(extra_padding_shape, -1000)], dim=-1)
        return start_logits, end_logits, encoder_outputs
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        global_attention_mask: Optional[torch.Tensor] = None,
        answer_mask: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        **kwargs
    ) -> QuestionAnsweringModelOutput:
        """QA forward with comparative training.

        Inputs carry a candidate dimension of size 1 + n_crop (index 0 = full
        input, the rest = cropped variants). Each candidate — plus n_drop dropout
        replays of candidate 0 under the same RNG states — yields an RC loss, and
        a ranking ("comparative") loss over those losses is added. Predictions
        (top-K spans) always come from candidate 0.
        """
        n_drop, n_crop = self.training_args.n_drop, self.training_args.n_crop
        if n_crop == 0:
            # NOTE(review): in-place unsqueeze_ mutates the caller's tensors.
            if input_ids is not None and input_ids.ndim == 2:
                input_ids.unsqueeze_(1)
            if attention_mask is not None and attention_mask.ndim == 2:
                attention_mask.unsqueeze_(1)
            if token_type_ids is not None and token_type_ids.ndim == 2:
                token_type_ids.unsqueeze_(1)
            if global_attention_mask is not None and global_attention_mask.ndim == 2:
                global_attention_mask.unsqueeze_(1)
            if answer_mask is not None and answer_mask.ndim == 2:
                answer_mask.unsqueeze_(1)
        assert input_ids is None or input_ids.size(1) == 1 + n_crop
        if n_drop > 0 and self.training:
            # Main pass runs with dropout disabled; the n_drop replays re-enable it.
            self.close_dropout()
        # NOTE(review): `self.eval()` returns self (always truthy) and, as a side
        # effect, switches the whole model to eval mode — `not self.training` was
        # probably intended here and in the matching restore branch below.
        elif self.eval() and self.training_args.force_dropout > 0:
            self.activate_dropout()
            self.power_dropout(self.training_args.force_dropout)
        # Snapshot RNG so dropout replays reuse identical randomness per variant index.
        rng_states = get_rng_states()
        start_logits, end_logits, encoder_outputs = self._forward(
            input_ids[:, 0], attention_mask[:, 0], token_type_ids[:, 0],
            global_attention_mask[:, 0] if global_attention_mask is not None else None,
            answer_mask[:, 0] if answer_mask is not None else None,
            **kwargs
        )
        if n_drop > 0 and self.training:
            self.restore_dropout()
        elif self.eval() and self.training_args.force_dropout > 0:
            self.restore_dropout()
            self.deactivate_dropout()
        bsz, seq_len = start_logits.shape
        # (B, L, L)
        span_scores = start_logits[:, :, None] + end_logits[:, None]
        span_scores = mask_where0(span_scores, self.get_span_mask(seq_len, start_logits.device).unsqueeze(0))
        # (B, K)
        top_spans = span_scores.view(bsz, -1).argsort(dim=-1,
                                                      descending=True)[:, :self.training_args.n_top_span].squeeze(-1)
        pred_starts = torch.div(top_spans, seq_len, rounding_mode='floor') # top_spans // seq_len
        pred_ends = top_spans % seq_len
        loss = None
        if start_positions is not None and end_positions is not None:
            if n_crop == 0 and start_positions.ndim == 1:
                start_positions.unsqueeze_(1)
            if n_crop == 0 and end_positions.ndim == 1:
                end_positions.unsqueeze_(1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # NOTE(review): mutates the caller's tensors in place, and a position
            # exactly equal to seq_len survives the `>` check — confirm `>=` was
            # not intended.
            start_positions[start_positions > seq_len] = -100
            end_positions[end_positions > seq_len] = -100
            # reading comprehension losses
            loss_fct = CrossEntropyLoss(ignore_index=-100, reduction='none')
            # (1 + CMP, B)
            all_start_losses = [loss_fct(start_logits, start_positions[:, 0])]
            all_end_losses = [loss_fct(end_logits, end_positions[:, 0])]
            # Number of extra comparison passes (dropout replays + crop variants).
            n_cmp = n_drop + n_crop if self.training or self.training_args.cmp_in_eval else 0
            dc, cc = 0, 0
            for i in range(n_cmp):
                # Alternate crop / dropout variants, exhausting crops first on ties.
                if cc < n_crop and (i % 2 == 0 or dc >= n_drop): # crop first
                    cc += 1
                else:
                    dc += 1
                if n_drop > 0:
                    # dc == 0 (a crop pass) closes dropout; dc >= 1 amplifies it.
                    self.power_dropout(dc)
                # Replay the exact RNG states captured before the main pass.
                set_rng_states(*rng_states)
                _start_logits, _end_logits, _ = self._forward(
                    input_ids[:, cc], attention_mask[:, cc], token_type_ids[:, cc],
                    global_attention_mask[:, cc] if global_attention_mask is not None else None,
                    answer_mask[:, cc] if answer_mask is not None else None,
                    **kwargs
                )
                if n_drop > 0:
                    self.restore_dropout()
                all_start_losses.append(loss_fct(_start_logits, start_positions[:, cc]))
                all_end_losses.append(loss_fct(_end_logits, end_positions[:, cc]))
            assert n_cmp == 0 or dc == n_drop and cc == n_crop
            # (B, 1 + CMP)
            all_start_losses = torch.stack(all_start_losses, dim=1)
            all_end_losses = torch.stack(all_end_losses, dim=1)
            all_rc_losses: torch.Tensor = (all_start_losses + all_end_losses) / 2
            loss = {
                f"rc_{i}": all_rc_losses[:, i] # (B,)
                for i in range(1 + n_cmp)
            }
            loss['rc'] = get_primary(all_rc_losses, self.training_args.loss_primary) # (B,)
            # comparative loss
            if n_cmp > 0:
                # (B, 1 + CMP) log likelihood
                start_effects = -all_start_losses / self.training_args.cr_temp # .mean(dim=0, keepdim=True)
                end_effects = -all_end_losses / self.training_args.cr_temp # .mean(dim=0, keepdim=True)
                # Target ranking: earlier candidates should score higher.
                effect_labels = torch.arange(
                    end_effects.size(-1), 0, -1, device=end_effects.device
                )[None, :].expand_as(end_effects)
                # (B, 1 + CMP + 1) log likelihood — append the "objective" target score.
                start_effects_ = torch.cat(
                    [start_effects, get_objective(start_effects, self.training_args.loss_target)],
                    dim=1
                )
                end_effects_ = torch.cat(
                    [end_effects, get_objective(end_effects, self.training_args.loss_target)],
                    dim=1
                )
                effect_labels_ = torch.arange(
                    end_effects_.size(-1), 0, -1, device=end_effects_.device
                )[None, :].expand_as(end_effects_)
                cmp_fct = list_mle if self.training_args.listwise else pairwise_hinge
                # (B,)
                loss['cmp'] = (cmp_fct(start_effects_, effect_labels_, reduction='none') +
                               cmp_fct(end_effects_, effect_labels_, reduction='none')) / 2
                loss['pair_reg'] = (pairwise_hinge(start_effects, effect_labels, reduction='none') +
                                    pairwise_hinge(end_effects, effect_labels, reduction='none')) / 2
                loss['list_reg'] = (list_mle(start_effects, effect_labels, reduction='none') +
                                    list_mle(end_effects, effect_labels, reduction='none')) / 2
                if self.training_args.just_cmp:
                    loss['overall'] = loss['cmp']
                else:
                    loss['overall'] = loss['rc']
                    # cr_schedule: only add the ranking regularizer once RC loss is low enough.
                    if not (self.training_args.cr_schedule and loss['rc'].mean() > 1.1):
                        loss['overall'] += (loss['list_reg'] if self.training_args.listwise else
                                            loss['pair_reg']) * self.training_args.cr_weight
            else:
                loss['overall'] = loss['rc']
            # average in batch
            for k, v in loss.items():
                loss[k] = v.mean()
        return QAModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            pred_starts=pred_starts,
            pred_ends=pred_ends,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class CmpCls(CmpBase):
    """
    Cmp Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.

    Comparative-loss training: besides the clean forward pass, the model runs
    ``training_args.n_drop`` extra passes with progressively stronger dropout
    (via ``power_dropout``) while replaying the same RNG state, and adds a
    ranking loss over the per-pass losses (stronger dropout should yield a
    larger loss). ``close_dropout``/``power_dropout``/``restore_dropout`` and
    the loss helpers (``get_primary``, ``get_objective``, ``list_mle``,
    ``pairwise_hinge``) come from elsewhere in this project — presumably
    CmpBase / a shared utils module; verify against their definitions.
    """
    def __init__(
        self,
        encoder: PreTrainedModel,
        model_args: ModelArguments,
        training_args: Optional[CmpTrainingArguments] = None
    ):
        # Build the shared encoder wrapper, then attach a dropout + linear
        # classification head sized by the encoder's config.
        super(CmpCls, self).__init__(encoder, model_args, training_args)
        classifier_dropout = (
            self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(self.config.hidden_size, self.config.num_labels)
        # Names of every nn.Dropout submodule, so close/power/restore_dropout
        # (inherited helpers) can toggle them all by name.
        self.dropout_names = [name for name, module in self.named_modules() if isinstance(module, nn.Dropout)]
        self._init_weights(self.classifier)
    def _forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        **kwargs
    ):
        """Single encoder pass; returns (classification logits, encoder outputs)."""
        encoder_outputs = super().forward(input_ids, attention_mask, token_type_ids, return_dict=True, **kwargs)
        # (B, H)
        pooled_output = encoder_outputs.pooler_output
        # (B, NL)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, encoder_outputs
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        **kwargs
    ) -> SequenceClassifierOutput:
        """Classification forward pass with optional comparative loss.

        When ``n_drop > 0`` and the model is training, the primary pass runs
        with dropout closed while the RNG state is snapshotted; each extra
        pass replays that state with dropout probability raised to a higher
        power, so the dropped units of pass i are a subset of pass i+1's.
        """
        if self.training_args.n_drop > 0 and self.training:
            self.close_dropout()
            rng_states = get_rng_states()
        logits, encoder_outputs = self._forward(input_ids, attention_mask, token_type_ids, **kwargs)
        if self.training_args.n_drop > 0 and self.training:
            self.restore_dropout()
        loss = None
        if labels is not None:
            # Infer the problem type lazily from num_labels / label dtype,
            # mirroring HF's sequence-classification convention.
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            # (1 + C, B, NL)
            # NOTE(review): if n_drop > 0 but self.training is False (e.g. eval
            # with labels), rng_states below is unbound -> NameError. Confirm
            # callers never hit this combination, or guard the loop on training.
            cmp_logits = [logits]
            for i in range(self.training_args.n_drop):
                self.power_dropout(1 + i)
                set_rng_states(*rng_states)
                _logits, _ = self._forward(input_ids, attention_mask, token_type_ids, **kwargs)
                self.restore_dropout()
                cmp_logits.append(_logits)
            # classification losses
            cls_losses: torch.Tensor = None  # (B, NL)
            if self.config.problem_type == "regression":
                loss_fct = MSELoss(reduction='none')
                if self.config.num_labels == 1:
                    cls_losses = torch.stack(
                        [loss_fct(_logits.squeeze(), labels.squeeze()) for _logits in cmp_logits],
                        dim=1
                    )
                else:
                    cls_losses = torch.stack(
                        [loss_fct(_logits, labels) for _logits in cmp_logits],
                        dim=1
                    )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss(reduction='none')
                cls_losses = torch.stack(
                    [loss_fct(_logits.view(-1, self.config.num_labels), labels.view(-1)) for _logits in cmp_logits],
                    dim=1
                )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss(reduction='none')
                cls_losses = torch.stack(
                    [loss_fct(_logits, labels) for _logits in cmp_logits],
                    dim=1
                )
            # Per-pass losses exposed individually for logging, plus the
            # aggregated primary loss ('cls').
            loss = {
                f"cls_{i}": cls_losses[:, i]  # (B,)
                for i in range(1 + self.training_args.n_drop)
            }
            loss['cls'] = get_primary(cls_losses, self.training_args.loss_primary)  # (B,)
            # comparative loss
            if self.training_args.n_drop > 0:
                # (B, 1 + C) log likelihood
                cls_effects = -cls_losses / self.training_args.cr_temp  # .mean(dim=0, keepdim=True)
                # Descending rank targets: the cleanest pass (index 0) should
                # have the highest effect (lowest loss).
                effect_labels = torch.arange(
                    cls_effects.size(-1), 0, -1, device=cls_effects.device
                )[None, :].expand_as(cls_effects)
                # (B, 1 + C + 1) log likelihood
                cls_effects_ = torch.cat(
                    [cls_effects, get_objective(cls_effects, self.training_args.loss_target)],
                    dim=1
                )
                effect_labels_ = torch.arange(
                    cls_effects_.size(-1), 0, -1, device=cls_effects_.device
                )[None, :].expand_as(cls_effects_)
                cmp_fct = list_mle if self.training_args.listwise else pairwise_hinge
                # (B,)
                loss['cmp'] = cmp_fct(cls_effects_, effect_labels_, reduction='none')
                loss['pair_reg'] = pairwise_hinge(cls_effects, effect_labels, reduction='none')
                loss['list_reg'] = list_mle(cls_effects, effect_labels, reduction='none')
                if self.training_args.just_cmp:
                    loss['overall'] = loss['cmp']
                else:
                    loss['overall'] = loss['cls']
                    # cr_schedule: only add the ranking regularizer once the
                    # classification loss has dropped below 1.1.
                    if not (self.training_args.cr_schedule and loss['cls'].mean() > 1.1):
                        loss['overall'] += (loss['list_reg'] if self.training_args.listwise else
                                            loss['pair_reg']) * self.training_args.cr_weight
            else:
                loss['overall'] = loss['cls']
            # average in batch
            for k, v in loss.items():
                loss[k] = v.mean()
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
def check_deterministic_dropout(use_cuda=True, dp=False, n_iters=1000):
    """Smoke-test the determinism of replayed dropout forward passes.

    Builds a stack of three dropout layers and, for each of ``n_iters``
    forward calls, asserts that:
      * a closed (p=0) pass keeps a superset of the elements a normal pass keeps,
      * a pass with squared drop probability zeroes a superset of the normal
        pass's zeros (given the same replayed RNG state),
      * replaying the RNG state with ``power_dropout(1)`` reproduces the
        normal pass's output exactly.

    Args:
        use_cuda: run on GPU when one is available; falls back to CPU
            otherwise instead of crashing (fix: previous version called
            ``.cuda()`` unconditionally).
        dp: additionally wrap the model in ``nn.DataParallel`` when more
            than one GPU is visible.
        n_iters: number of forward passes (default 1000, the original
            hard-coded count, kept for backward compatibility).
    """
    from tqdm.auto import tqdm

    class MultiDropout(nn.Module):
        """Three stacked dropout layers used to probe RNG determinism."""

        def __init__(self):
            super(MultiDropout, self).__init__()
            self.dropout1 = nn.Dropout(p=0.2)
            self.dropout2 = nn.Dropout(p=0.2)
            self.dropout3 = nn.Dropout(p=0.2)
            # Names of all Dropout submodules, toggled by the helpers below.
            self.dropout_names = [name for name, module in self.named_modules() if isinstance(module, nn.Dropout)]

        def power_dropout(self, power: int = 1):
            # Raise each drop probability so keep-prob becomes keep_prob**power,
            # force train mode, and remember the original settings.
            for name in self.dropout_names:
                module = self.get_submodule(name)
                module.__setattr__('orig_p', module.p)
                if power != 1:
                    module.p = 1 - (1 - module.p) ** power
                module.__setattr__('orig_mode', module.training)
                module.train(True)

        def close_dropout(self):
            # Temporarily disable dropout (p=0), remembering original settings.
            for name in self.dropout_names:
                module = self.get_submodule(name)
                module.__setattr__('orig_p', module.p)
                module.p = 0
                module.__setattr__('orig_mode', module.training)

        def restore_dropout(self):
            # Undo close_dropout()/power_dropout(): restore p and train mode.
            for name in self.dropout_names:
                module = self.get_submodule(name)
                module.p = module.orig_p
                module.__delattr__('orig_p')
                module.train(module.orig_mode)
                module.__delattr__('orig_mode')

        def _forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.dropout3(self.dropout2(self.dropout1(x)))

        @staticmethod
        def _support(t: torch.Tensor):
            # Index set of surviving (non-zero) elements.
            return set(tuple(idx) for idx in t.nonzero().squeeze(-1).tolist())

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            rng_states = get_rng_states()
            # Pass 1: dropout disabled -- keeps everything.
            self.close_dropout()
            x0 = self._forward(x)
            self.restore_dropout()
            # Pass 2: normal dropout with the recorded RNG state.
            set_rng_states(*rng_states)
            x1 = self._forward(x)
            assert len(self._support(x1) - self._support(x0)) == 0, f"{x0}\n{x1}"
            # Pass 3: squared drop probability must zero a superset of pass 2.
            self.power_dropout(2)
            set_rng_states(*rng_states)
            x2 = self._forward(x)
            self.restore_dropout()
            assert len(self._support(x2) - self._support(x1)) == 0, f"{x1}\n{x2}"
            # Pass 4: power 1 replays pass 2 exactly.
            self.power_dropout(1)
            set_rng_states(*rng_states)
            x3 = self._forward(x)
            # Fix: previously the orig_p/orig_mode attributes were left
            # dangling and modules were left forced into train mode here.
            self.restore_dropout()
            assert torch.all(x1 == x3), f"{x1}\n{x3}"
            return x1

    model = MultiDropout()
    xx = torch.ones(16, 1000)
    if use_cuda and torch.cuda.is_available():  # fix: no crash on CPU-only hosts
        model = model.cuda()
        xx = xx.cuda()
    if dp and torch.cuda.device_count() > 1:
        print(torch.cuda.device_count())
        model = nn.DataParallel(model)
    for _ in tqdm(range(n_iters)):
        model(xx)
    print('OK')
| 28,205 | 41.224551 | 125 | py |
CmpLoss | CmpLoss-main/trainer.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Question-Answering tasks
"""
import collections
import json
import logging
import math
import os
import sys
import time
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from transformers.integrations import (
hp_params,
)
import numpy as np
import torch
from torch import nn
from torch.cuda.amp import autocast
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import PretrainedConfig, PreTrainedModel, __version__
from transformers.deepspeed import deepspeed_init, deepspeed_reinit
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
is_apex_available,
is_sagemaker_dp_enabled,
is_torch_tpu_available,
)
from transformers.integrations import TensorBoardCallback
from transformers.trainer import Trainer, TRAINER_STATE_NAME
from transformers.trainer_callback import TrainerState
from transformers.trainer_pt_utils import (
IterableDatasetShard,
find_batch_size,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
)
from transformers.trainer_utils import (
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainOutput,
denumpify_detensorize,
get_last_checkpoint,
has_length,
set_seed,
speed_metrics,
)
if is_apex_available():
from apex import amp
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
else:
import torch.distributed as dist
from arguments import TRAINING_ARGS_NAME
from modeling import HEAD_WEIGHTS_NAME
# Standard per-module logger (named after this module's import path).
logger = logging.getLogger(__name__)
def tb_on_train_begin(self, args, state, control, **kwargs):
    """Replacement body for ``TensorBoardCallback.on_train_begin``.

    Identical to the upstream callback except that ``add_hparams`` is never
    invoked (it writes an extra hparams run, and the tensorboardX writer may
    not implement it). Only the main process writes events.
    """
    if not state.is_world_process_zero:
        return
    log_dir = None
    if state.is_hyper_param_search and state.trial_name is not None:
        # During an HP search each trial logs into its own sub-directory.
        log_dir = os.path.join(args.logging_dir, state.trial_name)
    if self.tb_writer is None:
        self._init_summary_writer(args, log_dir)
    writer = self.tb_writer
    if writer is None:
        return
    writer.add_text("args", args.to_json_string())
    model = kwargs.get("model")
    if model is not None and getattr(model, "config", None) is not None:
        writer.add_text("model_config", model.config.to_json_string())
    # Deliberately no add_hparams() call here -- that omission is the whole
    # point of this patched callback.
# noinspection PyProtectedMember
class CmpTrainer(Trainer):
def __init__(self, *argv, **kwargs):
super().__init__(*argv, **kwargs)
for cb in self.callback_handler.callbacks:
if isinstance(cb, TensorBoardCallback):
cb.on_train_begin = MethodType(tb_on_train_begin, cb)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
# torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
with open(os.path.join(output_dir, TRAINING_ARGS_NAME), 'w') as f:
json.dump(self.args.to_dict(), f, ensure_ascii=False, indent=2)
@staticmethod
def _load_state_file_to_module(state_dict_path, module, warning=True):
state_dict = torch.load(state_dict_path, map_location="cpu")
load_result = module.load_state_dict(state_dict, strict=False)
del state_dict
if not warning:
return
if len(load_result.missing_keys) != 0:
if hasattr(module, '_keys_to_ignore_on_save') and module._keys_to_ignore_on_save is not None and \
set(load_result.missing_keys) == set(module._keys_to_ignore_on_save):
module.tie_weights()
else:
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warning(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
    def _maybe_log_save_evaluate(self, tr_loss: Union[torch.Tensor, Dict[str, torch.Tensor]],
                                 model, trial, epoch, ignore_keys_for_eval):
        """Log accumulated losses and, per the control flags, evaluate/save.

        Extends the stock Trainer hook: ``tr_loss`` may be a dict of named
        loss components, in which case the ``'overall'`` entry is logged as
        ``"loss"`` and every other entry as ``"loss_<name>"``. Each logged
        value is the per-step average since the last logging step; the
        accumulators are reset to zero in place.
        """
        if self.control.should_log:
            logs: Dict[str, float] = {}
            if isinstance(tr_loss, dict) and 'overall' in tr_loss:
                for k in tr_loss:
                    # all_gather + mean() to get average loss over all processes
                    loss_scalar = self._nested_gather(tr_loss[k]).mean().item()
                    tr_loss[k] -= tr_loss[k]  # reset to zero
                    if k == 'overall':
                        log_key = "loss"
                        # Only the overall loss feeds the running total used
                        # for the final train_loss metric.
                        self._total_loss_scalar += loss_scalar
                    else:
                        log_key = f"loss_{k}"
                    logs[log_key] = round(loss_scalar / (self.state.global_step - self._globalstep_last_logged), 6)
            else:
                # all_gather + mean() to get average loss over all processes
                tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
                tr_loss -= tr_loss  # reset to zero
                self._total_loss_scalar += tr_loss_scalar
                logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 6)
            # noinspection PyArgumentList
            logs["learning_rate"] = self._get_learning_rate()
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, metrics)
        if self.control.should_save:
            # metrics may be None here if evaluation did not run this step.
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, torch.Tensor]:
        """Perform a training step on a batch of inputs.

        Backpropagates only the ``'overall'`` component when the model returns
        a dict of losses; the other components are carried along for logging.

        Args:
            model: The model to train.
            inputs: The inputs and targets of the model.
        Returns:
            Dict[str, torch.Tensor]: The Dict of tensors with detached training losses on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if isinstance(loss, dict):
            if self.args.n_gpu > 1:
                loss = {k: v.mean() for k, v in loss.items()}  # mean() to average on multi-gpu parallel training
            overall_loss = loss['overall']
        else:
            if self.args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            overall_loss = loss
        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            overall_loss = overall_loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(overall_loss).backward()
        elif self.use_apex:
            with amp.scale_loss(overall_loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            overall_loss = self.deepspeed.backward(overall_loss)
        else:
            overall_loss.backward()
        # NOTE(review): the dict branch returns losses WITHOUT the
        # gradient-accumulation division applied above, while the scalar
        # branch returns the divided overall_loss -- logged magnitudes differ
        # by a factor of gradient_accumulation_steps between the two paths.
        # Confirm whether this asymmetry is intentional.
        if isinstance(loss, dict):
            return {k: v.detach() for k, v in loss.items()}
        else:
            return {'overall': overall_loss.detach()}
# noinspection PyAttributeOutsideInit
    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
        ignore_keys_for_eval: Optional[List[str]] = None,
        **kwargs,
    ):
        """Main training loop, adapted from ``transformers.Trainer.train``.

        Differences from the stock loop visible in this body:
          * ``tr_loss`` is a dict of named loss components (matching the dict
            returned by ``training_step``) instead of a single tensor;
          * encoder and head weights are loaded separately on resume / best
            checkpoint (``WEIGHTS_NAME`` for the PLM, ``HEAD_WEIGHTS_NAME``
            for the head, the latter without missing-key warnings);
          * at every epoch end, evaluation and saving are forced whenever
            ``args.do_eval`` is set.

        Args:
            resume_from_checkpoint: path to a checkpoint dir, or True to pick
                the last checkpoint under ``args.output_dir``.
            trial: hyper-parameter search trial (optuna trial or param dict).
            ignore_keys_for_eval: keys to drop from model outputs at eval time.
        Returns:
            TrainOutput(global_step, average training loss, speed/flos metrics).
        """
        resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        args = self.args
        self.is_in_train = True
        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
            self._move_model_to_device(self.model, args.device)
        if len(kwargs) > 0:
            raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
        if resume_from_checkpoint is not None:
            plm_weights_path = os.path.join(resume_from_checkpoint, WEIGHTS_NAME)
            if not os.path.isfile(plm_weights_path):
                raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
            logger.info(f"Loading model from {resume_from_checkpoint}).")
            if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
                config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
                checkpoint_version = config.transformers_version
                if checkpoint_version is not None and checkpoint_version != __version__:
                    logger.warning(
                        f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                        f"Transformers but your current version is {__version__}. This is not recommended and could "
                        "yield to errors or unwanted behaviors."
                    )
            if args.deepspeed:
                # will be resumed in deepspeed_init
                pass
            else:
                # Encoder (PLM) weights: either self.model is itself a PLM, or
                # it wraps one as .encoder.
                self._load_state_file_to_module(
                    plm_weights_path, self.model if isinstance(self.model, PreTrainedModel) else self.model.encoder
                )
                # Task-head weights are stored in a separate file and loaded
                # without missing-key warnings (the head may be partial).
                head_weights_path = os.path.join(resume_from_checkpoint, HEAD_WEIGHTS_NAME)
                if os.path.isfile(head_weights_path):
                    self._load_state_file_to_module(head_weights_path, self.model, False)
        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        # Keeping track whether we can can len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if args.max_steps > 0:
                max_steps = args.max_steps
                num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                    args.max_steps % num_update_steps_per_epoch > 0
                )
                # May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's
                # the best we can do.
                num_train_samples = args.max_steps * total_train_batch_size
            else:
                max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(args.num_train_epochs)
                num_train_samples = len(self.train_dataset) * args.num_train_epochs
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = args.max_steps
            # Setting a very large number of epochs so we go as many times as necessary over the iterator.
            num_train_epochs = sys.maxsize
            num_update_steps_per_epoch = max_steps
            num_train_samples = args.max_steps * total_train_batch_size
        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP (torch.distributed.launch)."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model)  # noqa
        delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
        if args.deepspeed:
            deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
                self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        elif not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None
        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable()
        model = self._wrap_model(self.model_wrapped)
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model
        if delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)
        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
        # Train!
        num_examples = (
            self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
        )
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Num Epochs = {num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps}")
        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        steps_trained_progress_bar = None
        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
                os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            if not args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % num_update_steps_per_epoch
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                    "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
                    "flag to your launch command, but you will resume the training on data already seen by your model."
                )
                if self.is_local_process_zero() and not args.disable_tqdm:
                    steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                    steps_trained_progress_bar.set_description("Skipping the first batches")
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        if trial is not None:
            assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
            self.state.trial_params = hp_params(assignments)
        else:
            self.state.trial_params = None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
        # Each value in tr_loss is a tensor to avoid synchronization of TPUs through .item()
        # (dict of per-component accumulators; keys appear lazily as
        # training_step returns them).
        tr_loss: Dict[str, torch.Tensor] = dict()
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not args.ignore_data_skip:
            for epoch in range(epochs_trained):
                # We just need to begin an iteration to create the randomization of the sampler.
                for _ in train_dataloader:
                    break
        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            elif isinstance(train_dataloader.dataset, IterableDatasetShard):
                train_dataloader.dataset.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader
            steps_in_epoch = (
                len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
            step = -1
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    if steps_trained_progress_bar is not None:
                        steps_trained_progress_bar.update(1)
                    if steps_trained_in_current_epoch == 0:
                        self._load_rng_state(resume_from_checkpoint)
                    continue
                elif steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.close()
                    steps_trained_progress_bar = None
                if step % args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
                if (
                        ((step + 1) % args.gradient_accumulation_steps != 0)
                        and args.local_rank != -1
                        and args._no_sync_in_gradient_accumulation
                ):
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    with model.no_sync():
                        step_loss = self.training_step(model, inputs)
                else:
                    step_loss = self.training_step(model, inputs)
                # Accumulate each named loss component separately.
                for k, _loss in step_loss.items():
                    if k not in tr_loss:
                        tr_loss[k] = torch.zeros_like(_loss)
                    if (
                            args.logging_nan_inf_filter and
                            not is_torch_tpu_available() and
                            (torch.isnan(_loss) or torch.isinf(_loss))
                    ):
                        # if loss is nan or inf simply add the average of previous logged losses
                        tr_loss[k] += tr_loss[k] / (1 + self.state.global_step - self._globalstep_last_logged)
                    else:
                        tr_loss[k] += _loss
                self.current_flos += float(self.floating_point_ops(inputs))
                # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
                if self.deepspeed:
                    self.deepspeed.step()
                if (step + 1) % args.gradient_accumulation_steps == 0 or (
                        # last step in epoch but step is always smaller than gradient_accumulation_steps
                        (step + 1) == steps_in_epoch <= args.gradient_accumulation_steps
                ):
                    # Gradient clipping
                    if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping
                        if self.do_grad_scaling:
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)
                        if hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(args.max_grad_norm)
                        elif hasattr(model, "clip_grad_norm_"):
                            # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                            model.clip_grad_norm_(args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                args.max_grad_norm,
                            )
                    # Optimizer step
                    optimizer_was_run = True
                    if self.deepspeed:
                        pass  # called outside the loop
                    elif is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.do_grad_scaling:
                        scale_before = self.scaler.get_scale()
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        scale_after = self.scaler.get_scale()
                        # A decreased scale means the step was skipped (inf/nan
                        # gradients) -- don't advance the LR scheduler then.
                        optimizer_was_run = scale_before <= scale_after
                    else:
                        self.optimizer.step()
                    if optimizer_was_run and not self.deepspeed:
                        self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(args, self.state, self.control)
                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
                else:
                    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break
            if step < 0:
                logger.warning(
                    f"There seems to be not a single sample in your epoch_iterator, stopping training at step"
                    f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
                    f" num_steps ({max_steps}) higher than the number of available samples."
                )
                self.control.should_training_stop = True
            self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
            # Custom behavior: force an evaluation + checkpoint at every epoch
            # end whenever do_eval is set (stock Trainer leaves these to the
            # evaluation/save strategies).
            self.control.should_evaluate = args.do_eval
            self.control.should_save = args.do_eval
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
            if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sur the model has been saved by process 0.
            if is_torch_tpu_available():
                xm.rendezvous("load_best_model_at_end")
            elif args.local_rank != -1:
                dist.barrier()
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            best_plm_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
            if os.path.exists(best_plm_path):
                if self.deepspeed:
                    # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
                    deepspeed_engine, optimizer, lr_scheduler = deepspeed_reinit(self)
                    self.model = deepspeed_engine.module
                    self.model_wrapped = deepspeed_engine
                    self.deepspeed = deepspeed_engine
                    self.optimizer = optimizer
                    self.lr_scheduler = lr_scheduler
                    self.deepspeed.load_checkpoint(
                        self.state.best_model_checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True
                    )
                else:
                    # Same split loading as on resume: PLM weights, then the
                    # optional separate head weights.
                    self._load_state_file_to_module(
                        best_plm_path, self.model if isinstance(self.model, PreTrainedModel) else self.model.encoder
                    )
                    best_head_path = os.path.join(self.state.best_model_checkpoint, HEAD_WEIGHTS_NAME)
                    if os.path.exists(best_head_path):
                        self._load_state_file_to_module(best_head_path, self.model, False)
            else:
                logger.warning(
                    f"Could not locate the best model at {self.state.best_model_checkpoint}, "
                    f"if you are running a distributed training on multiple nodes, "
                    f"you should activate `--save_on_each_node`."
                )
        # add remaining tr_loss
        self._total_loss_scalar += tr_loss['overall'].item()
        train_loss = self._total_loss_scalar / self.state.global_step
        metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
        metrics["train_loss"] = train_loss
        self.is_in_train = False
        self._memory_tracker.stop_and_update_metrics(metrics)
        self.log(metrics)
        self.control = self.callback_handler.on_train_end(args, self.state, self.control)
        return TrainOutput(self.state.global_step, train_loss, metrics)
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """Shared prediction/evaluation loop used by `evaluate()` and `predict()`.

        Differs from the stock HF Trainer loop in that the loss returned by
        `prediction_step` is a dict of named components (keyed e.g. 'overall');
        each component is gathered and averaged separately and reported as
        `{prefix}_loss` (for 'overall') or `{prefix}_loss_{name}`.

        Args:
            dataloader: source of evaluation batches.
            description: human-readable phase name for logging.
            prediction_loss_only: skip logits/labels accumulation when True;
                falls back to `args.prediction_loss_only` when None.
            ignore_keys: output-dict keys excluded from the logits tuple.
            metric_key_prefix: prefix applied to every metric key.

        Returns:
            EvalLoopOutput(predictions, label_ids, metrics, num_samples).
        """
        args = self.args
        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
        # if eval is called w/o train init deepspeed here
        if args.deepspeed and not self.deepspeed:
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(
                self, num_training_steps=0, resume_from_checkpoint=None, inference=True
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
        model = self._wrap_model(self.model, training=False)
        # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
        # while ``train`` is running, cast it to the right dtype first and then put on device
        if not self.is_in_train:
            if args.fp16_full_eval:
                model = model.to(dtype=torch.float16, device=args.device)
            elif args.bf16_full_eval:
                model = model.to(dtype=torch.bfloat16, device=args.device)
        batch_size = self.args.eval_batch_size
        logger.info(f"***** Running {description} *****")
        if has_length(dataloader):
            logger.info(f" Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info(" Num examples: Unknown")
        logger.info(f" Batch size = {batch_size}")
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = getattr(dataloader, "dataset", None)
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        inputs_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        all_inputs = None
        # Will be useful when we have an iterable dataset so don't know its length.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size
                # For batch samplers, batch_size is not known by the dataloader in advance.
                if batch_size is None:
                    batch_size = observed_batch_size
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            inputs_decode = inputs["input_ids"] if args.include_inputs_for_metrics else None
            if is_torch_tpu_available():
                xm.mark_step()
            # Update containers on host
            if loss is not None:
                # loss is a dict of named scalar components; each is repeated
                # to batch_size so the cross-process gather weights it by the
                # number of samples in the batch.
                losses = {k: self._nested_gather(v.repeat(batch_size)) for k, v in loss.items()}
                if losses_host is None:
                    losses_host = losses
                else:
                    for k in losses_host.keys():
                        losses_host[k] = torch.cat((losses_host[k], losses[k]), dim=0)
            if labels is not None:
                labels = self._pad_across_processes(labels)
                labels = self._nested_gather(labels)
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            if inputs_decode is not None:
                inputs_decode = self._pad_across_processes(inputs_decode)
                inputs_decode = self._nested_gather(inputs_decode)
                inputs_host = (
                    inputs_decode
                    if inputs_host is None
                    else nested_concat(inputs_host, inputs_decode, padding_index=-100)
                )
            if logits is not None:
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                if self.preprocess_logits_for_metrics is not None:
                    logits = self.preprocess_logits_for_metrics(logits, labels)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = {k: nested_numpify(v) for k, v in losses_host.items()}
                    if all_losses is None:
                        all_losses = losses
                    else:
                        for k in all_losses.keys():
                            all_losses[k] = np.concatenate((all_losses[k], losses[k]), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
                if inputs_host is not None:
                    inputs_decode = nested_numpify(inputs_host)
                    all_inputs = (
                        inputs_decode
                        if all_inputs is None
                        else nested_concat(all_inputs, inputs_decode, padding_index=-100)
                    )
                if labels_host is not None:
                    labels = nested_numpify(labels_host)
                    all_labels = (
                        labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
                    )
                # Set back to None to begin a new accumulation
                losses_host, preds_host, inputs_host, labels_host = None, None, None, None
        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = {k: nested_numpify(v) for k, v in losses_host.items()}
            if all_losses is None:
                all_losses = losses
            else:
                for k in all_losses.keys():
                    all_losses[k] = np.concatenate((all_losses[k], losses[k]), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if inputs_host is not None:
            inputs_decode = nested_numpify(inputs_host)
            all_inputs = (
                inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
            )
        if labels_host is not None:
            labels = nested_numpify(labels_host)
            all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
        # Number of samples
        if has_length(eval_dataset):
            num_samples = len(eval_dataset)
        # The instance check is weird and does not actually check for the type, but whether the dataset has the right
        # methods. Therefore we need to make sure it also has the attribute.
        elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
            num_samples = eval_dataset.num_examples
        else:
            if has_length(dataloader):
                num_samples = self.num_examples(dataloader)
            else:  # both len(dataloader.dataset) and len(dataloader) fail
                num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samplers has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = {k: all_losses[k][:num_samples] for k in all_losses.keys()}
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = nested_truncate(all_labels, num_samples)
        if all_inputs is not None:
            all_inputs = nested_truncate(all_inputs, num_samples)
        # Metrics!
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            if args.include_inputs_for_metrics:
                metrics = self.compute_metrics(
                    EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
                )
            else:
                metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)
        if all_losses is not None:
            for k in all_losses.keys():
                # 'overall' maps to the conventional `{prefix}_loss`; every
                # other component becomes `{prefix}_loss_{name}`.
                loss_key = 'loss' if k == 'overall' else f'loss_{k}'
                metrics[f"{metric_key_prefix}_{loss_key}"] = all_losses[k].mean().item()
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[Dict[str, torch.Tensor]], Optional[torch.Tensor], Optional[torch.Tensor]]:
has_labels = len(self.label_names) > 0 and all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
with self.autocast_smart_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
if isinstance(loss, dict):
loss = {k: loss[k].mean().detach() for k in loss.keys()}
else:
loss = {'overall': loss.mean().detach()}
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
with self.autocast_smart_context_manager():
outputs = model(**inputs)
loss = None
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
if prediction_loss_only:
return loss, None, None
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return loss, logits, labels
class QATrainer(CmpTrainer):
    """Trainer specialization for extractive question answering.

    Supplies the span label names expected by QA heads and optionally runs a
    `post_process_function` that turns raw start/end predictions into text
    answers before metrics are computed.
    """
    def __init__(self, *argv, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*argv, **kwargs)
        # QA supervision is a pair of token indices delimiting the answer span.
        self.label_names = ["start_positions", "end_positions"]
        # Raw (untokenized) examples consumed by `post_process_function` to map
        # span predictions back to answer strings.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run evaluation, post-process predictions into answers, log metrics.

        Metric computation is deferred until after the evaluation loop so the
        (expensive) QA post-processing happens once on the full prediction set.
        Returns the metrics dict.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        # start_time = time.time()
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.evaluation_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Restore even if the loop raises, so later calls still compute metrics.
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            pred_out_dir = self.args.output_dir
            if self.is_in_train:
                # During training, keep per-checkpoint prediction dumps separate.
                pred_out_dir = os.path.join(pred_out_dir, f"checkpoint-{self.state.global_step}")
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions,
                                                    pred_out_dir, "eval")
            output.metrics.update(self.compute_metrics(eval_preds))
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(output.metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    output.metrics[f"{metric_key_prefix}_{key}"] = output.metrics.pop(key)
        # total_batch_size = self.args.eval_batch_size * self.args.world_size
        # output.metrics.update(
        #     speed_metrics(
        #         metric_key_prefix, start_time, num_samples=output.num_samples,
        #         num_steps=math.ceil(output.num_samples / total_batch_size),
        #     )
        # )
        self.log(output.metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return output.metrics
    def predict(self, test_dataset, test_examples=None, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict on `test_dataset`; post-process into text answers when both
        `post_process_function` and `test_examples` are available.

        Returns a PredictionOutput whose `predictions` are post-processed when
        possible, raw logits otherwise.
        """
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        predict_dataloader = self.get_test_dataloader(test_dataset)
        start_time = time.time()
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.evaluation_loop(
                predict_dataloader,
                description="Prediction",
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or test_examples is None:
            self._memory_tracker.stop_and_update_metrics(output.metrics)
            return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
        test_preds = self.post_process_function(test_examples, test_dataset, output.predictions,
                                                output_dir=self.args.output_dir, stage="test")
        self._memory_tracker.stop_and_update_metrics(output.metrics)
        return PredictionOutput(predictions=test_preds.predictions, label_ids=output.label_ids, metrics=output.metrics)
| 49,544 | 46.140818 | 130 | py |
CmpLoss | CmpLoss-main/utils/data.py | from collections import OrderedDict
from dataclasses import dataclass
import json
import logging
import os
import random
from tqdm.auto import tqdm
from typing import Any, List, Dict, Optional, Tuple
import numpy as np
import torch
from torch.utils.data import Dataset
from transformers import LongformerTokenizerFast, PreTrainedTokenizerFast
from utils.tensor import pad_tensors
logger = logging.getLogger(__name__)
def load_corpus(corpus_path: str, tokenizer: PreTrainedTokenizerFast = None, recache: bool = False) -> Dict:
    """Load a TSV passage corpus keyed by passage title.

    Each data row must match the header columns and provide at least 'id' and
    'text'; 'title' and 'sentence_spans' are optional. When a `tokenizer` is
    given, title/text tokenizations are cached to a `.cache` directory next to
    the corpus file and attached to each entry as 'title_codes'/'text_codes'.

    Args:
        corpus_path: path to the tab-separated corpus file (first line = header).
        tokenizer: optional fast tokenizer used to pre-tokenize passages.
        recache: force re-tokenization even if a cache file exists.

    Returns:
        Dict mapping title -> {'id', 'text', 'sentence_spans'[, 'title_codes',
        'text_codes']}.
    """
    import ast  # stdlib; local import so the fix is self-contained

    corpus = dict()
    logger.info(f"Loading corpus from {corpus_path} ...")
    with open(corpus_path) as f:
        col_names = None
        for line in f:
            segs = [field.strip() for field in line.strip().split('\t')]
            if not col_names:
                # First line defines the column layout.
                col_names = segs
                continue
            if len(segs) != len(col_names) or segs[0] == 'id':
                logger.warning(f'Wrong line format: {segs[0]}')
                continue
            fields = {k: v for k, v in zip(col_names, segs)}
            p_id, text = fields['id'], fields['text']
            title = fields['title'] if 'title' in fields else ''
            if text == '' and title == '':
                logger.warning(f"empty passage: {p_id}")
                continue
            # SECURITY FIX: this column was previously parsed with `eval`,
            # which executes arbitrary code embedded in the corpus file.
            # `ast.literal_eval` accepts the same list/tuple literals but
            # cannot run code, so behavior is preserved for well-formed data.
            sentence_spans = [tuple(span) for span in
                              ast.literal_eval(fields['sentence_spans'])] if 'sentence_spans' in fields else [(0, len(text))]
            if title in corpus:
                # Later rows overwrite earlier ones; only warn.
                logger.warning(f"Duplicate passage: {p_id} ({title})")
            corpus[title] = {
                "id": p_id,
                "text": text,
                "sentence_spans": sentence_spans
            }
    logger.info(f"{len(corpus):,d} passages Loaded")
    if not tokenizer:
        return corpus
    # Cache file name encodes the tokenizer so different tokenizers don't collide.
    corpus_dir, corpus_file = os.path.split(corpus_path)
    cache_dir = os.path.join(corpus_dir, '.cache')
    cache_file = f"{corpus_file.rsplit('.', 1)[0]}.{tokenizer.name_or_path.replace('/', '_')}.tsv"
    cache_path = os.path.join(cache_dir, cache_file)
    if recache or not os.path.exists(cache_path):
        logger.info(f"Tokenizing and caching {corpus_path} into {cache_path}")
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        with open(cache_path, 'w') as f:
            for title, psg in tqdm(corpus.items(), total=len(corpus)):
                title_codes = tokenizer(title, add_special_tokens=False,
                                        return_attention_mask=False, return_offsets_mapping=True)
                text_codes = tokenizer(psg['text'], add_special_tokens=False,
                                       return_attention_mask=False, return_offsets_mapping=True)
                f.write(f"{title}\t{json.dumps(dict(title_codes))}\t{json.dumps(dict(text_codes))}\n")
    with open(cache_path) as f:
        for line in f:
            title, title_codes, text_codes = [field.strip() for field in line.strip().split('\t')]
            corpus[title]['title_codes'] = json.loads(title_codes)
            corpus[title]['text_codes'] = json.loads(text_codes)
    logger.info(f"Tokenized cache is loaded from {cache_path}")
    return corpus
def construct_char2token(token2spans, char_num):
    """Build a character-index -> token-index lookup table.

    Characters not covered by any token span map to -1; when spans overlap,
    the later token wins.
    """
    lookup = [-1 for _ in range(char_num)]
    for token_index, span in enumerate(token2spans):
        begin, stop = span
        for position in range(begin, stop):
            lookup[position] = token_index
    return lookup
def char2token_span(index_map: List[int], span: Tuple[int, int]) -> Tuple[int, int]:
    """Translate a closed character span into a closed token span.

    `index_map[c]` gives the token covering character `c`, or -1 when no token
    does. Returns (-1, -1) when the span is out of range or contains no mapped
    character; otherwise both endpoints are shrunk inward past unmapped
    characters.
    """
    start, end = span  # closed interval [start, end]
    assert start <= end, f"[{start}, {end}]"
    if start < 0 or end >= len(index_map):
        return -1, -1
    # Advance the left edge past unmapped characters.
    while start + 1 <= end and index_map[start] < 0:
        start += 1
    token_start = index_map[start]
    if token_start < 0:
        return -1, -1
    # Retreat the right edge past unmapped characters.
    while end > start and index_map[end] < 0:
        end -= 1
    token_end = index_map[end]  # inclusive
    assert token_start <= token_end
    return token_start, token_end
def prepare_squad_features(examples: Dict[str, List], tokenizer: PreTrainedTokenizerFast,
                           max_seq_len: int = 512, doc_stride: int = 128, pad_to_max_length: bool = False):
    """Tokenize SQuAD-style examples into (possibly overlapping) model features.

    Long contexts are split into several features using `doc_stride`. Each
    feature records its source example via 'example_id', and — when gold
    answers are present — 'start_positions'/'end_positions' token indices
    (pointing at the CLS token for unanswerable features). Offset mappings
    outside the context are nulled out so downstream post-processing can tell
    context tokens apart.

    Args:
        examples: batch dict with 'id', 'question', 'context'[, 'answers'].
        tokenizer: a fast tokenizer (offset mappings are required).
        max_seq_len: maximum feature length in tokens.
        doc_stride: overlap between consecutive context windows.
        pad_to_max_length: pad every feature to `max_seq_len` when True.

    Returns:
        The tokenized batch dict with the extra fields described above.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples['question'] = [q.lstrip() for q in examples['question']]
    # Padding side determines if we do (question|context) or (context|question).
    pad_on_right = tokenizer.padding_side == 'right'
    context_idx = 1 if pad_on_right else 0
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples['question' if pad_on_right else 'context'],
        examples['context' if pad_on_right else 'question'],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_len,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length' if pad_to_max_length else False,
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # The offset mappings will give us a map from token to character position in the original context. This will
    # help us compute the start_positions and end_positions.
    offset_mapping = tokenized_examples['offset_mapping']
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    if 'answers' in examples:
        # Let's label those examples!
        tokenized_examples['start_positions'] = []
        tokenized_examples['end_positions'] = []
    for i, offsets in enumerate(offset_mapping):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_examples['input_ids'][i]
        cls_index = input_ids.index(tokenizer.cls_token_id)
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        if 'answers' in examples:
            answers = examples['answers'][sample_index]
            # If no answers are given, set the cls_index as answer.
            if len(answers['answer_start']) == 0:
                tokenized_examples['start_positions'].append(cls_index)
                tokenized_examples['end_positions'].append(cls_index)
            else:
                # XXX: only the first answer is considered
                # Start/end character index of the answer in the text.
                start_char = answers['answer_start'][0]
                end_char = start_char + len(answers['text'][0])
                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != context_idx:
                    token_start_index += 1
                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != context_idx:
                    token_end_index -= 1
                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples['start_positions'].append(cls_index)
                    tokenized_examples['end_positions'].append(cls_index)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples['start_positions'].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples['end_positions'].append(token_end_index + 1)
        # Set to None the offset_mapping that are not part of the context,
        # so it's easy to determine if a token position is part of the context or not.
        offset_mapping[i] = [o if sequence_ids[k] == context_idx else None for k, o in enumerate(offset_mapping[i])]
    return tokenized_examples
@dataclass
class MultiDocQACollator:
    """Collate multi-document QA samples into padded batch tensors.

    Samples may be single-view (each tensor field is a plain tensor) or
    multi-view (each field is a list of `n_view` tensors, produced by the
    dataset's cropping augmentation); the two cases are detected from
    `input_ids`. Non-tensor fields are passed through as per-sample lists.
    """
    # Token id used to pad `input_ids`; all other tensor fields pad with 0.
    pad_token_id: Optional[int] = 0
    # Pad sequence lengths up to a multiple of this value.
    pad_to_multiple_of: Optional[int] = 1
    def __call__(self, samples: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Collate `samples` into a batch dict.

        Tensor fields come out as (bsz, n_view, seq) — or (bsz, seq) after the
        view dimension is squeezed away when n_view == 1; span labels come out
        as (bsz, n_view) / (bsz,).
        """
        bsz = len(samples)
        if bsz == 0:
            return dict()
        nn_keys = ["input_ids", "attention_mask", "token_type_ids", "global_attention_mask", "answer_mask"]
        if "start_positions" in samples[0]:
            nn_keys.extend(["start_positions", "end_positions"])
        if torch.is_tensor(samples[0]['input_ids']):
            # Single-view sample: fields are tensors.
            n_view = 1
            nn_input: Dict[str, List[torch.Tensor]] = {
                k: [sample[k] for sample in samples] for k in nn_keys
            }
        else:
            # Multi-view sample: fields are lists of n_view tensors; flatten
            # sample-major so views of one sample stay adjacent.
            n_view = len(samples[0]['input_ids'])
            nn_input: Dict[str, List[torch.Tensor]] = {
                k: [sample[k][i] for sample in samples for i in range(n_view)] for k in nn_keys
            }
        batch: Dict[str, torch.Tensor] = dict()
        for k, v in nn_input.items():
            if k in ["start_positions", "end_positions"]:
                # Scalar span labels: no padding needed.
                batch[k] = torch.stack(v).view(bsz, n_view)
            elif k == 'input_ids':
                batch[k] = pad_tensors(v, self.pad_token_id, self.pad_to_multiple_of).view(bsz, n_view, -1)
            else:
                batch[k] = pad_tensors(v, 0, self.pad_to_multiple_of).view(bsz, n_view, -1)
            # Drop the view dimension in-place when it is 1 (no-op otherwise).
            batch[k].squeeze_(1)
        # Pass through the remaining (non-tensor) fields; for multi-view
        # samples only the first view's value is kept.
        batch.update({
            k: [sample[k][0] if n_view > 1 else sample[k] for sample in samples] for k in samples[0] if k not in batch
        })
        return batch
class MultiDocQADataset(Dataset):
    """Multi-document QA dataset packing a question plus several passages into
    one long (Longformer-style) input sequence.

    Each line of `data_path` carries: id, question, JSON passage-title list,
    JSON answers ({'positions': [(title, 0|1, start_char, end_char), ...]})
    and JSON supporting facts. Passage text and cached tokenizations come from
    `corpus` (see `load_corpus`); question tokenizations are cached on disk
    next to the data file. With `crop_times > 0`, `__getitem__` additionally
    emits cropped views containing fewer distractor passages.
    """
    def __init__(self, data_path: str, tokenizer: LongformerTokenizerFast, corpus: Dict,
                 max_seq_len: int = 4096, max_q_len: int = 128, max_p_len: int = 256, max_p_num: int = None,
                 mode: str = 'test', crop_times: int = 0, recache: bool = False):
        self.tokenizer = tokenizer
        self.corpus = corpus
        self.max_seq_len = max_seq_len
        self.max_q_len = max_q_len
        self.max_p_len = max_p_len
        self.max_p_num = max_p_num
        # 'train' shuffles passages and samples an answer position; 'test'
        # produces unlabeled features; other modes behave like validation.
        self.mode = mode
        # Number of extra cropped views per example (0 = single view).
        self.crop_times = crop_times
        self.q_ids = []
        self.examples = dict()
        with open(data_path) as f:
            for line in f:
                q_id, question, context, answers, sp_facts = [field.strip() for field in line.strip().split('\t')]
                if q_id == 'id':
                    # Header row.
                    continue
                context, answers, sp_facts = json.loads(context), json.loads(answers), json.loads(sp_facts)
                answers['positions'] = [tuple(pos) for pos in answers['positions']]
                example = {
                    "id": q_id,
                    "question": question,
                    "context": {title: corpus[title] for title in context},
                    "answers": answers,
                    "sp_facts": sp_facts
                }
                if max_p_num is not None:
                    # Drop non-supporting passages until at most max_p_num remain.
                    for title in list(example['context'].keys()):
                        if len(example['context']) <= max_p_num:
                            break
                        if title not in example['sp_facts']:
                            example['context'].pop(title)
                self.q_ids.append(q_id)
                self.examples[q_id] = example
        # Tokenize questions once and cache, keyed by tokenizer name.
        data_dir, data_file = os.path.split(data_path)
        cache_dir = os.path.join(data_dir, '.cache')
        cache_file = f"{data_file.rsplit('.', 1)[0]}.{self.tokenizer.name_or_path.replace('/', '_')}.tsv"
        cache_path = os.path.join(cache_dir, cache_file)
        if recache or not os.path.exists(cache_path):
            logger.info(f"Tokenizing and caching questions of {data_path} into {cache_path}")
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            with open(cache_path, 'w') as f:
                for q_id, example in tqdm(self.examples.items(), total=len(self.examples)):
                    ques_codes = self.tokenizer(example['question'], add_special_tokens=False,
                                                return_attention_mask=False, return_offsets_mapping=True)
                    f.write(f"{q_id}\t{json.dumps(dict(ques_codes))}\n")
        with open(cache_path) as f:
            for line in f:
                q_id, ques_codes = [field.strip() for field in line.strip().split('\t')]
                self.examples[q_id]['ques_codes'] = json.loads(ques_codes)
    def __len__(self):
        return len(self.examples)
    def _construct_feature(self, example, doc_seq: List[str],
                           ans_pos: Tuple[str, int, int, int] = None) -> Dict[str, Any]:
        """Build one packed input sequence over the passages in `doc_seq`.

        `ans_pos` is (title, 0|1, start_char, end_char), where 0 marks an
        answer inside a passage title and 1 inside the passage text; None
        means no labels (test mode). Spans that do not survive truncation are
        labeled (-100, -100) so the loss ignores them.
        """
        yes = 'ĠYES'
        no = 'ĠNO'
        soq = '????????'
        sod = 'madeupword0000'
        sop = 'madeupword0001'
        # sos = 'madeupword0002'
        yes_id = self.tokenizer.convert_tokens_to_ids(yes)
        no_id = self.tokenizer.convert_tokens_to_ids(no)
        soq_id = self.tokenizer.convert_tokens_to_ids(soq)
        sod_id = self.tokenizer.convert_tokens_to_ids(sod)
        sop_id = self.tokenizer.convert_tokens_to_ids(sop)
        # sos_id = self.tokenizer.convert_tokens_to_ids(sos)
        cls_id = self.tokenizer.cls_token_id
        sep_id = self.tokenizer.sep_token_id
        if not ans_pos:
            answer_start, answer_end = None, None
        elif ans_pos[:2] == ("012Q", -1):
            # NOTE(review): looks like a sentinel for answers located in the
            # special prefix tokens (e.g. YES/NO) — confirm against the data.
            answer_start, answer_end = 1 + ans_pos[2], 1 + ans_pos[3]
        else:
            # Real span; overwritten below when it is found inside the window.
            answer_start, answer_end = -100, -100
        '''
        <s> [YES] [NO] [Q] q </s> [T] t1 [P] p1 [T] t2 [P] p2 </s>
        1 + 3 + |Q| + 1 + np * (2 + |P|) + 1
        '''
        input_ids = [cls_id, yes_id, no_id, soq_id]
        token_type_ids = [0] * len(input_ids)
        global_attention_mask = [1] * len(input_ids)
        answer_mask = [0] + [1] * (len(input_ids) - 1)
        # concatenate question
        question_codes = example['ques_codes']
        input_ids += question_codes['input_ids'][:self.max_q_len]
        global_attention_mask += [1] * (len(input_ids) - len(global_attention_mask))
        input_ids.append(sep_id)
        global_attention_mask.append(0)
        token_type_ids += [0] * (len(input_ids) - len(token_type_ids))
        answer_mask += [0] * (len(input_ids) - len(answer_mask))
        assert len(input_ids) == len(token_type_ids) == len(global_attention_mask) == len(answer_mask)
        range2para: Dict[Tuple[int, int], Tuple[str, int]] = OrderedDict()  # closed interval -> paragraph position
        for title in doc_seq:
            if len(input_ids) >= self.max_seq_len:
                break
            doc = example['context'][title]
            if self.mode == 'train' and ans_pos[:2] == (title, 1):
                max_p_len = max(self.max_p_len, ans_pos[3] + 12)  # should at least include the answer
            else:
                max_p_len = self.max_p_len
            # concatenate title
            input_ids.append(sod_id)
            token_type_ids.append(1)
            global_attention_mask.append(1)
            answer_mask.append(0)
            title_offset = len(input_ids)
            input_ids += doc['title_codes']['input_ids']
            range2para[(title_offset, len(input_ids) - 1)] = (title, 0)
            token_type_ids += [1] * (len(input_ids) - len(token_type_ids))
            global_attention_mask += [0] * (len(input_ids) - len(global_attention_mask))
            answer_mask += [1] * (len(input_ids) - len(answer_mask))
            # label answer span in title tokens
            if ans_pos and ans_pos[:2] == (title, 0):
                char2token = construct_char2token(doc['title_codes']['offset_mapping'], len(title))
                start_char, end_char = ans_pos[2:]
                start_token, end_token = char2token_span(char2token, (start_char, end_char - 1))
                if start_token >= 0:
                    answer_start = title_offset + start_token
                    answer_end = title_offset + end_token
            # concatenate text
            input_ids.append(sop_id)
            token_type_ids.append(1)
            global_attention_mask.append(1)
            answer_mask.append(0)
            text_offset = len(input_ids)
            input_ids += doc['text_codes']['input_ids'][:max_p_len]
            range2para[(text_offset, len(input_ids) - 1)] = (title, 1)
            token_type_ids += [1] * (len(input_ids) - len(token_type_ids))
            global_attention_mask += [0] * (len(input_ids) - len(global_attention_mask))
            answer_mask += [1] * (len(input_ids) - len(answer_mask))
            # label answer span in text tokens
            if ans_pos and ans_pos[:2] == (title, 1):
                char2token = construct_char2token(doc['text_codes']['offset_mapping'], len(doc['text']))
                start_char, end_char = ans_pos[2:]
                start_token, end_token = char2token_span(char2token, (start_char, end_char - 1))
                if start_token >= 0:
                    answer_start = text_offset + start_token
                    answer_end = text_offset + end_token
            assert len(input_ids) == len(token_type_ids) == len(global_attention_mask) == len(answer_mask)
        if len(input_ids) >= self.max_seq_len:
            # Truncate to leave room for the final separator token.
            input_ids = input_ids[:self.max_seq_len - 1]
            token_type_ids = token_type_ids[:len(input_ids)]
            global_attention_mask = global_attention_mask[:len(input_ids)]
            answer_mask = answer_mask[:len(input_ids)]
            if answer_end is not None and answer_end >= self.max_seq_len - 1:
                # Answer fell outside the truncated window.
                answer_start, answer_end = -100, -100
        input_ids.append(sep_id)
        token_type_ids.append(1)
        global_attention_mask.append(0)
        answer_mask.append(0)
        feature = {
            "q_id": example['id'],
            "range2para": range2para,
            "input_ids": torch.tensor(input_ids, dtype=torch.long),
            "attention_mask": torch.tensor([1] * len(input_ids), dtype=torch.long),
            "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
            "global_attention_mask": torch.tensor(global_attention_mask, dtype=torch.long),
            "answer_mask": torch.tensor(answer_mask, dtype=torch.float),
        }
        if self.mode == 'test' or None in (answer_start, answer_end):
            return feature
        feature.update({
            "start_positions": torch.tensor(answer_start, dtype=torch.long),
            "end_positions": torch.tensor(answer_end, dtype=torch.long),
        })
        return feature
    def __getitem__(self, index) -> Dict[str, Any]:
        """Return one feature dict, or — when `crop_times > 0` — a dict whose
        values are lists holding one entry per (full + cropped) view."""
        q_id = self.q_ids[index]
        example = self.examples[q_id]
        titles = list(example['context'].keys())
        if self.mode == 'test':
            return self._construct_feature(example, titles)
        if self.mode == 'train':
            random.shuffle(titles)
            ans_pos = random.choice(example['answers']['positions'])
        else:
            ans_pos = example['answers']['positions'][0]
        feature = self._construct_feature(example, titles, ans_pos)
        if self.mode != 'train':  # cache token offsets for prediction
            example['range2para'] = feature['range2para']
        if self.crop_times == 0:
            return feature
        features = [feature]
        doc2rank = {d: i for i, d in enumerate(titles)}
        support_docs = list(example['sp_facts'].keys())
        distractor_docs = [d for d in titles if d not in example['sp_facts']]
        if self.mode == 'train':
            # Randomly pick how many distractors each cropped view keeps.
            distractor_docs = np.random.permutation(distractor_docs).tolist()
            if len(distractor_docs) > self.crop_times:
                distractor_nums = sorted(
                    np.random.choice(range(len(distractor_docs)), size=self.crop_times, replace=False), reverse=True
                )
            else:
                distractor_nums = list(range(len(distractor_docs) - 1, -1, -1))
                distractor_nums += [0] * (self.crop_times - len(distractor_nums))
        else:
            # Deterministic crop schedule for evaluation.
            distractor_nums = [6, 4, 2, 0][:self.crop_times]
        for nd in distractor_nums:
            # Keep supporting docs plus the first `nd` distractors, preserving
            # the original passage ordering of this example.
            doc_seq = sorted(support_docs + distractor_docs[:nd], key=lambda d: doc2rank[d])
            features.append(self._construct_feature(example, doc_seq, ans_pos))
        return {k: [f[k] for f in features] for k in feature}
| 21,763 | 45.504274 | 118 | py |
CmpLoss | CmpLoss-main/utils/qa.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post-processing utilities for question answering.
"""
import collections
import json
import logging
import os
from typing import Dict, List, Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
from utils.data import MultiDocQADataset
logger = logging.getLogger(__name__)
def postprocess_squad_predictions(
    examples,
    features,
    predictions: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
    version_2_with_negative: bool = False,
    n_best_size: int = 20,
    max_answer_length: int = 30,
    null_score_diff_threshold: float = 0.0,
    output_dir: Optional[str] = None,
    prefix: Optional[str] = None,
):
    """
    Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
    original contexts. This is the base postprocessing functions for models that only return start and end logits.
    Args:
        examples: The non-preprocessed dataset (see the main script for more information).
        features: The processed dataset (see the main script for more information).
        predictions (:obj:`Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]`):
            The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
            first dimension must match the number of elements of :obj:`features`.
        version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the underlying dataset contains examples with no answers.
        n_best_size (:obj:`int`, `optional`, defaults to 20):
            The total number of n-best predictions to generate when looking for an answer.
        max_answer_length (:obj:`int`, `optional`, defaults to 30):
            The maximum length of an answer that can be generated. This is needed because the start and end predictions
            are not conditioned on one another.
        null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
            The threshold used to select the null answer: if the best answer has a score that is less than the score of
            the null answer minus this threshold, the null answer is selected for this example (note that the score of
            the null answer for an example giving several features is the minimum of the scores for the null answer on
            each feature: all features must be aligned on the fact they `want` to predict a null answer).
            Only useful when :obj:`version_2_with_negative` is :obj:`True`.
        output_dir (:obj:`str`, `optional`):
            If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
            :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
            answers, are saved in `output_dir`.
        prefix (:obj:`str`, `optional`):
            If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
    Returns:
        collections.OrderedDict mapping each example id to its predicted answer text
        (the empty string when the null answer wins under `version_2_with_negative`).
    """
    if len(predictions) < 2:
        raise ValueError("`predictions` should be a tuple with at least 2 elements "
                         "(start_logits, end_logits, *pred_starts, *pred_ends).")
    all_start_logits, all_end_logits = predictions[:2]
    if len(predictions[0]) != len(features):
        raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
    # Build a map example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)
    # The dictionaries we have to fill.
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    # Logging.
    logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
    # Let's loop over all the examples!
    for example_index, example in enumerate(tqdm(examples)):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]
        min_null_prediction: Optional[Dict] = None
        prelim_predictions: List[Dict] = []
        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some the positions in our logits to span of texts in the original
            # context.
            offset_mapping = features[feature_index]["offset_mapping"]
            # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
            # available in the current feature.
            token_is_max_context = features[feature_index].get("token_is_max_context", None)
            # Update minimum null prediction.
            # Position 0 (the CLS token by convention) scores the "no answer" option.
            feature_null_score = start_logits[0] + end_logits[0]
            if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
                min_null_prediction = {
                    "offsets": (0, 0),
                    "score": feature_null_score,
                    "start_logit": start_logits[0],
                    "end_logit": end_logits[0],
                }
            # Go through all possibilities for the `n_best_size` greater start and end logits.
            # The slice takes the top-n indices of argsort in descending-logit order.
            start_indexes = np.argsort(start_logits)[-1: -n_best_size - 1: -1].tolist()
            end_indexes = np.argsort(end_logits)[-1: -n_best_size - 1: -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
                    # to part of the input_ids that are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or len(offset_mapping[start_index]) < 2
                        or offset_mapping[end_index] is None
                        or len(offset_mapping[end_index]) < 2
                    ):
                        continue
                    # Don't consider answers with a length that is either < 0 or > max_answer_length.
                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:
                        continue
                    # Don't consider answer that don't have the maximum context available (if such information is
                    # provided).
                    if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
                        continue
                    prelim_predictions.append(
                        {
                            "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
                            "score": start_logits[start_index] + end_logits[end_index],
                            "start_logit": start_logits[start_index],
                            "end_logit": end_logits[end_index],
                        }
                    )
        null_score = None
        if version_2_with_negative and min_null_prediction is not None:
            # Add the minimum null prediction
            prelim_predictions.append(min_null_prediction)
            null_score = min_null_prediction["score"]
        # Only keep the best `n_best_size` predictions.
        predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
        # Add back the minimum null prediction if it was removed because of its low score.
        if (
            version_2_with_negative
            and min_null_prediction is not None
            and not any(p["offsets"] == (0, 0) for p in predictions)
        ):
            predictions.append(min_null_prediction)
        # Use the offsets to gather the answer text in the original context.
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0]: offsets[1]]
        # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
        # failure.
        if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
            predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
        # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
        # the LogSumExp trick).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        # Include the probabilities in our predictions.
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        # Pick the best prediction. If the null answer is not possible, this is easy.
        if not version_2_with_negative:
            all_predictions[example["id"]] = predictions[0]["text"]
        else:
            assert null_score is not None
            # Otherwise we first need to find the best non-empty prediction.
            i = 0
            while predictions[i]["text"] == "":
                i += 1
            best_non_null_pred = predictions[i]
            # Then we compare to the null prediction using the threshold.
            score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
            scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
            if score_diff > null_score_diff_threshold:
                all_predictions[example["id"]] = ""
            else:
                all_predictions[example["id"]] = best_non_null_pred["text"]
        # Make `predictions` JSON-serializable by casting np.float back to float.
        all_nbest_json[example["id"]] = [
            {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
            for pred in predictions
        ]
    # If we have an output_dir, let's save all those dicts.
    if output_dir is not None:
        if not os.path.exists(output_dir):
            # NOTE(review): os.mkdir fails when parent directories are missing;
            # os.makedirs(output_dir, exist_ok=True) would be more forgiving.
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            raise EnvironmentError(f"{output_dir} is not a directory.")
        prediction_file = os.path.join(
            output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
        )
        logger.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")
        nbest_file = os.path.join(
            output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
        )
        logger.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        if version_2_with_negative:
            null_odds_file = os.path.join(
                output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
            )
            logger.info(f"Saving null_odds to {null_odds_file}.")
            with open(null_odds_file, "w") as writer:
                writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return all_predictions
def postprocess_hotpot_predictions(
    examples,
    features: MultiDocQADataset,
    predictions: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
    n_best_size: int = 20,
    output_dir: Optional[str] = None,
    prefix: Optional[str] = None,
):
    """Map raw span predictions back to answer strings for multi-document (HotpotQA-style) examples.

    Args:
        examples: mapping from question id to example dict; falls back to
            ``features.examples`` when None. Each example is expected to carry
            `range2para` (token span -> paragraph info, cached at feature time)
            and `context` (title -> document with offset mappings).
        features: processed dataset providing `q_ids` (and `examples`).
        predictions: tuple of (start_logits, end_logits, pred_starts, pred_ends),
            each aligned with `features` along the first dimension.
        n_best_size: maximum number of candidate answers kept per question.
        output_dir: if given, dump `predictions.json` and `nbest_predictions.json`.
        prefix: optional filename prefix for the dumped JSON files.

    Returns:
        collections.OrderedDict mapping each question id to its best answer text.
    """
    if len(predictions) != 4:
        raise ValueError("`predictions` should be a tuple with 4 elements "
                         "(start_logits, end_logits, pred_starts, pred_ends).")
    all_start_logits, all_end_logits, all_pred_starts, all_pred_ends = predictions
    if len(predictions[0]) != len(features):
        raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} samples.")
    if examples is None:
        examples = features.examples
    # The dictionaries we have to fill.
    all_predictions = collections.OrderedDict()
    all_top_preds = collections.OrderedDict()
    for i, q_id in enumerate(features.q_ids):
        example = examples[q_id]
        # NOTE(review): this rebinds the `predictions` parameter as the per-question
        # candidate list -- intentional, but easy to misread.
        predictions: List[Dict] = []
        start_logits, end_logits = all_start_logits[i], all_end_logits[i]
        pred_starts, pred_ends = all_pred_starts[i], all_pred_ends[i]
        for start_token, end_token in zip(pred_starts, pred_ends):
            if len(predictions) >= n_best_size:
                break
            if start_token > end_token:
                continue
            # Token positions 1 and 2 are special slots encoding yes/no answers.
            if start_token == end_token == 1:
                pred_ans = 'yes'
            elif start_token == end_token == 2:
                pred_ans = 'no'
            else:
                # Locate the paragraph whose token range fully contains the span.
                ans_para = None
                tok_offset = None
                for para_start, para_end in example['range2para'].keys():
                    if para_start <= start_token <= end_token <= para_end:
                        ans_para = example['range2para'][(para_start, para_end)]
                        tok_offset = para_start
                        break
                    if start_token < para_start:
                        # Assumes range2para keys iterate in ascending token order,
                        # so no later paragraph can match -- TODO confirm.
                        break
                if ans_para is None:
                    continue
                # ans_para is (title, in_body_flag): the span lives either in the
                # paragraph body text or in the title itself.
                title = ans_para[0]
                doc = example['context'][title]
                para_text = doc['text'] if ans_para[1] else title
                token_spans = doc['text_codes' if ans_para[1] else 'title_codes']['offset_mapping']
                star_char = token_spans[start_token - tok_offset][0]
                end_char = token_spans[end_token - tok_offset][1]
                pred_ans = para_text[star_char:end_char].strip()
            predictions.append(
                {
                    "text": pred_ans,
                    "start_logit": start_logits[start_token],
                    "end_logit": end_logits[end_token],
                    "score": start_logits[start_token] + end_logits[end_token],
                }
            )
        # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
        # failure.
        if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
            predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
        # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
        # the LogSumExp trick).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        all_predictions[q_id] = predictions[0]["text"]
        # Make `predictions` JSON-serializable by casting np.float back to float.
        all_top_preds[q_id] = [
            {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
            for pred in predictions
        ]
    # If we have an output_dir, let's save all those dicts.
    if output_dir is not None:
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            raise EnvironmentError(f"{output_dir} is not a directory.")
        prediction_file = os.path.join(
            output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
        )
        logger.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=2) + "\n")
        nbest_file = os.path.join(
            output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
        )
        logger.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_top_preds, indent=4) + "\n")
    return all_predictions
| 17,507 | 47.633333 | 119 | py |
CmpLoss | CmpLoss-main/utils/ranking.py | import logging
from itertools import product
from typing import Optional
import torch
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def list_mle(y_pred: torch.Tensor, y_true: torch.Tensor, mask: Optional[torch.Tensor] = None,
             reduction: Optional[str] = 'mean', eps: Optional[float] = 1e-10) -> torch.Tensor:
    """ListMLE loss introduced in "Listwise Approach to Learning to Rank - Theory and Algorithm".
    Args:
        y_pred: (N, L) predictions from the model
        y_true: (N, L) ground truth labels
        mask: (N, L) 1 for available position, 0 for masked position
        reduction: 'none' | 'mean' | 'sum'
        eps: epsilon value, used for numerical stability
    Returns:
        torch.Tensor: scalar if `reduction` is not 'none' else (N,)
    """
    # shuffle for randomized tie resolution
    # (sorting the shuffled columns breaks ties between equal y_true values at random)
    random_indices = torch.randperm(y_pred.shape[-1])
    shuffled_y_pred = y_pred[:, random_indices]
    shuffled_y_true = y_true[:, random_indices]
    shuffled_mask = mask[:, random_indices] if mask is not None else None
    # Reorder the predicted scores into descending ground-truth-relevance order.
    sorted_y_true, rank_true = shuffled_y_true.sort(descending=True, dim=1)
    y_pred_in_true_order = shuffled_y_pred.gather(dim=1, index=rank_true)
    if shuffled_mask is not None:
        # Shift masked positions far down so they contribute ~0 to the partition sums.
        y_pred_in_true_order = y_pred_in_true_order - 10000.0 * (1.0 - shuffled_mask)
    # Subtract the per-row max before exponentiating (log-sum-exp stabilization).
    max_y_pred, _ = y_pred_in_true_order.max(dim=1, keepdim=True)
    y_pred_in_true_order_minus_max = y_pred_in_true_order - max_y_pred
    # flip/cumsum/flip computes suffix sums: at position j, the sum of exp-scores
    # over positions j..L-1 (the Plackett-Luce normalizer for step j).
    cum_sum = y_pred_in_true_order_minus_max.exp().flip(dims=[1]).cumsum(dim=1).flip(dims=[1])
    # Per-position negative log-likelihood of picking each item at its rank.
    observation_loss = torch.log(cum_sum + eps) - y_pred_in_true_order_minus_max
    if shuffled_mask is not None:
        observation_loss[shuffled_mask == 0] = 0.0
    # The final position's term is log(exp(s)+eps)-s, i.e. near zero, so it is
    # dropped here; cf. the commented full-sum alternative below.
    loss = observation_loss[:, :-1].sum(dim=1)
    # loss = observation_loss.sum(dim=1)
    if reduction == 'none':
        return loss
    elif reduction == 'sum':
        return loss.sum()
    else:
        return loss.mean()
def pairwise_hinge(y_pred: torch.Tensor, y_true: torch.Tensor, mask: Optional[torch.Tensor] = None,
                   margin: Optional[float] = 0., reduction: Optional[str] = 'mean') -> torch.Tensor:
    """RankNet loss introduced in "Learning to Rank using Gradient Descent".
    Args:
        y_pred: (N, L) predicted scores
        y_true: (N, L) ground truth labels
        mask: (N, L) 1 for available position, 0 for masked position
        margin: hinge margin passed to margin_ranking_loss
        reduction: 'none' | 'mean' | 'sum'
    Returns:
        torch.Tensor: scalar if `reduction` is not 'none' else (N,)
    """
    if mask is not None:
        # Clone so the caller's tensors are not mutated; mark padded slots -inf so
        # any pair touching them is filtered out below via isinf().
        y_pred = y_pred.clone()
        y_true = y_true.clone()
        y_pred[mask == 0] = float('-inf')
        y_true[mask == 0] = float('-inf')
    # generate every pair of indices from the range of candidates number in the batch
    candidate_pairs = list(product(range(y_true.shape[1]), repeat=2)) # (L^2, 2)
    # (N, L^2, 2)
    pairs_true = y_true[:, candidate_pairs]
    pairs_pred = y_pred[:, candidate_pairs]
    # calculate the relative true relevance of every candidate pair
    true_diffs = pairs_true[:, :, 0] - pairs_true[:, :, 1] # (N, L^2)
    # filter just the pairs that are 'positive' and did not involve a padded instance
    # we can do that since in the candidate pairs we had symmetric pairs, so we can stick with
    # positive ones for a simpler loss function formulation
    the_mask = (true_diffs > 0) & (~torch.isinf(true_diffs)) # (N, L^2)
    # (num_pairs,)
    s1 = pairs_pred[:, :, 0][the_mask]
    s2 = pairs_pred[:, :, 1][the_mask]
    # target is all ones here (s1 should rank above s2 for every kept pair).
    target = the_mask.float()[the_mask]
    # (N, L^2)
    pair_losses = torch.zeros_like(pairs_pred[:, :, 0])
    pair_losses[the_mask] = F.margin_ranking_loss(s1, s2, target, margin=margin, reduction='none')
    # pair_losses[the_mask] = (s2 - s1 + margin).relu()
    # (N,)
    # NOTE(review): rows with no qualifying pair make the_mask.sum(dim=1) zero, so
    # this division yields NaN -- presumably callers guarantee at least one ordered
    # pair per row; verify.
    loss = pair_losses.sum(dim=1) / the_mask.sum(dim=1)
    if reduction == 'none':
        return loss
    elif reduction == 'sum':
        return loss.sum()
    else:
        return loss.mean()
| 4,044 | 37.52381 | 100 | py |
CmpLoss | CmpLoss-main/utils/tensor.py | from typing import List
import torch
def mask_where0(x, m):
"""
Args:
x (torch.Tensor): (*)
m (torch.Tensor): same size as logits
1 for positions that are NOT MASKED, 0 for MASKED positions.
Returns:
torch.Tensor: same size as logits
"""
if x.dtype == torch.float16:
return x * m - 65500 * (1 - m)
else:
return x * m - 1e30 * (1 - m)
def pad_tensors(tensors: List[torch.Tensor], pad_val: int, pad_to_multiple_of: int = 1,
                left_pad: bool = False, move_eos_to_beginning: bool = False, eos_val: bool = None):
    """Convert a list of 1d tensors into a padded 2d tensor.

    Args:
        tensors: tensors to stack; multi-dimensional inputs are flattened first.
        pad_val: fill value for the padding cells.
        pad_to_multiple_of: round the padded width up to a multiple of this.
        left_pad: pad on the left (content right-aligned) instead of the right.
        move_eos_to_beginning: shift each tensor's trailing EOS token to the front.
        eos_val: the expected EOS value (required when move_eos_to_beginning).
    Returns:
        torch.Tensor of shape (len(tensors), padded_width).
    """
    flat = [t.view(-1) for t in tensors] if len(tensors[0].size()) > 1 else tensors
    width = max(t.size(0) for t in flat)
    remainder = width % pad_to_multiple_of
    if remainder:
        # Round the row width up to the next multiple.
        width += pad_to_multiple_of - remainder
    out = flat[0].new_full((len(flat), width), pad_val, requires_grad=flat[0].requires_grad)
    for row, src in zip(out, flat):
        n = src.size(0)
        # Right-align the content when left-padding, otherwise left-align.
        dst = row[width - n:] if left_pad else row[:n]
        if move_eos_to_beginning:
            assert src[-1] == eos_val
            dst[0] = eos_val
            dst[1:] = src[:-1]
        else:
            dst.copy_(src)
    return out
| 1,471 | 31 | 111 | py |
BPAM | BPAM-master/Baselines/DeepCF/MLP2.py | import numpy as np
import tensorflow as tf
from keras import initializers
from keras.regularizers import l2
from keras.models import Model
from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Lambda, Reshape
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras import backend as K
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import argparse
def parse_args(argv=None):
    """Parse command-line arguments for the MLP model.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``,
            so the original ``parse_args()`` call pattern is unchanged.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Run MLP.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=20,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--layers', nargs='?', default='[512,256,128,64]',
                        help="Size of each layer. Note that the first layer is the "
                             "concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
    # The __main__ block of this script reads `userlayers`/`itemlayers` (it was
    # copied from the DMF script); define them here so startup does not crash
    # with AttributeError. Defaults mirror the DMF parser.
    parser.add_argument('--userlayers', nargs='?', default='[512, 64]',
                        help="Size of each user layer")
    parser.add_argument('--itemlayers', nargs='?', default='[1024, 64]',
                        help="Size of each item layer")
    parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0]',
                        help="Regularization for each layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args(argv)
def get_model(train, num_users, num_items, layers=[20, 10, 5, 2], reg_layers=[0, 0, 0, 0]):
    """Build the MLP rating model: multi-hot rating vectors are linearly embedded,
    concatenated, and passed through a ReLU tower ending in a sigmoid score.

    Args:
        train: sparse user-item training matrix (densified via getTrainMatrix).
        num_users: number of users (rows of `train`).
        num_items: number of items (columns of `train`).
        layers: widths of the MLP tower; layers[0]//2 is each embedding size.
        reg_layers: L2 regularization factors, one per layer.

    Returns:
        An uncompiled keras Model mapping (user index, item index) -> score in (0, 1).
    """
    # assert len(layers) == len(reg_layers)
    num_layer = len(layers) # Number of layers in the MLP
    # The full (dense) rating matrix and its transpose are baked into the graph
    # as constants; each example is looked up by index via tf.gather below.
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    user_rating = Lambda(lambda x: tf.gather(user_matrix, tf.to_int32(x)))(user_input)
    item_rating = Lambda(lambda x: tf.gather(item_matrix, tf.to_int32(x)))(item_input)
    # Without the Reshape the tensor would be 3-D (with a middle dimension of 1);
    # after the Reshape it is 2-D: the first dimension is the batch, the second
    # is the number of items / number of users.
    user_rating = Reshape((num_items, ))(user_rating)
    item_rating = Reshape((num_users, ))(item_rating)
    MLP_Embedding_User = Dense(layers[0]//2, kernel_regularizer=l2(reg_layers[0]), activation="linear" , name='user_embedding')
    MLP_Embedding_Item = Dense(layers[0]//2, kernel_regularizer=l2(reg_layers[0]), activation="linear" , name='item_embedding')
    user_latent = MLP_Embedding_User(user_rating)
    item_latent = MLP_Embedding_Item(item_rating)
    # The 0-th layer is the concatenation of embedding layers
    # vector = merge([user_latent, item_latent], mode = 'concat')
    vector = concatenate([user_latent, item_latent])
    # MLP layers
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name='layer%d' % idx)
        vector = layer(vector)
    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(vector)
    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)
    return model_
def getTrainMatrix(train):
    """Densify the sparse training rating matrix.

    Args:
        train: scipy dok-style sparse user-item matrix; ``train.keys()``
            yields the (user, item) pairs of observed ratings.

    Returns:
        np.ndarray of shape (num_users, num_items); unobserved cells are 0.
    """
    num_users, num_items = train.shape
    # `np.float` was removed in NumPy 1.24; the builtin float is equivalent.
    train_matrix = np.zeros([num_users, num_items], dtype=float)
    for (u, i) in train.keys():
        # Index the sparse matrix with a (row, col) tuple -- consistent with the
        # sibling DMF script and robust across scipy versions (the original
        # chained `train[u][i]` relied on sparse row extraction).
        train_matrix[u][i] = train[u, i]
    return train_matrix
def get_train_instances(train, num_negatives):
    """Flatten the observed training ratings into parallel input/label lists.

    Args:
        train: training rating structure, iterated via ``train.keys()``.
        num_negatives: negatives per positive -- unused here; the negative
            sampling code below is commented out.

    Returns:
        (user_input, item_input, labels) parallel lists.
    """
    user_input, item_input, labels = [], [], []
    num_users = train.shape[0]
    # for (u, i) in train.keys():
    #     # positive instance
    #     user_input.append(u)
    #     item_input.append(i)
    #     labels.append(1)
    #     # negative instances
    #     for t in range(num_negatives):
    #         j = np.random.randint(num_items)
    #         # while train.has_key((u, j)):
    #         while (u, j) in train.keys():
    #             j = np.random.randint(num_items)
    #         user_input.append(u)
    #         item_input.append(j)
    #         labels.append(0)
    # NOTE(review): this unpacks 3-tuple keys (u, i, rating), while the sibling
    # DMF script iterates 2-tuple (u, i) keys and looks the rating up -- confirm
    # what `train.keys()` actually yields here; with a scipy dok matrix this
    # unpacking would raise ValueError.
    for (u, i, rating) in train.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(rating)
    return user_input, item_input, labels
if __name__ == '__main__':
    # NOTE(review): this __main__ block appears to be copied from the DMF script:
    # it reads `args.userlayers` / `args.itemlayers`, which this file's
    # parse_args() does not define, and it calls get_model() with six arguments
    # while the get_model() defined above takes (train, num_users, num_items,
    # layers, reg_layers). As written the script crashes at startup -- confirm
    # which variant is intended.
    # (A large commented-out copy of the original MLP training loop, which used
    # HR/NDCG evaluation instead of RMSE/MAE, was removed here in review.)
    args = parse_args()
    path = args.path
    dataset = args.dataset
    userlayers = eval(args.userlayers)
    itemlayers = eval(args.itemlayers)
    reg_layers = eval(args.reg_layers)
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    batch_size = args.batch_size
    epochs = args.epochs
    verbose = args.verbose
    topK = 10
    evaluation_threads = 1 # mp.cpu_count()
    print("DMF arguments: %s " % (args))
    model_out_file = 'Pretrain/%s_DMF_%d.h5' % (args.dataset, time())
    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          % (time() - t1, num_users, num_items, train.nnz, len(testRatings)))
    # print(train.shape)
    # input("Feel free~")
    # Build model
    model = get_model(train, num_users, num_items, userlayers, itemlayers, reg_layers)
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='mean_squared_error') # change mean_squared_error from binary_crossentropy
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='mean_squared_error')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='mean_squared_error')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='mean_squared_error')
    # Check Init performance
    t1 = time()
    # (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
    # hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
    # hr, ndcg = 0, 0
    # print('Init: HR = %.4f, NDCG = %.4f [%.1f]' % (hr, ndcg, time() - t1))
    # best_hr, best_ndcg, best_iter = hr, ndcg, -1
    (rmse, mae) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
    print('Init: rmse = %.4f, mae = %.4f [%.1f]' % (rmse, mae, time() - t1))
    best_rmse, best_mae, best_iter = rmse, mae, -1
    # Train model
    for epoch in range(epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train, num_negatives)
        # print(len(user_input), len(item_input))
        # print(user_matrix[user_input, :].shape, user_matrix[:, item_input].shape)
        # Training
        # hist = model.fit([np.array(user_input), np.array(item_input), np.array(user_matrix)], # input
        hist = model.fit([np.array(user_input), np.array(item_input)], # input
                         np.array(labels), # labels
                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation
        if epoch % verbose == 0:
            (rmse, mae) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
            rmse_new, mae_new, loss = np.array(rmse).mean(), np.array(mae).mean(), hist.history['loss'][0]
            print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2 - t1, rmse_new, mae_new, loss, time() - t2))
            # NOTE(review): for RMSE lower is better; `rmse_new > best_rmse` keeps
            # the *worst* checkpoint -- this looks like leftover higher-is-better
            # HR logic from the original script. Verify before relying on the
            # saved "best" model.
            if rmse_new > best_rmse:
                best_rmse, best_mae, best_iter = rmse_new, mae_new, epoch
                if args.out > 0:
                    model.save_weights(model_out_file, overwrite=True)
    print("End. Best Iteration %d: rmse = %.4f, mae = %.4f. " % (best_iter, best_rmse, best_mae))
    if args.out > 0:
        print("The best DMF model is saved to %s" % model_out_file)
| 11,843 | 43.19403 | 139 | py |
BPAM | BPAM-master/Baselines/DeepCF/DMF_implicit2.py | import numpy as np
import tensorflow as tf
from keras import initializers
from keras.regularizers import l2
from keras.models import Model
from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, merge
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras import backend as K
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import argparse
def parse_args(argv=None):
    """Parse command-line arguments for the DMF model.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``,
            so the original ``parse_args()`` call pattern is unchanged.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Run DMF.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=20,  # originally 100
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--userlayers', nargs='?', default='[512, 64]',
                        help="Size of each user layer")
    parser.add_argument('--itemlayers', nargs='?', default='[1024, 64]',
                        help="Size of each item layer")
    parser.add_argument('--reg_layers', nargs='?', default='[0,0]',
                        help="Regularization for each layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args(argv)
def get_model(train, num_users, num_items, userlayers=[512, 64], itemlayers=[1024, 64], reg_layers=[0, 0]):
    """Build the DMF-style model: two towers embed a user's rating row and an
    item's rating column; their element-wise product is scored by a Dense(1).

    Args:
        train: sparse user-item training matrix (densified via getTrainMatrix).
        num_users: number of users (rows of `train`).
        num_items: number of items (columns of `train`).
        userlayers: Dense widths of the user tower (first layer is linear).
        itemlayers: Dense widths of the item tower (first layer is linear).
        reg_layers: L2 regularization factors, one per tower layer.

    Returns:
        An uncompiled keras Model mapping (user index, item index) -> prediction.
    """
    # assert len(userlayers) == len(reg_layers)
    # assert len(itemlayers) == len(reg_layers)
    num_layer = len(userlayers) # Number of layers in the MLP
    # Bake the dense rating matrix and its transpose into the graph as constants.
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)
    # Input variables
    user = Input(shape = (1,), dtype='int32', name='user_input')
    item = Input(shape = (1,), dtype='int32', name='item_input')
    # Multi-hot User representation and Item representation
    user_input = Lambda(lambda x: tf.gather(user_matrix, tf.to_int32(x)))(user)
    item_input = Lambda(lambda x: tf.gather(item_matrix, tf.to_int32(x)))(item)
    # Without the Reshape the tensor would be 3-D (with a middle dimension of 1);
    # after the Reshape it is 2-D: the first dimension is the batch, the second
    # is the number of items / number of users.
    user_input = Reshape((num_items, ))(user_input)
    item_input = Reshape((num_users, ))(item_input)
    print(user_input, item_input)
    # DMF part
    userlayer = Dense(userlayers[0], kernel_regularizer=l2(reg_layers[0]), activation="linear" , name='user_layer0')
    itemlayer = Dense(itemlayers[0], kernel_regularizer=l2(reg_layers[0]), activation="linear" , name='item_layer0')
    user_latent_vector = userlayer(user_input)
    item_latent_vector = itemlayer(item_input)
    print(user_latent_vector.shape, item_latent_vector.shape)
    for idx in range(1, num_layer):
        userlayer = Dense(userlayers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name='user_layer%d' % idx)
        itemlayer = Dense(itemlayers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name='item_layer%d' % idx)
        user_latent_vector = userlayer(user_latent_vector)
        item_latent_vector = itemlayer(item_latent_vector)
        print(user_latent_vector.shape, item_latent_vector.shape)
    # Before the Keras API change, merge's 'cos' mode computed the cosine
    # similarity of the two tensors over the mini-batch; the output then had to
    # be Reshaped, otherwise it ended up with three dimensions:
    # prediction = merge([user_latent_vector, item_latent_vector], mode='cos', dot_axes=1)
    # prediction = Reshape((1,))(prediction)
    # After the API change merge was deprecated; Dot works instead -- with
    # normalize=True the output is the cosine similarity:
    # prediction = Dot(axes=1, normalize=True)([user_latent_vector, item_latent_vector])
    # prediction = Lambda(lambda x: tf.maximum(x, 1e-6))(prediction)
    predict_vector = multiply([user_latent_vector, item_latent_vector])
    prediction = Dense(1,kernel_initializer=initializers.lecun_normal(), name='prediction')(predict_vector)
    print(predict_vector)
    model_ = Model(inputs=[user, item],
                   outputs=prediction)
    return model_
def getTrainMatrix(train):
    """Densify the sparse training interaction matrix.

    Args:
        train: dok-style sparse matrix (num_users x num_items) supporting
            ``.shape``, ``.keys()`` and ``train[u, i]`` indexing.

    Returns:
        A dense (num_users, num_items) float64 numpy array holding the
        observed ratings, with zeros for unobserved entries.
    """
    num_users, num_items = train.shape
    # np.float was deprecated and removed in NumPy 1.24; the builtin float
    # (== np.float64) is the supported spelling and is behaviorally identical.
    train_matrix = np.zeros([num_users, num_items], dtype=float)
    for (u, i) in train.keys():
        train_matrix[u][i] = train[u, i]
    return train_matrix
def get_train_instances(train, num_negatives):
    """Flatten the observed ratings into parallel training lists.

    Args:
        train: dok-style sparse rating matrix supporting ``.keys()`` and
            ``train[u, i]`` indexing.
        num_negatives: unused; kept for interface compatibility with the
            implicit-feedback variant (negative sampling is disabled for
            explicit ratings in this script).

    Returns:
        (user_input, item_input, labels) lists with one entry per observed
        (user, item) rating, in ``train.keys()`` iteration order.
    """
    user_input, item_input, labels = [], [], []
    for (u, i) in train.keys():
        user_input.append(u)
        item_input.append(i)
        labels.append(train[u, i])
    return user_input, item_input, labels
if __name__ == '__main__':
    # Train the explicit-rating DMF model end-to-end: parse CLI args, load the
    # dataset, compile with MSE loss, then train and checkpoint on best RMSE.
    args = parse_args()
    path = args.path
    dataset = args.dataset
    # NOTE(review): eval() parses list literals like "[512, 64]" but executes
    # arbitrary expressions; only pass trusted command-line values.
    userlayers = eval(args.userlayers)
    itemlayers = eval(args.itemlayers)
    reg_layers = eval(args.reg_layers)
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    batch_size = args.batch_size
    epochs = args.epochs
    verbose = args.verbose
    topK = 10
    evaluation_threads = 1 # mp.cpu_count()
    print("DMF arguments: %s " %(args))
    # Timestamped checkpoint name so repeated runs do not overwrite each other.
    model_out_file = 'Pretrain/%s_DMF_%d.h5' %(args.dataset, time())
    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          % (time()-t1, num_users, num_items, train.nnz, len(testRatings)))
    # print(train.shape)
    # input("Feel free~")
    # Build model
    model = get_model(train, num_users, num_items, userlayers, itemlayers, reg_layers)
    # Regression setting: MSE loss regardless of optimizer choice.
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='mean_squared_error') #change mean_squared_error from binary_crossentropy
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='mean_squared_error')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='mean_squared_error')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='mean_squared_error')
    # Check Init performance
    t1 = time()
    # (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
    # hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
    # hr, ndcg = 0, 0
    # print('Init: HR = %.4f, NDCG = %.4f [%.1f]' % (hr, ndcg, time() - t1))
    # best_hr, best_ndcg, best_iter = hr, ndcg, -1
    (rmse, mae) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
    rmse, mae = np.array(rmse).mean(), np.array(mae).mean()
    print(rmse, mae)
    # NOTE(review): the mean is square-rooted here, so evaluate_model
    # presumably returns per-user *squared* errors — confirm in evaluate.py.
    rmse = rmse ** 0.5
    # print(rmse, mae)
    print('Init: rmse = %.4f, mae = %.4f [%.1f]' % (rmse, mae, time() - t1))
    best_rmse, best_mae, best_iter = rmse, mae, -1
    # Train model
    for epoch in range(epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train, num_negatives)
        # print(len(user_input), len(item_input))
        # print(user_matrix[user_input, :].shape, user_matrix[:, item_input].shape)
        # Training
        # hist = model.fit([np.array(user_input), np.array(item_input), np.array(user_matrix)], # input
        hist = model.fit([np.array(user_input), np.array(item_input)], # input
                         np.array(labels), # labels
                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation: every `verbose` epochs; checkpoint on RMSE improvement.
        if epoch % verbose == 0:
            (rmse, mae) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
            rmse_new, mae_new, loss = np.array(rmse).mean(), np.array(mae).mean(), hist.history['loss'][0]
            rmse_new = rmse_new ** 0.5
            print(rmse_new, mae_new)
            print('Iteration %d [%.1f s]: rmse = %.4f, mae = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2 - t1, rmse_new, mae_new, loss, time() - t2))
            if rmse_new < best_rmse:
                best_rmse, best_mae, best_iter = rmse_new, mae_new, epoch
                if args.out > 0:
                    model.save_weights(model_out_file, overwrite=True)
    print("End. Best Iteration %d: rmse = %.4f, mae = %.4f. " % (best_iter, best_rmse, best_mae))
    if args.out > 0:
        print("The best DMF model is saved to %s" % model_out_file)
| 9,721 | 43.801843 | 138 | py |
BPAM | BPAM-master/Baselines/DeepCF/DeepCF4.py | import numpy as np
import tensorflow as tf
from keras import initializers
from keras.regularizers import l2
from keras.models import Model
from keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dot, Lambda, multiply, Reshape, multiply
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras import backend as K
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import argparse
import DMF_implicit2
import MLP2
def parse_args(argv=None):
    """Parse command-line arguments for DeepCF training.

    Args:
        argv: optional list of argument strings; ``None`` (the default, which
            keeps the original call sites working) falls back to
            ``sys.argv[1:]``. Passing an explicit list makes the function
            testable without mutating ``sys.argv``.

    Returns:
        argparse.Namespace holding every hyper-parameter.
    """
    parser = argparse.ArgumentParser(description="Run DeepCF.")
    parser.add_argument('--path', nargs='?', default='data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=20,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--layers', nargs='?', default='[512,256,128,64]',
                        help="MLP layers. Note that the first layer is the concatenation "
                             "of user and item embeddings. So layers[0]/2 is the embedding size.")
    parser.add_argument('--userlayers', nargs='?', default='[512, 64]',
                        help="Size of each user layer")
    parser.add_argument('--itemlayers', nargs='?', default='[1024, 64]',
                        help="Size of each item layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.0001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='sgd',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=0,
                        help='Whether to save the trained model.')
    parser.add_argument('--dmf_pretrain', nargs='?', default='',
                        help='Specify the pretrain model file for DMF part. If empty, no pretrain will be used')
    parser.add_argument('--mlp_pretrain', nargs='?', default='',
                        help='Specify the pretrain model file for MLP part. If empty, no pretrain will be used')
    return parser.parse_args(argv)
def get_model(train, num_users, num_items, userlayers, itemlayers, layers):
    """Build the DeepCF model: a DMF branch (element-wise product of latent
    user/item vectors) fused with an MLP branch, both operating on the raw
    rating vectors gathered from the training matrix.

    Args:
        train: training interaction matrix (dok-style sparse, users x items);
            densified and baked into the graph as constants.
        num_users / num_items: matrix dimensions.
        userlayers / itemlayers: layer sizes for the DMF user/item towers.
        layers: MLP tower sizes; layers[0]/2 is the per-side embedding size.

    Returns:
        An uncompiled Keras Model mapping (user id, item id) -> score in (0, 1).
    """
    # assert len(layers) == len(reg_layers)
    dmf_num_layer = len(userlayers)  # number of layers in the DMF towers
    mlp_num_layer = len(layers)      # number of layers in the MLP tower
    # Whole rating matrix (and its transpose) as graph constants; each id is
    # looked up to obtain a user's rating row / an item's rating column.
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    # Gather the multi-hot rating vectors. tf.cast replaces the deprecated
    # tf.to_int32, which was removed in TF 2.x (tf.cast exists in 1.x too).
    user_rating = Lambda(lambda x: tf.gather(user_matrix, tf.cast(x, tf.int32)))(user_input)
    item_rating = Lambda(lambda x: tf.gather(item_matrix, tf.cast(x, tf.int32)))(item_input)
    # Drop the length-1 middle axis so shapes are (batch, num_items/num_users).
    user_rating = Reshape((num_items, ))(user_rating)
    item_rating = Reshape((num_users, ))(item_rating)
    # DMF part: linear projection first, ReLU towers afterwards. Layer names
    # must stay stable — load_pretrain_model1 looks them up by name.
    userlayer = Dense(userlayers[0], activation="linear", name='user_layer0')
    itemlayer = Dense(itemlayers[0], activation="linear", name='item_layer0')
    dmf_user_latent = userlayer(user_rating)
    dmf_item_latent = itemlayer(item_rating)
    for idx in range(1, dmf_num_layer):
        userlayer = Dense(userlayers[idx], activation='relu', name='user_layer%d' % idx)
        itemlayer = Dense(itemlayers[idx], activation='relu', name='item_layer%d' % idx)
        dmf_user_latent = userlayer(dmf_user_latent)
        dmf_item_latent = itemlayer(dmf_item_latent)
    dmf_vector = multiply([dmf_user_latent, dmf_item_latent])
    # MLP part: project each side to layers[0]/2, concatenate, then ReLU tower.
    MLP_Embedding_User = Dense(layers[0]//2, activation="linear", name='user_embedding')
    MLP_Embedding_Item = Dense(layers[0]//2, activation="linear", name='item_embedding')
    mlp_user_latent = MLP_Embedding_User(user_rating)
    mlp_item_latent = MLP_Embedding_Item(item_rating)
    mlp_vector = concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, mlp_num_layer):
        layer = Dense(layers[idx], activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)
    # Concatenate DMF and MLP parts
    predict_vector = concatenate([dmf_vector, mlp_vector])
    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name="prediction")(predict_vector)
    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)
    return model_
def getTrainMatrix(train):
    """Return the dense (num_users, num_items) float64 array of observed
    ratings from the dok-style sparse matrix ``train`` (zeros elsewhere)."""
    num_users, num_items = train.shape
    # dtype=float replaces np.float, which was removed in NumPy 1.24.
    train_matrix = np.zeros([num_users, num_items], dtype=float)
    for (u, i) in train.keys():
        train_matrix[u][i] = train[u, i]
    return train_matrix
def load_pretrain_model1(model, dmf_model, dmf_layers,):
    """Copy pretrained DMF weights into the matching layers of `model`.

    The prediction kernel is zero-padded on the MLP side so that, until
    load_pretrain_model2 fills in the MLP half, only the DMF branch
    contributes to the output.

    Args:
        model: combined DeepCF model whose layers will be overwritten in place.
        dmf_model: pretrained DMF model with identically named layers.
        dmf_layers: DMF tower sizes; the last entry is the DMF output width.

    Returns:
        `model`, with the DMF-side weights loaded.
    """
    # get_weights() returns a list of weight arrays (kernel, bias).
    # MF embeddings
    dmf_user_embeddings = dmf_model.get_layer('user_layer0').get_weights()
    dmf_item_embeddings = dmf_model.get_layer('item_layer0').get_weights()
    model.get_layer('user_layer0').set_weights(dmf_user_embeddings)
    model.get_layer('item_layer0').set_weights(dmf_item_embeddings)
    # DMF layers
    for i in range(1, len(dmf_layers)):
        dmf_user_layer_weights = dmf_model.get_layer('user_layer%d' % i).get_weights()
        model.get_layer('user_layer%d' % i).set_weights(dmf_user_layer_weights)
        dmf_item_layer_weights = dmf_model.get_layer('item_layer%d' % i).get_weights()
        model.get_layer('item_layer%d' % i).set_weights(dmf_item_layer_weights)
    # Prediction weights
    dmf_prediction = dmf_model.get_layer('prediction').get_weights()
    # print(dmf_prediction[0]) # index 0 is the kernel: one nested weight per input unit, e.g. [[1.1], [3.4], [1.2], [2.5]]
    # print(dmf_prediction[1]) # index 1 is the bias, e.g. [1.4]
    # Zero rows are appended for the MLP half of the concatenated vector.
    # NOTE(review): the padding width is dmf_layers[-1]; this only matches if
    # the MLP output width equals the DMF output width — confirm that the
    # intended configurations satisfy layers[-1] == dmf_layers[-1].
    new_weights = np.concatenate((dmf_prediction[0], np.array([[0,]] * dmf_layers[-1])), axis=0)
    new_b = dmf_prediction[1]
    model.get_layer('prediction').set_weights([new_weights, new_b])
    return model
def load_pretrain_model2(model, mlp_model, mlp_layers):
    """Copy pretrained MLP weights into `model` and blend the prediction layer.

    Intended to run after load_pretrain_model1: the DMF half of the prediction
    kernel is already in `model`, so the DMF and MLP kernels are concatenated
    and both halved (0.5 each) to give equal initial contributions.

    Args:
        model: combined DeepCF model (DMF weights already loaded).
        mlp_model: pretrained MLP model with identically named layers.
        mlp_layers: MLP tower sizes; the last entry is the MLP output width.

    Returns:
        `model`, with MLP-side weights loaded and the prediction layer blended.
    """
    # MLP embeddings
    mlp_user_embeddings = mlp_model.get_layer('user_embedding').get_weights()
    mlp_item_embeddings = mlp_model.get_layer('item_embedding').get_weights()
    model.get_layer('user_embedding').set_weights(mlp_user_embeddings)
    model.get_layer('item_embedding').set_weights(mlp_item_embeddings)
    # MLP layers
    for i in range(1, len(mlp_layers)):
        mlp_layer_weights = mlp_model.get_layer('layer%d' % i).get_weights()
        model.get_layer('layer%d' % i).set_weights(mlp_layer_weights)
    # Prediction weights
    # NOTE(review): the slice keeps the first mlp_layers[-1] rows of the
    # current kernel as "the DMF half" — correct only when the DMF output
    # width equals mlp_layers[-1]; confirm against load_pretrain_model1.
    dmf_prediction = model.get_layer('prediction').get_weights()
    mlp_prediction = mlp_model.get_layer('prediction').get_weights()
    new_weights = np.concatenate((dmf_prediction[0][:mlp_layers[-1]], mlp_prediction[0]), axis=0)
    new_b = dmf_prediction[1] + mlp_prediction[1]
    # 0.5 means the contributions of MF and MLP are equal
    model.get_layer('prediction').set_weights([0.5*new_weights, 0.5*new_b])
    return model
def get_train_instances(train):
    """Flatten the observed ratings into parallel training lists.

    Args:
        train: dok-style sparse rating matrix supporting ``.keys()`` and
            ``train[u, i]`` indexing.

    Returns:
        (user_input, item_input, labels) lists with one entry per observed
        (user, item) rating, in ``train.keys()`` iteration order. No negative
        sampling is performed: this script trains on explicit ratings.
    """
    user_input, item_input, labels = [], [], []
    for (u, i) in train.keys():
        user_input.append(u)
        item_input.append(i)
        labels.append(train[u, i])
    return user_input, item_input, labels
if __name__ == '__main__':
    # Train DeepCF (fused DMF + MLP): parse CLI args, load data, optionally
    # warm-start from pretrained DMF/MLP weights, then train with MSE loss
    # and checkpoint whenever the test RMSE improves.
    args = parse_args()
    path = args.path
    dataset = args.dataset
    # NOTE(review): eval() parses list literals but executes arbitrary
    # expressions; only pass trusted command-line values.
    userlayers = eval(args.userlayers)
    itemlayers = eval(args.itemlayers)
    layers = eval(args.layers)
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    batch_size = args.batch_size
    num_epochs = args.epochs
    verbose = args.verbose
    dmf_pretrain = args.dmf_pretrain
    mlp_pretrain = args.mlp_pretrain
    topK = 10
    evaluation_threads = 1 # mp.cpu_count()
    print("DeepCF arguments: %s " % args)
    # Timestamped checkpoint name so repeated runs do not overwrite each other.
    model_out_file = 'Pretrain/%s_DeepCF_%s_%s_%s_%d.h5' %(args.dataset, args.userlayers, args.itemlayers, args.layers, time())
    # Loading data
    t1 = time()
    start = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings = dataset.trainMatrix, dataset.testRatings
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          %(time()-t1, num_users, num_items, train.nnz, len(testRatings)))
    # Build model
    model = get_model(train, num_users, num_items, userlayers, itemlayers, layers)
    # Regression setting: MSE loss regardless of optimizer choice.
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='mean_squared_error') # change mean_squared_error from binary_crossentropy
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='mean_squared_error')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='mean_squared_error')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='mean_squared_error')
    # Load pretrain model: first DMF weights (MLP half zero-padded), then MLP
    # weights with a 0.5/0.5 blend of the two prediction layers.
    if dmf_pretrain != '' and mlp_pretrain != '':
        dmf_model = DMF_implicit2.get_model(train, num_users, num_items, userlayers, itemlayers)
        dmf_model.load_weights(dmf_pretrain)
        model = load_pretrain_model1(model, dmf_model, userlayers)
        del dmf_model
        mlp_model = MLP2.get_model(train, num_users, num_items, layers)
        mlp_model.load_weights(mlp_pretrain)
        model = load_pretrain_model2(model, mlp_model, layers)
        del mlp_model
        print("Load pretrained DMF (%s) and MLP (%s) models done. " % (dmf_pretrain, mlp_pretrain))
    # Check Init performance
    (rmse, mae) = evaluate_model(model, testRatings, topK, evaluation_threads)
    #rmse, mae = np.array(rmses).mean(), np.array(maes).mean()
    #rmse = rmse ** 0.5
    print('Init: rmse = %.4f, mae = %.4f' % (rmse, mae))
    best_rmse, best_mae, best_iter = rmse, mae, -1
    # Save once right after initialization: if the pretrained weights are
    # already the best and performance only degrades afterwards, the best
    # model is still on disk even if the run is aborted early.
    if args.out > 0:
        model.save_weights(model_out_file, overwrite=True)
    # Training model
    for epoch in range(num_epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train)
        # Training
        hist = model.fit([np.array(user_input), np.array(item_input)], # input
                         np.array(labels), # labels
                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation: every `verbose` epochs; checkpoint on RMSE improvement.
        if epoch % verbose == 0:
            (rmses, maes) = evaluate_model(model, testRatings, topK, evaluation_threads)
            rmse, mae, loss = rmses, maes, hist.history['loss'][0]
            print('Iteration %d [%.1f s]: rmse = %.4f, mae = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2-t1, rmse, mae, loss, time()-t2))
            if rmse < best_rmse:
                best_rmse, best_mae, best_iter = rmse, mae, epoch
                if args.out > 0: # checkpoint whenever a new best result is found
                    model.save_weights(model_out_file, overwrite=True)
    print("End. Best Iteration %d: rmse = %.4f, mae = %.4f. " %(best_iter, best_rmse, best_mae))
    t = time()
    print("the time is: ", t - start)
    if args.out > 0:
        print("The best DeepCF model is saved to %s" % model_out_file)
| 12,443 | 44.582418 | 139 | py |
BPAM | BPAM-master/Baselines/NMF/GMF.py | '''
Created on Aug 9, 2016
Keras Implementation of Generalized Matrix Factorization (GMF) recommender model in:
He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
@author: Xiangnan He (xiangnanhe@gmail.com)
'''
import numpy as np
import theano.tensor as T
from keras import backend as K
from keras import initializers
from keras.models import Sequential, Model, load_model, save_model
from keras.layers.core import Dense, Lambda, Activation
from keras.layers import Embedding, Input, Dense, merge, Reshape, Flatten
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras.regularizers import l2
from Dataset import Dataset
from evaluate import evaluate_model
from time import time
import multiprocessing as mp
import sys
import math
import argparse
#################### Arguments ####################
def parse_args(argv=None):
    """Parse command-line arguments for GMF training.

    Args:
        argv: optional list of argument strings; ``None`` (the default, which
            keeps the original call sites working) falls back to
            ``sys.argv[1:]``. Passing an explicit list makes the function
            testable without mutating ``sys.argv``.

    Returns:
        argparse.Namespace holding every hyper-parameter.
    """
    parser = argparse.ArgumentParser(description="Run GMF.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--num_factors', type=int, default=8,
                        help='Embedding size.')
    parser.add_argument('--regs', nargs='?', default='[0,0]',
                        help="Regularization for user and item embeddings.")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args(argv)
def init_normal(shape, name=None):
    """Gaussian weight initializer (stddev 0.01), Keras 1 calling convention.

    NOTE(review): `initializers.normal(shape, stddev=..., name=...)` follows
    the Keras 1 `initializations` signature; in Keras 2, `initializers.normal`
    is the RandomNormal class taking (mean, stddev, seed) and no shape/name —
    confirm the pinned Keras version before relying on this helper.
    """
    return initializers.normal(shape,stddev=0.01, name=name)
def get_model(num_users, num_items, latent_dim, regs=[0,0]):
    """Build the GMF model: element-wise product of user/item embeddings
    followed by a single sigmoid output unit.

    NOTE(review): this function uses the Keras 1 API throughout (`init=`,
    `W_regularizer=`, `merge(..., mode='mul')`, `Model(input=, output=)`);
    those spellings were removed in Keras 2 — confirm the pinned Keras
    version before running.

    Args:
        num_users / num_items: embedding vocabulary sizes.
        latent_dim: shared embedding size.
        regs: L2 regularization factors [user, item].

    Returns:
        An uncompiled Keras model mapping (user id, item id) -> score in (0, 1).
    """
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
    item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding',
                                  init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding',
                                  init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
    # Crucial to flatten an embedding vector! (embeddings come out (batch, 1, dim))
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    # Element-wise product of user and item embeddings
    predict_vector = merge([user_latent, item_latent], mode = 'mul')
    # Final prediction layer
    #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = 'prediction')(predict_vector)
    model = Model(input=[user_input, item_input],
                  output=prediction)
    return model
def get_train_instances(train):
    """Flatten the observed ratings into parallel training lists.

    Args:
        train: dok-style sparse rating matrix supporting ``.keys()`` and
            ``train[u, i]`` indexing.

    Returns:
        (user_input, item_input, labels) lists with one entry per observed
        (user, item) rating, in ``train.keys()`` iteration order. Negative
        sampling is intentionally disabled for this explicit-rating setup.
    """
    user_input, item_input, labels = [], [], []
    for (u, i) in train.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(train[u, i])
    return user_input, item_input, labels
if __name__ == '__main__':
    # Train GMF: parse CLI args, load data, compile, then train and
    # checkpoint on best test RMSE.
    args = parse_args()
    num_factors = args.num_factors
    # NOTE(review): eval() parses list literals like "[0,0]" but executes
    # arbitrary expressions; only pass trusted command-line values.
    regs = eval(args.regs)
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    epochs = args.epochs
    batch_size = args.batch_size
    verbose = args.verbose
    topK = 10
    evaluation_threads = 1  # mp.cpu_count()
    print("GMF arguments: %s" % (args))
    # Timestamped checkpoint name so repeated runs do not overwrite each other.
    model_out_file = 'Pretrain/%s_GMF_%d_%d.h5' % (args.dataset, num_factors, time())
    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings = dataset.trainMatrix, dataset.testRatings
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          % (time() - t1, num_users, num_items, train.nnz, len(testRatings)))
    # Build model
    model = get_model(num_users, num_items, num_factors, regs)
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
    # Check Init performance before any training.
    t1 = time()
    (rmse, mae) = evaluate_model(model, testRatings, topK, evaluation_threads)
    print('Init: rmse = %.4f, mae = %.4f' % (rmse, mae))
    best_rmse, best_mae, best_iter = rmse, mae, -1
    # Save once right after initialization: if the initial weights are already
    # the best and performance only degrades afterwards, the best model is
    # still on disk even if the run is aborted early.
    if args.out > 0:
        model.save_weights(model_out_file, overwrite=True)
    # Training model.
    # BUG FIX: the loop previously iterated over the undefined name
    # 'num_epochs' (NameError at runtime); the parsed value is in 'epochs'.
    for epoch in range(epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train)
        # Training
        hist = model.fit([np.array(user_input), np.array(item_input)],  # input
                         np.array(labels),  # labels
                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation: every `verbose` epochs; checkpoint on RMSE improvement.
        if epoch % verbose == 0:
            (rmses, maes) = evaluate_model(model, testRatings, topK, evaluation_threads)
            rmse, mae, loss = rmses, maes, hist.history['loss'][0]
            print('Iteration %d [%.1f s]: rmse = %.4f, mae = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2 - t1, rmse, mae, loss, time() - t2))
            if rmse < best_rmse:
                best_rmse, best_mae, best_iter = rmse, mae, epoch
                if args.out > 0:  # checkpoint whenever a new best result is found
                    model.save_weights(model_out_file, overwrite=True)
    print("End. Best Iteration %d: rmse = %.4f, mae = %.4f. " % (best_iter, best_rmse, best_mae))
    if args.out > 0:
        # BUG FIX: the message previously named "DeepCF" in the GMF script.
        print("The best GMF model is saved to %s" % model_out_file)
| 7,381 | 41.425287 | 106 | py |
BPAM | BPAM-master/Baselines/NMF/NeuMF.py | '''
Created on Aug 9, 2016
Keras Implementation of Neural Matrix Factorization (NeuMF) recommender model in:
He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
@author: Xiangnan He (xiangnanhe@gmail.com)
'''
import numpy as np
import theano
import theano.tensor as T
import keras
from keras import backend as K
from keras import initializers
from keras.regularizers import l1, l2, l1_l2
from keras.models import Sequential, Model
from keras.layers.core import Dense, Lambda, Activation
from keras.layers import Embedding, multiply, Input, concatenate, Dense, merge, Reshape, Flatten, Dropout
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import sys
import GMF, MLP
import argparse
#################### Arguments ####################
def parse_args(argv=None):
    """Parse command-line arguments for NeuMF training.

    Args:
        argv: optional list of argument strings; ``None`` (the default, which
            keeps the original call sites working) falls back to
            ``sys.argv[1:]``. Passing an explicit list makes the function
            testable without mutating ``sys.argv``.

    Returns:
        argparse.Namespace holding every hyper-parameter.
    """
    parser = argparse.ArgumentParser(description="Run NeuMF.")
    parser.add_argument('--path', nargs='?', default='../DeepCF/data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--num_factors', type=int, default=8,
                        help='Embedding size of MF model.')
    parser.add_argument('--layers', nargs='?', default='[64,32,16,8]',
                        help="MLP layers. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
    parser.add_argument('--reg_mf', type=float, default=0,
                        help='Regularization for MF embeddings.')
    parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0]',
                        help="Regularization for each MLP layer. reg_layers[0] is the regularization for embeddings.")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    parser.add_argument('--mf_pretrain', nargs='?', default='',
                        help='Specify the pretrain model file for MF part. If empty, no pretrain will be used')
    parser.add_argument('--mlp_pretrain', nargs='?', default='',
                        help='Specify the pretrain model file for MLP part. If empty, no pretrain will be used')
    return parser.parse_args(argv)
def init_normal(name=None):
    """Return a Keras VarianceScaling initializer with scale 0.01.

    The `name` parameter is accepted for interface parity with the Keras 1
    style initializer helpers elsewhere in this repo but is ignored here.
    """
    return initializers.VarianceScaling(scale=0.01)
def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0):
    """Build the NeuMF model: a GMF branch and an MLP branch over shared
    user/item id inputs, fused by a final sigmoid unit.

    Args:
        num_users / num_items: embedding vocabulary sizes.
        mf_dim: embedding size of the GMF (matrix-factorisation) branch.
        layers: MLP tower sizes; layers[0]/2 is the MLP embedding size.
        reg_layers: L2 factors per MLP layer (reg_layers[0] for embeddings).
        reg_mf: L2 factor for the GMF embeddings.

    Returns:
        An uncompiled Keras Model mapping (user id, item id) -> score in (0, 1).
    """
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    # Embedding layers. Keras 2 argument names (embeddings_regularizer)
    # replace the Keras 1 'W_regularizer' spelling, which Keras 2 rejects;
    # this file already relies on Keras-2-only imports (multiply/concatenate).
    # Layer names must stay stable — load_pretrain_model looks them up by name.
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',
                                  embeddings_regularizer=l2(reg_mf), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',
                                  embeddings_regularizer=l2(reg_mf), input_length=1)
    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=int(layers[0]/2), name="mlp_embedding_user",
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=int(layers[0]/2), name='mlp_embedding_item',
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)
    # MF part: element-wise product of the flattened embeddings.
    mf_user_latent = Flatten()(MF_Embedding_User(user_input))
    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
    mf_vector = multiply([mf_user_latent, mf_item_latent])
    # MLP part: concatenated embeddings pushed through a ReLU tower.
    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
    mlp_vector = concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)
    # Concatenate MF and MLP parts
    predict_vector = concatenate([mf_vector, mlp_vector])
    # Final prediction layer (kernel_initializer is the Keras 2 name of 'init').
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)
    model = Model(inputs=[user_input, item_input],
                  outputs=prediction)
    return model
def load_pretrain_model(model, gmf_model, mlp_model, num_layers):
    """Initialise a NeuMF model from pretrained GMF and MLP models.

    Copies both embedding tables and every MLP hidden layer by name, then
    builds the fused prediction layer by concatenating the two single-branch
    kernels and halving both kernel and bias so each branch contributes
    equally at the start of fine-tuning.

    Args:
        model: the NeuMF model to initialise in place.
        gmf_model: pretrained GMF model (layers 'user_embedding',
            'item_embedding', 'prediction').
        mlp_model: pretrained MLP model (embeddings, 'layer%d', 'prediction').
        num_layers: number of MLP tower layers (loop copies layers 1..n-1).

    Returns:
        `model`, with pretrained weights loaded.
    """
    # MF embeddings
    gmf_user_embeddings = gmf_model.get_layer('user_embedding').get_weights()
    gmf_item_embeddings = gmf_model.get_layer('item_embedding').get_weights()
    model.get_layer('mf_embedding_user').set_weights(gmf_user_embeddings)
    model.get_layer('mf_embedding_item').set_weights(gmf_item_embeddings)
    # MLP embeddings
    mlp_user_embeddings = mlp_model.get_layer('user_embedding').get_weights()
    mlp_item_embeddings = mlp_model.get_layer('item_embedding').get_weights()
    model.get_layer('mlp_embedding_user').set_weights(mlp_user_embeddings)
    model.get_layer('mlp_embedding_item').set_weights(mlp_item_embeddings)
    # MLP layers
    for i in range(1, num_layers):
        mlp_layer_weights = mlp_model.get_layer('layer%d' %i).get_weights()
        model.get_layer('layer%d' %i).set_weights(mlp_layer_weights)
    # Prediction weights: kernels stacked to match the concatenated
    # [mf_vector, mlp_vector] input; biases summed before scaling.
    gmf_prediction = gmf_model.get_layer('prediction').get_weights()
    mlp_prediction = mlp_model.get_layer('prediction').get_weights()
    new_weights = np.concatenate((gmf_prediction[0], mlp_prediction[0]), axis=0)
    new_b = gmf_prediction[1] + mlp_prediction[1]
    # 0.5 gives the GMF and MLP halves equal initial contributions.
    model.get_layer('prediction').set_weights([0.5*new_weights, 0.5*new_b])
    return model
def get_train_instances(train):
    """Turn every observed (user, item) rating in ``train`` into one training
    triple.

    Args:
        train: dok-style sparse rating matrix supporting ``.keys()`` and
            ``train[u, i]`` indexing.

    Returns:
        The parallel lists (user_input, item_input, labels), one entry per
        observed rating, in ``train.keys()`` iteration order.
    """
    observed = list(train.keys())
    user_input = [u for u, _ in observed]
    item_input = [i for _, i in observed]
    labels = [train[u, i] for u, i in observed]
    return user_input, item_input, labels
if __name__ == '__main__':
    # Train NeuMF: parse CLI args, load data, optionally warm-start from
    # pretrained GMF/MLP weights, then train and checkpoint on best RMSE.
    args = parse_args()
    num_epochs = args.epochs
    batch_size = args.batch_size
    mf_dim = args.num_factors
    # NOTE(review): eval() parses list literals but executes arbitrary
    # expressions; only pass trusted command-line values.
    layers = eval(args.layers)
    reg_mf = args.reg_mf
    reg_layers = eval(args.reg_layers)
    num_negatives = args.num_neg
    learning_rate = args.lr
    learner = args.learner
    verbose = args.verbose
    mf_pretrain = args.mf_pretrain
    mlp_pretrain = args.mlp_pretrain
    topK = 10
    evaluation_threads = 1#mp.cpu_count()
    print("NeuMF arguments: %s " %(args))
    # Timestamped checkpoint name so repeated runs do not overwrite each other.
    model_out_file = 'Pretrain/%s_NeuMF_%d_%s_%d.h5' %(args.dataset, mf_dim, args.layers, time())
    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings = dataset.trainMatrix, dataset.testRatings
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          %(time()-t1, num_users, num_items, train.nnz, len(testRatings)))
    # Build model
    model = get_model(num_users, num_items, mf_dim, layers, reg_layers, reg_mf)
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "adam":
        # NOTE(review): adam alone uses MAE while the other optimizers use
        # binary cross-entropy — confirm this asymmetry is intentional.
        model.compile(optimizer=Adam(lr=learning_rate), loss='MAE')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
    # Load pretrain model (both paths must be supplied, otherwise random init).
    if mf_pretrain != '' and mlp_pretrain != '':
        gmf_model = GMF.get_model(num_users,num_items,mf_dim)
        gmf_model.load_weights(mf_pretrain)
        mlp_model = MLP.get_model(num_users,num_items, layers, reg_layers)
        mlp_model.load_weights(mlp_pretrain)
        model = load_pretrain_model(model, gmf_model, mlp_model, len(layers))
        print("Load pretrained GMF (%s) and MLP (%s) models done. " %(mf_pretrain, mlp_pretrain))
    # Init performance
    t1 = time()
    start = time()
    # Check Init performance
    (rmse, mae) = evaluate_model(model, testRatings, topK, evaluation_threads)
    #rmse, mae = np.array(rmses).mean(), np.array(maes).mean()
    #rmse = rmse ** 0.5
    print('Init: rmse = %.4f, mae = %.4f' % (rmse, mae))
    best_rmse, best_mae, best_iter = rmse, mae, -1
    # Save once right after initialization: if the pretrained weights are
    # already the best and performance only degrades afterwards, the best
    # model is still on disk even if the run is aborted early.
    if args.out > 0:
        model.save_weights(model_out_file, overwrite=True)
    # Training model
    for epoch in range(num_epochs):
        t1 = time()
        # Generate training instances
        user_input, item_input, labels = get_train_instances(train)
        # Training
        hist = model.fit([np.array(user_input), np.array(item_input)], # input
                         np.array(labels), # labels
                         batch_size=batch_size, epochs=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation: every `verbose` epochs; checkpoint on RMSE improvement.
        if epoch % verbose == 0:
            (rmses, maes) = evaluate_model(model, testRatings, topK, evaluation_threads)
            rmse, mae, loss = rmses, maes, hist.history['loss'][0]
            print('Iteration %d [%.1f s]: rmse = %.4f, mae = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2-t1, rmse, mae, loss, time()-t2))
            if rmse < best_rmse:
                best_rmse, best_mae, best_iter = rmse, mae, epoch
                if args.out > 0: # checkpoint whenever a new best result is found
                    model.save_weights(model_out_file, overwrite=True)
    print("the time is: " , time() - start)
    print("End. Best Iteration %d: rmse = %.4f, mae = %.4f. " %(best_iter, best_rmse, best_mae))
    if args.out > 0:
        # NOTE(review): message says "DeepCF" but this script trains NeuMF —
        # likely a copy-paste leftover.
        print("The best DeepCF model is saved to %s" % model_out_file)
| 11,298 | 46.079167 | 157 | py |
BPAM | BPAM-master/Baselines/NMF/MLP.py | '''
Created on Aug 9, 2016
Keras Implementation of Multi-Layer Perceptron (GMF) recommender model in:
He Xiangnan et al. Neural Collaborative Filtering. In WWW 2017.
@author: Xiangnan He (xiangnanhe@gmail.com)
'''
import numpy as np
import theano
import theano.tensor as T
import keras
from keras import backend as K
from keras import initializers
from keras.regularizers import l2
from keras.models import Sequential, Model
from keras.layers.core import Dense, Lambda, Activation
from keras.layers import Embedding, Input, Dense, merge, Reshape, Flatten, Dropout
from keras.constraints import maxnorm
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from evaluate import evaluate_model
from Dataset import Dataset
from time import time
import sys
import argparse
import multiprocessing as mp
#################### Arguments ####################
def parse_args(argv=None):
    """Parse command-line options for the MLP trainer.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` — so existing
            callers (``parse_args()``) are unaffected. Passing an explicit
            list makes the parser usable from tests and notebooks.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Run MLP.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--layers', nargs='?', default='[64,32,16,8]',
                        help="Size of each layer. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
    parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0]',
                        help="Regularization for each layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args(argv)
def init_normal(shape, name=None):
    # Weight initializer: N(0, 0.01) normal draws for the given shape.
    # NOTE(review): `initializers.normal(shape, scale=..., name=...)` is the
    # Keras 1.x API; Keras 2+ replaced it with
    # `initializers.RandomNormal(stddev=...)` — confirm the target version.
    return initializers.normal(shape, scale=0.01, name=name)
def get_model(num_users, num_items, layers = [20,10], reg_layers=[0,0]):
    """Build the MLP part of the NCF model (Keras 1.x functional API).

    Args:
        num_users: number of users (embedding vocabulary size).
        num_items: number of items (embedding vocabulary size).
        layers: layer sizes; layers[0] is the concatenated embedding width,
            so each embedding is layers[0]/2 wide.
        reg_layers: L2 regularization strength per layer (parallel to layers).

    Returns:
        an uncompiled keras Model mapping (user_id, item_id) -> sigmoid score.
    """
    # NOTE(review): mutable default args (`layers`, `reg_layers`) are never
    # mutated here, but replacing them with None-defaults would be safer.
    assert len(layers) == len(reg_layers)
    num_layer = len(layers) #Number of layers in the MLP
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
    item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
    # NOTE(review): layers[0]/2 relies on Python 2 integer division; under
    # Python 3 this yields a float — use // if the script is ever ported.
    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = 'user_embedding',
                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'item_embedding',
                                  init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MLP_Embedding_User(user_input))
    item_latent = Flatten()(MLP_Embedding_Item(item_input))
    # The 0-th layer is the concatenation of embedding layers
    vector = merge([user_latent, item_latent], mode = 'concat')
    # MLP layers (layer 0 is the concatenation above, so start at index 1)
    for idx in xrange(1, num_layer):
        layer = Dense(layers[idx], W_regularizer= l2(reg_layers[idx]), activation='relu', name = 'layer%d' %idx)
        vector = layer(vector)
    # Final prediction layer: single sigmoid unit for implicit feedback
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = 'prediction')(vector)
    model = Model(input=[user_input, item_input],
                  output=prediction)
    return model
def get_train_instances(train, num_negatives):
    """Build pointwise training triples from a sparse interaction matrix.

    For every observed (user, item) pair one positive instance (label 1) is
    emitted, followed by `num_negatives` randomly sampled unobserved items
    for the same user (label 0).

    Args:
        train: scipy.sparse dok_matrix of shape (num_users, num_items)
            whose keys are the observed (user, item) interactions.
        num_negatives: number of negative samples per positive instance.

    Returns:
        (user_input, item_input, labels): three parallel lists.
    """
    user_input, item_input, labels = [], [], []
    # Derive the item count from the matrix itself instead of relying on a
    # module-level `num_items` global (the original also left an unused
    # `num_users` local behind).
    num_items = train.shape[1]
    for (u, i) in train.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(1)
        # negative instances: rejection-sample items the user has not rated
        for _ in range(num_negatives):
            j = np.random.randint(num_items)
            while (u, j) in train:
                j = np.random.randint(num_items)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)
    return user_input, item_input, labels
if __name__ == '__main__':
    # Script entry point: train an MLP-based NCF model, evaluating HR/NDCG
    # every `verbose` epochs and keeping the best checkpoint on disk.
    args = parse_args()
    path = args.path
    dataset = args.dataset
    # NOTE(review): eval() on CLI-provided strings executes arbitrary code;
    # ast.literal_eval would parse these list literals safely.
    layers = eval(args.layers)
    reg_layers = eval(args.reg_layers)
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    batch_size = args.batch_size
    epochs = args.epochs
    verbose = args.verbose
    topK = 10
    evaluation_threads = 1 #mp.cpu_count()
    print("MLP arguments: %s " %(args))
    # Timestamped output path, so repeated runs never overwrite each other.
    model_out_file = 'Pretrain/%s_MLP_%s_%d.h5' %(args.dataset, args.layers, time())
    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)
    train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
    num_users, num_items = train.shape
    print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d"
          %(time()-t1, num_users, num_items, train.nnz, len(testRatings)))
    # Build model
    model = get_model(num_users, num_items, layers, reg_layers)
    # Implicit-feedback setup: every optimizer uses binary cross-entropy.
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')
    # Check Init performance (before any training)
    t1 = time()
    (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
    hr, ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
    print('Init: HR = %.4f, NDCG = %.4f [%.1f]' %(hr, ndcg, time()-t1))
    # Train model
    # NOTE(review): xrange / nb_epoch are Python 2 + Keras 1.x APIs; this
    # script will not run unmodified on Python 3 / Keras 2.
    best_hr, best_ndcg, best_iter = hr, ndcg, -1
    for epoch in xrange(epochs):
        t1 = time()
        # Generate training instances (negatives are resampled each epoch)
        user_input, item_input, labels = get_train_instances(train, num_negatives)
        # Training
        hist = model.fit([np.array(user_input), np.array(item_input)], #input
                         np.array(labels), # labels
                         batch_size=batch_size, nb_epoch=1, verbose=0, shuffle=True)
        t2 = time()
        # Evaluation
        if epoch %verbose == 0:
            (hits, ndcgs) = evaluate_model(model, testRatings, testNegatives, topK, evaluation_threads)
            hr, ndcg, loss = np.array(hits).mean(), np.array(ndcgs).mean(), hist.history['loss'][0]
            print('Iteration %d [%.1f s]: HR = %.4f, NDCG = %.4f, loss = %.4f [%.1f s]'
                  % (epoch, t2-t1, hr, ndcg, loss, time()-t2))
            # Checkpoint only when HR improves.
            if hr > best_hr:
                best_hr, best_ndcg, best_iter = hr, ndcg, epoch
                if args.out > 0:
                    model.save_weights(model_out_file, overwrite=True)
    print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
    if args.out > 0:
        print("The best MLP model is saved to %s" %(model_out_file))
| 7,721 | 42.139665 | 165 | py |
t3f | t3f-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# t3f documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 12 10:06:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../t3f'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
is_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if is_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'nbsphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u't3f'
copyright = u'2017, Alexander Novikov, Pavel Izmailov, Ivan Oseledets, Michael Figurnov, Valentin Khrulkov'
author = u'Alexander Novikov, Pavel Izmailov, Ivan Oseledets, Michael Figurnov, Valentin Khrulkov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 't3fdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 't3f.tex', u't3f Documentation',
u'Alexander Novikov, Pavel Izmailov, Ivan Oseledets, Michael Figurnov, Valentin Khrulkov', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 't3f', u't3f Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 't3f', u't3f Documentation',
author, 't3f', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 5,326 | 30.708333 | 107 | py |
t3f | t3f-master/t3f/nn.py | """Utils for simplifying building neural networks with TT-layers"""
from itertools import count
import numpy as np
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Activation
import t3f
import tensorflow as tf
class KerasDense(Layer):
  # Class-level counter shared by all instances; gives each layer a unique
  # name 'tt_dense_0', 'tt_dense_1', ... across the process.
  _counter = count(0)
  def __init__(self, input_dims, output_dims, tt_rank=2,
               activation=None, use_bias=True, kernel_initializer='glorot',
               bias_initializer=0.1, **kwargs):
    """Creates a TT-Matrix based Dense Keras layer.
    Args:
      input_dims: an array, tensor shape of the matrix row index
      ouput_dims: an array, tensor shape of the matrix column index
      tt_rank: a number or an array, desired tt-rank of the TT-Matrix
      activation: [None] string or None, specifies the activation function.
      use_bias: bool, whether to use bias
      kernel_initializer: string specifying initializer for the TT-Matrix.
          Possible values are 'glorot', 'he', and 'lecun'.
      bias_initializer: a number, initialization value of the bias
    Returns:
      Layer object corresponding to multiplication by a TT-Matrix
          followed by addition of a bias and applying
          an elementwise activation
    Raises:
      ValueError if the provided activation or kernel_initializer is
      unknown.
    """
    # NOTE(review): only kernel_initializer is validated here; an unknown
    # activation string is passed through and fails later inside Keras.
    self.counter = next(self._counter)
    # TT-matrix shape: [row index factorization, column index factorization].
    self.tt_shape = [input_dims, output_dims]
    # Flat output width = product of the column factor dims.
    self.output_dim = np.prod(output_dims)
    self.tt_rank = tt_rank
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    name = 'tt_dense_{}'.format(self.counter)
    if self.kernel_initializer == 'glorot':
      initializer = t3f.glorot_initializer(self.tt_shape,
                                           tt_rank=self.tt_rank)
    elif self.kernel_initializer == 'he':
      initializer = t3f.he_initializer(self.tt_shape,
                                       tt_rank=self.tt_rank)
    elif self.kernel_initializer == 'lecun':
      initializer = t3f.lecun_initializer(self.tt_shape,
                                          tt_rank=self.tt_rank)
    else:
      raise ValueError('Unknown kernel_initializer "%s", only "glorot",'
                       '"he", and "lecun" are supported'
                       % self.kernel_initializer)
    self.matrix = t3f.get_variable('matrix', initializer=initializer)
    # Keep a direct reference to the TT-cores so Keras tracks the weights.
    self._tt_cores = self.matrix.tt_cores
    self.b = None
    if self.use_bias:
      # Constant-initialized bias of length prod(output_dims).
      self.b = tf.Variable(self.bias_initializer * tf.ones((self.output_dim,)))
    super(KerasDense, self).__init__(name=name, **kwargs)
  def call(self, x):
    # y = x @ W_tt (+ b) with optional elementwise activation.
    res = t3f.matmul(x, self.matrix)
    if self.use_bias:
      res += self.b
    if self.activation is not None:
      res = Activation(self.activation)(res)
    return res
  def compute_output_shape(self, input_shape):
    # Batch dimension is preserved; features collapse to prod(output_dims).
    return (input_shape[0], self.output_dim)
| 2,954 | 36.884615 | 79 | py |
lime | lime-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# lime documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 18 16:20:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../')
sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)
import mock
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'scipy.special',
'scipy.stats', 'scipy.stats.distributions', 'sklearn', 'sklearn.preprocessing',
'sklearn.linear_model', 'matplotlib',
'sklearn.datasets', 'sklearn.ensemble', 'sklearn.cross_validation',
'sklearn.feature_extraction', 'sklearn.feature_extraction.text',
'sklearn.metrics', 'sklearn.naive_bayes', 'sklearn.pipeline',
'sklearn.utils', 'pyDOE2',]
# for mod_name in MOCK_MODULES:
# sys.modules[mod_name] = mock.Mock()
import scipy
import scipy.stats
import scipy.stats.distributions
import lime
import lime.lime_text
import lime.lime_tabular
import lime.explanation
import lime.lime_base
import lime.submodular_pick
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lime'
copyright = u'2016, Marco Tulio Ribeiro'
author = u'Marco Tulio Ribeiro'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'limedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lime.tex', u'lime Documentation',
u'Marco Tulio Ribeiro', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lime', u'lime Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lime', u'lime Documentation',
author, 'lime', 'One line description of project.',
'Miscellaneous'),
]
autoclass_content = 'both'
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 10,195 | 31.368254 | 95 | py |
lime | lime-master/lime/lime_tabular.py | """
Functions for explaining classifiers that use tabular data (matrices).
"""
import collections
import copy
from functools import partial
import json
import warnings
import numpy as np
import scipy as sp
import sklearn
import sklearn.preprocessing
from sklearn.utils import check_random_state
from pyDOE2 import lhs
from scipy.stats.distributions import norm
from lime.discretize import QuartileDiscretizer
from lime.discretize import DecileDiscretizer
from lime.discretize import EntropyDiscretizer
from lime.discretize import BaseDiscretizer
from lime.discretize import StatsDiscretizer
from . import explanation
from . import lime_base
class TableDomainMapper(explanation.DomainMapper):
    """Maps feature ids to names, generates table views, etc"""
    def __init__(self, feature_names, feature_values, scaled_row,
                 categorical_features, discretized_feature_names=None,
                 feature_indexes=None):
        """Init.
        Args:
            feature_names: list of feature names, in order
            feature_values: list of strings with the values of the original row
            scaled_row: scaled row
            categorical_features: list of categorical features ids (ints)
            discretized_feature_names: optional per-feature names of the
                discretized bins (used instead of feature_names when set)
            feature_indexes: optional feature indexes used in the sparse case
        """
        self.exp_feature_names = feature_names
        self.discretized_feature_names = discretized_feature_names
        self.feature_names = feature_names
        self.feature_values = feature_values
        self.feature_indexes = feature_indexes
        self.scaled_row = scaled_row
        # For sparse rows len() is undefined, so treat them as not
        # all-categorical; otherwise it's a row-length comparison.
        if sp.sparse.issparse(scaled_row):
            self.all_categorical = False
        else:
            self.all_categorical = len(categorical_features) == len(scaled_row)
        self.categorical_features = categorical_features
    def map_exp_ids(self, exp):
        """Maps ids to feature names.
        Args:
            exp: list of tuples [(id, weight), (id,weight)]
        Returns:
            list of tuples (feature_name, weight)
        """
        # Prefer the discretized (binned) names when a discretizer was used.
        names = self.exp_feature_names
        if self.discretized_feature_names is not None:
            names = self.discretized_feature_names
        return [(names[x[0]], x[1]) for x in exp]
    def visualize_instance_html(self,
                                exp,
                                label,
                                div_name,
                                exp_object_name,
                                show_table=True,
                                show_all=False):
        """Shows the current example in a table format.
        Args:
             exp: list of tuples [(id, weight), (id,weight)]
             label: label id (integer)
             div_name: name of div object to be used for rendering(in js)
             exp_object_name: name of js explanation object
             show_table: if False, don't show table visualization.
             show_all: if True, show zero-weighted features in the table.
        Returns:
             JS snippet (as a unicode string) that renders the table, or ''
             when show_table is False.
        """
        if not show_table:
            return ''
        # Dense weight vector indexed by feature id; zero for features
        # absent from the explanation.
        weights = [0] * len(self.feature_names)
        for x in exp:
            weights[x[0]] = x[1]
        if self.feature_indexes is not None:
            # Sparse case: only display the non-zero values and importances
            fnames = [self.exp_feature_names[i] for i in self.feature_indexes]
            fweights = [weights[i] for i in self.feature_indexes]
            if show_all:
                out_list = list(zip(fnames,
                                    self.feature_values,
                                    fweights))
            else:
                out_dict = dict(map(lambda x: (x[0], (x[1], x[2], x[3])),
                                zip(self.feature_indexes,
                                    fnames,
                                    self.feature_values,
                                    fweights)))
                # Fall back to (id-as-name, 0, 0) for explained features
                # that are not among the row's non-zero indexes.
                out_list = [out_dict.get(x[0], (str(x[0]), 0.0, 0.0)) for x in exp]
        else:
            out_list = list(zip(self.exp_feature_names,
                                self.feature_values,
                                weights))
            if not show_all:
                out_list = [out_list[x[0]] for x in exp]
        ret = u'''
            %s.show_raw_tabular(%s, %d, %s);
        ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)
        return ret
class LimeTabularExplainer(object):
"""Explains predictions on tabular (i.e. matrix) data.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to the
means and stds in the training data. For categorical features, perturb by
sampling according to the training distribution, and making a binary
feature that is 1 when the value is the same as the instance being
explained."""
    def __init__(self,
                 training_data,
                 mode="classification",
                 training_labels=None,
                 feature_names=None,
                 categorical_features=None,
                 categorical_names=None,
                 kernel_width=None,
                 kernel=None,
                 verbose=False,
                 class_names=None,
                 feature_selection='auto',
                 discretize_continuous=True,
                 discretizer='quartile',
                 sample_around_instance=False,
                 random_state=None,
                 training_data_stats=None):
        """Init function.
        Args:
            training_data: numpy 2d array
            mode: "classification" or "regression"
            training_labels: labels for training data. Not required, but may be
                used by discretizer.
            feature_names: list of names (strings) corresponding to the columns
                in the training data.
            categorical_features: list of indices (ints) corresponding to the
                categorical columns. Everything else will be considered
                continuous. Values in these columns MUST be integers.
            categorical_names: map from int to list of names, where
                categorical_names[x][y] represents the name of the yth value of
                column x.
            kernel_width: kernel width for the exponential kernel.
                If None, defaults to sqrt (number of columns) * 0.75
            kernel: similarity kernel that takes euclidean distances and kernel
                width as input and outputs weights in (0,1). If None, defaults to
                an exponential kernel.
            verbose: if true, print local prediction values from linear model
            class_names: list of class names, ordered according to whatever the
                classifier is using. If not present, class names will be '0',
                '1', ...
            feature_selection: feature selection method. can be
                'forward_selection', 'lasso_path', 'none' or 'auto'.
                See function 'explain_instance_with_data' in lime_base.py for
                details on what each of the options does.
            discretize_continuous: if True, all non-categorical features will
                be discretized into quartiles.
            discretizer: only matters if discretize_continuous is True
                and data is not sparse. Options are 'quartile', 'decile',
                'entropy' or a BaseDiscretizer instance.
            sample_around_instance: if True, will sample continuous features
                in perturbed samples from a normal centered at the instance
                being explained. Otherwise, the normal is centered on the mean
                of the feature data.
            random_state: an integer or numpy.RandomState that will be used to
                generate random numbers. If None, the random state will be
                initialized using the internal numpy seed.
            training_data_stats: a dict object having the details of training data
                statistics. If None, training data information will be used, only matters
                if discretize_continuous is True. Must have the following keys:
                means", "mins", "maxs", "stds", "feature_values",
                "feature_frequencies"
        """
        self.random_state = check_random_state(random_state)
        self.mode = mode
        self.categorical_names = categorical_names or {}
        self.sample_around_instance = sample_around_instance
        self.training_data_stats = training_data_stats
        # Check and raise proper error in stats are supplied in non-descritized path
        if self.training_data_stats:
            self.validate_training_data_stats(self.training_data_stats)
        if categorical_features is None:
            categorical_features = []
        if feature_names is None:
            feature_names = [str(i) for i in range(training_data.shape[1])]
        self.categorical_features = list(categorical_features)
        self.feature_names = list(feature_names)
        self.discretizer = None
        if discretize_continuous and not sp.sparse.issparse(training_data):
            # Set the discretizer if training data stats are provided.
            # NOTE(review): this rebinds the *local* `discretizer`; the
            # StatsDiscretizer instance is then picked up below by the
            # `isinstance(discretizer, BaseDiscretizer)` branch rather than
            # by any of the string comparisons.
            if self.training_data_stats:
                discretizer = StatsDiscretizer(
                    training_data, self.categorical_features,
                    self.feature_names, labels=training_labels,
                    data_stats=self.training_data_stats,
                    random_state=self.random_state)
            if discretizer == 'quartile':
                self.discretizer = QuartileDiscretizer(
                        training_data, self.categorical_features,
                        self.feature_names, labels=training_labels,
                        random_state=self.random_state)
            elif discretizer == 'decile':
                self.discretizer = DecileDiscretizer(
                        training_data, self.categorical_features,
                        self.feature_names, labels=training_labels,
                        random_state=self.random_state)
            elif discretizer == 'entropy':
                self.discretizer = EntropyDiscretizer(
                        training_data, self.categorical_features,
                        self.feature_names, labels=training_labels,
                        random_state=self.random_state)
            elif isinstance(discretizer, BaseDiscretizer):
                self.discretizer = discretizer
            else:
                raise ValueError('''Discretizer must be 'quartile',''' +
                                 ''' 'decile', 'entropy' or a''' +
                                 ''' BaseDiscretizer instance''')
            # After discretization every column is treated as categorical.
            self.categorical_features = list(range(training_data.shape[1]))
            # Get the discretized_training_data when the stats are not provided
            if(self.training_data_stats is None):
                discretized_training_data = self.discretizer.discretize(
                    training_data)
        if kernel_width is None:
            kernel_width = np.sqrt(training_data.shape[1]) * .75
        kernel_width = float(kernel_width)
        if kernel is None:
            # Default: exponential kernel over euclidean distance.
            def kernel(d, kernel_width):
                return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
        kernel_fn = partial(kernel, kernel_width=kernel_width)
        self.feature_selection = feature_selection
        self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)
        self.class_names = class_names
        # Though set has no role to play if training data stats are provided
        self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)
        self.scaler.fit(training_data)
        self.feature_values = {}
        self.feature_frequencies = {}
        # Per-categorical-feature value list and (normalized) frequency
        # distribution, used later to sample perturbed categorical values.
        for feature in self.categorical_features:
            if training_data_stats is None:
                if self.discretizer is not None:
                    column = discretized_training_data[:, feature]
                else:
                    column = training_data[:, feature]
                feature_count = collections.Counter(column)
                values, frequencies = map(list, zip(*(sorted(feature_count.items()))))
            else:
                values = training_data_stats["feature_values"][feature]
                frequencies = training_data_stats["feature_frequencies"][feature]
            self.feature_values[feature] = values
            self.feature_frequencies[feature] = (np.array(frequencies) /
                                                 float(sum(frequencies)))
            # Disable scaling for categorical features (identity transform).
            self.scaler.mean_[feature] = 0
            self.scaler.scale_[feature] = 1
@staticmethod
def convert_and_round(values):
return ['%.2f' % v for v in values]
@staticmethod
def validate_training_data_stats(training_data_stats):
"""
Method to validate the structure of training data stats
"""
stat_keys = list(training_data_stats.keys())
valid_stat_keys = ["means", "mins", "maxs", "stds", "feature_values", "feature_frequencies"]
missing_keys = list(set(valid_stat_keys) - set(stat_keys))
if len(missing_keys) > 0:
raise Exception("Missing keys in training_data_stats. Details: %s" % (missing_keys))
    def explain_instance(self,
                         data_row,
                         predict_fn,
                         labels=(1,),
                         top_labels=None,
                         num_features=10,
                         num_samples=5000,
                         distance_metric='euclidean',
                         model_regressor=None,
                         sampling_method='gaussian'):
        """Generates explanations for a prediction.
        First, we generate neighborhood data by randomly perturbing features
        from the instance (see __data_inverse). We then learn locally weighted
        linear models on this neighborhood data to explain each of the classes
        in an interpretable way (see lime_base.py).
        Args:
            data_row: 1d numpy array or scipy.sparse matrix, corresponding to a row
            predict_fn: prediction function. For classifiers, this should be a
                function that takes a numpy array and outputs prediction
                probabilities. For regressors, this takes a numpy array and
                returns the predictions. For ScikitClassifiers, this is
                `classifier.predict_proba()`. For ScikitRegressors, this
                is `regressor.predict()`. The prediction function needs to work
                on multiple feature vectors (the vectors randomly perturbed
                from the data_row).
            labels: iterable with labels to be explained.
            top_labels: if not None, ignore labels and produce explanations for
                the K labels with highest prediction probabilities, where K is
                this parameter.
            num_features: maximum number of features present in explanation
            num_samples: size of the neighborhood to learn the linear model
            distance_metric: the distance metric to use for weights.
            model_regressor: sklearn regressor to use in explanation. Defaults
                to Ridge regression in LimeBase. Must have model_regressor.coef_
                and 'sample_weight' as a parameter to model_regressor.fit()
            sampling_method: Method to sample synthetic data. Defaults to Gaussian
                sampling. Can also use Latin Hypercube Sampling.
        Returns:
            An Explanation object (see explanation.py) with the corresponding
            explanations.
        """
        if sp.sparse.issparse(data_row) and not sp.sparse.isspmatrix_csr(data_row):
            # Preventative code: if sparse, convert to csr format if not in csr format already
            data_row = data_row.tocsr()
        # Sample a perturbed neighborhood: `data` is the interpretable (binary)
        # representation, `inverse` lives in the original feature space.
        data, inverse = self.__data_inverse(data_row, num_samples, sampling_method)
        if sp.sparse.issparse(data):
            # Note in sparse case we don't subtract mean since data would become dense
            scaled_data = data.multiply(self.scaler.scale_)
            # Multiplying with csr matrix can return a coo sparse matrix
            if not sp.sparse.isspmatrix_csr(scaled_data):
                scaled_data = scaled_data.tocsr()
        else:
            scaled_data = (data - self.scaler.mean_) / self.scaler.scale_
        # Distances are measured against the scaled original instance (row 0);
        # they feed the kernel that weights samples in the local linear fit.
        distances = sklearn.metrics.pairwise_distances(
                scaled_data,
                scaled_data[0].reshape(1, -1),
                metric=distance_metric
        ).ravel()
        yss = predict_fn(inverse)
        # for classification, the model needs to provide a list of tuples - classes
        # along with prediction probabilities
        if self.mode == "classification":
            if len(yss.shape) == 1:
                raise NotImplementedError("LIME does not currently support "
                                          "classifier models without probability "
                                          "scores. If this conflicts with your "
                                          "use case, please let us know: "
                                          "https://github.com/datascienceinc/lime/issues/16")
            elif len(yss.shape) == 2:
                if self.class_names is None:
                    self.class_names = [str(x) for x in range(yss[0].shape[0])]
                else:
                    self.class_names = list(self.class_names)
                if not np.allclose(yss.sum(axis=1), 1.0):
                    warnings.warn("""
                    Prediction probabilties do not sum to 1, and
                    thus does not constitute a probability space.
                    Check that you classifier outputs probabilities
                    (Not log probabilities, or actual class predictions).
                    """)
            else:
                raise ValueError("Your model outputs "
                                 "arrays with {} dimensions".format(len(yss.shape)))
        # for regression, the output should be a one-dimensional array of predictions
        else:
            try:
                # Flatten (n, 1)-shaped regressor output to 1d before validating.
                if len(yss.shape) != 1 and len(yss[0].shape) == 1:
                    yss = np.array([v[0] for v in yss])
                assert isinstance(yss, np.ndarray) and len(yss.shape) == 1
            except AssertionError:
                raise ValueError("Your model needs to output single-dimensional \
                    numpyarrays, not arrays of {} dimensions".format(yss.shape))
            predicted_value = yss[0]
            min_y = min(yss)
            max_y = max(yss)
            # add a dimension to be compatible with downstream machinery
            yss = yss[:, np.newaxis]
        # Work on a copy so the explainer's stored feature names stay untouched.
        feature_names = copy.deepcopy(self.feature_names)
        if feature_names is None:
            feature_names = [str(x) for x in range(data_row.shape[0])]
        if sp.sparse.issparse(data_row):
            values = self.convert_and_round(data_row.data)
            feature_indexes = data_row.indices
        else:
            values = self.convert_and_round(data_row)
            feature_indexes = None
        # Render categorical features as "name=value" with value "True".
        # NOTE(review): int(data_row[i]) assumes a dense data_row here when
        # categorical features are present -- confirm upstream guarantees.
        for i in self.categorical_features:
            if self.discretizer is not None and i in self.discretizer.lambdas:
                continue
            name = int(data_row[i])
            if i in self.categorical_names:
                name = self.categorical_names[i][name]
            feature_names[i] = '%s=%s' % (feature_names[i], name)
            values[i] = 'True'
        categorical_features = self.categorical_features
        discretized_feature_names = None
        if self.discretizer is not None:
            # With a discretizer, every column is treated as categorical and
            # names are replaced by bin descriptions (e.g. "x <= 1.5").
            categorical_features = range(data.shape[1])
            discretized_instance = self.discretizer.discretize(data_row)
            discretized_feature_names = copy.deepcopy(feature_names)
            for f in self.discretizer.names:
                discretized_feature_names[f] = self.discretizer.names[f][int(
                    discretized_instance[f])]
        domain_mapper = TableDomainMapper(feature_names,
                                          values,
                                          scaled_data[0],
                                          categorical_features=categorical_features,
                                          discretized_feature_names=discretized_feature_names,
                                          feature_indexes=feature_indexes)
        ret_exp = explanation.Explanation(domain_mapper,
                                          mode=self.mode,
                                          class_names=self.class_names)
        if self.mode == "classification":
            ret_exp.predict_proba = yss[0]
            if top_labels:
                # argsort is ascending, so the last K entries are the top
                # labels; reverse to report them in descending probability.
                labels = np.argsort(yss[0])[-top_labels:]
                ret_exp.top_labels = list(labels)
                ret_exp.top_labels.reverse()
        else:
            ret_exp.predicted_value = predicted_value
            ret_exp.min_value = min_y
            ret_exp.max_value = max_y
            labels = [0]
        # Fit one locally weighted linear model per requested label.
        for label in labels:
            (ret_exp.intercept[label],
             ret_exp.local_exp[label],
             ret_exp.score[label],
             ret_exp.local_pred[label]) = self.base.explain_instance_with_data(
                    scaled_data,
                    yss,
                    distances,
                    label,
                    num_features,
                    model_regressor=model_regressor,
                    feature_selection=self.feature_selection)
        if self.mode == "regression":
            # Mirror the single regression explanation into label slot 1 and
            # keep a sign-flipped copy in slot 0 for the visualization layer.
            ret_exp.intercept[1] = ret_exp.intercept[0]
            ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]
            ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]
        return ret_exp
    def __data_inverse(self,
                       data_row,
                       num_samples,
                       sampling_method):
        """Generates a neighborhood around a prediction.
        For numerical features, perturb them by sampling from a Normal(0,1) and
        doing the inverse operation of mean-centering and scaling, according to
        the means and stds in the training data. For categorical features,
        perturb by sampling according to the training distribution, and making
        a binary feature that is 1 when the value is the same as the instance
        being explained.
        Args:
            data_row: 1d numpy array, corresponding to a row
            num_samples: size of the neighborhood to learn the linear model
            sampling_method: 'gaussian' or 'lhs'
        Returns:
            A tuple (data, inverse), where:
            data: dense num_samples * K matrix, where categorical features
            are encoded with either 0 (not equal to the corresponding value
            in data_row) or 1. The first row is the original instance.
            inverse: same as data, except the categorical features are not
            binary, but categorical (as the original data)
        """
        is_sparse = sp.sparse.issparse(data_row)
        if is_sparse:
            # Sparse rows are shaped (1, n); dense rows are shaped (n,).
            num_cols = data_row.shape[1]
            data = sp.sparse.csr_matrix((num_samples, num_cols), dtype=data_row.dtype)
        else:
            num_cols = data_row.shape[0]
            data = np.zeros((num_samples, num_cols))
        categorical_features = range(num_cols)
        if self.discretizer is None:
            # Continuous path: sample standard normals (or LHS), then rescale
            # with the training std and re-center on the mean or the instance.
            instance_sample = data_row
            scale = self.scaler.scale_
            mean = self.scaler.mean_
            if is_sparse:
                # Perturb only the non-zero values
                non_zero_indexes = data_row.nonzero()[1]
                num_cols = len(non_zero_indexes)
                instance_sample = data_row[:, non_zero_indexes]
                scale = scale[non_zero_indexes]
                mean = mean[non_zero_indexes]
            if sampling_method == 'gaussian':
                data = self.random_state.normal(0, 1, num_samples * num_cols
                                                ).reshape(num_samples, num_cols)
                data = np.array(data)
            elif sampling_method == 'lhs':
                # Latin Hypercube gives uniform [0,1) samples; map them onto a
                # standard normal via the inverse CDF (ppf), per column.
                data = lhs(num_cols, samples=num_samples
                           ).reshape(num_samples, num_cols)
                means = np.zeros(num_cols)
                stdvs = np.array([1]*num_cols)
                for i in range(num_cols):
                    data[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(data[:, i])
                data = np.array(data)
            else:
                warnings.warn('''Invalid input for sampling_method.
                                 Defaulting to Gaussian sampling.''', UserWarning)
                data = self.random_state.normal(0, 1, num_samples * num_cols
                                                ).reshape(num_samples, num_cols)
                data = np.array(data)
            if self.sample_around_instance:
                data = data * scale + instance_sample
            else:
                data = data * scale + mean
            if is_sparse:
                if num_cols == 0:
                    data = sp.sparse.csr_matrix((num_samples,
                                                 data_row.shape[1]),
                                                dtype=data_row.dtype)
                else:
                    # Rebuild a CSR matrix: every sample row reuses the same
                    # non-zero column indexes, so indptr advances uniformly.
                    indexes = np.tile(non_zero_indexes, num_samples)
                    indptr = np.array(
                        range(0, len(non_zero_indexes) * (num_samples + 1),
                              len(non_zero_indexes)))
                    data_1d_shape = data.shape[0] * data.shape[1]
                    data_1d = data.reshape(data_1d_shape)
                    data = sp.sparse.csr_matrix(
                        (data_1d, indexes, indptr),
                        shape=(num_samples, data_row.shape[1]))
            categorical_features = self.categorical_features
            first_row = data_row
        else:
            first_row = self.discretizer.discretize(data_row)
        # Row 0 always holds the original (undisturbed) instance.
        data[0] = data_row.copy()
        inverse = data.copy()
        for column in categorical_features:
            # Resample categorical columns from the training distribution;
            # the interpretable representation is a match/no-match indicator.
            values = self.feature_values[column]
            freqs = self.feature_frequencies[column]
            inverse_column = self.random_state.choice(values, size=num_samples,
                                                      replace=True, p=freqs)
            binary_column = (inverse_column == first_row[column]).astype(int)
            binary_column[0] = 1
            inverse_column[0] = data[0, column]
            data[:, column] = binary_column
            inverse[:, column] = inverse_column
        if self.discretizer is not None:
            # Map discretized bins back to concrete values for all perturbed
            # rows; row 0 keeps the exact original instance.
            inverse[1:] = self.discretizer.undiscretize(inverse[1:])
        inverse[0] = data_row
        return data, inverse
class RecurrentTabularExplainer(LimeTabularExplainer):
    """
    An explainer for keras-style recurrent neural networks, where the
    input shape is (n_samples, n_timesteps, n_features). The training data
    and feature names are flattened feature-major into something like
    (val1_t1, val1_t2, ..., val2_t1, ..., valn_tn), so the plain tabular
    machinery applies. Every method that takes data reshapes it back, so
    callers pass data exactly as they would feed the recurrent network.
    """
    def __init__(self, training_data, mode="classification",
                 training_labels=None, feature_names=None,
                 categorical_features=None, categorical_names=None,
                 kernel_width=None, kernel=None, verbose=False, class_names=None,
                 feature_selection='auto', discretize_continuous=True,
                 discretizer='quartile', random_state=None):
        """
        Args:
            training_data: numpy 3d array with shape
                (n_samples, n_timesteps, n_features)
            mode: "classification" or "regression"
            feature_names: list of names (strings), one per feature (not per
                flattened column); time suffixes are appended automatically.
            All remaining arguments are forwarded unchanged to
            LimeTabularExplainer; see its docstring for their meaning.
        """
        # Flatten (n_samples, n_timesteps, n_features) -> 2d, feature-major:
        # all timesteps of feature 1 first, then feature 2, and so on.
        samples, timesteps, channels = training_data.shape
        self.n_timesteps = timesteps
        self.n_features = channels
        flattened = np.transpose(training_data, axes=(0, 2, 1)).reshape(
            samples, timesteps * channels)
        if feature_names is None:
            feature_names = ['feature%d' % i for i in range(channels)]
        # Suffix every name with its reversed time offset, e.g. "x_t-2".
        feature_names = ['{}_t-{}'.format(n, timesteps - (i + 1))
                         for n in feature_names for i in range(timesteps)]
        # Delegate everything else to the plain tabular explainer.
        super(RecurrentTabularExplainer, self).__init__(
            flattened,
            mode=mode,
            training_labels=training_labels,
            feature_names=feature_names,
            categorical_features=categorical_features,
            categorical_names=categorical_names,
            kernel_width=kernel_width,
            kernel=kernel,
            verbose=verbose,
            class_names=class_names,
            feature_selection=feature_selection,
            discretize_continuous=discretize_continuous,
            discretizer=discretizer,
            random_state=random_state)
    def _make_predict_proba(self, func):
        """
        Wrap ``func`` so it accepts the flattened 2d rows used internally,
        restoring the (n_samples, n_timesteps, n_features) layout the
        keras-style network expects before each call.
        """
        def predict_proba(X):
            restored = np.transpose(
                X.reshape((X.shape[0], self.n_features, self.n_timesteps)),
                axes=(0, 2, 1))
            return func(restored)
        return predict_proba
    def explain_instance(self, data_row, classifier_fn, labels=(1,),
                         top_labels=None, num_features=10, num_samples=5000,
                         distance_metric='euclidean', model_regressor=None):
        """Generates explanations for a prediction.
        Args:
            data_row: 2d numpy array (n_timesteps, n_features), one instance
                exactly as fed to the recurrent network.
            classifier_fn: prediction probability function taking a 3d numpy
                array and returning prediction probabilities.
            All remaining arguments are forwarded unchanged to
            LimeTabularExplainer.explain_instance.
        Returns:
            An Explanation object (see explanation.py) with the corresponding
            explanations.
        """
        # Flatten the instance and wrap the classifier so the parent tabular
        # explainer operates on 2d data throughout.
        data_row = data_row.T.reshape(self.n_timesteps * self.n_features)
        classifier_fn = self._make_predict_proba(classifier_fn)
        return super(RecurrentTabularExplainer, self).explain_instance(
            data_row, classifier_fn,
            labels=labels,
            top_labels=top_labels,
            num_features=num_features,
            num_samples=num_samples,
            distance_metric=distance_metric,
            model_regressor=model_regressor)
| 34,542 | 46.254446 | 100 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/moltrans_dti/helper/utils/paddle_io.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paddle IO
"""
from paddle.io import Dataset, IterableDataset
import paddle
import warnings
import bisect
class ConcatDataset(Dataset):
    r"""Dataset that chains several datasets into a single one.

    Useful to assemble different existing datasets.

    Arguments:
        datasets (sequence): List of datasets to be concatenated
    """
    @staticmethod
    def cumsum(sequence):
        """Return the running totals of ``len(entry)`` over ``sequence``."""
        totals, running = [], 0
        for entry in sequence:
            running += len(entry)
            totals.append(running)
        return totals
    def __init__(self, datasets):
        """Store the member datasets and precompute their cumulative sizes."""
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        for d in self.datasets:
            assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        """Total number of samples across all member datasets."""
        return self.cumulative_sizes[-1]
    def __getitem__(self, idx):
        """Map a flat index onto (dataset, local index) and fetch the sample."""
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        # bisect_right finds the first dataset whose cumulative end exceeds idx.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        prior = self.cumulative_sizes[dataset_idx - 1] if dataset_idx else 0
        return self.datasets[dataset_idx][idx - prior]
    @property
    def cummulative_sizes(self):
        """Deprecated alias of ``cumulative_sizes``."""
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
def _accumulate(iterable, fn=lambda x, y: x + y):
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = fn(total, element)
yield total
class Subset(Dataset):
    r"""
    Subset of a dataset at specified indices.
    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """
    def __init__(self, dataset, indices):
        # Keep references only; samples are fetched lazily in __getitem__.
        self.dataset = dataset
        self.indices = indices
    def __getitem__(self, idx):
        # Translate the subset-local index into the underlying dataset index.
        return self.dataset[self.indices[idx]]
    def __len__(self):
        # The subset's length is the number of selected indices.
        return len(self.indices)
def random_split(dataset, lengths, generator=None):
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced
        generator (Generator): accepted for torch API compatibility; unused by
            the paddle implementation.
    """
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    # One shared permutation; each split takes a consecutive slice of it.
    indices = paddle.randperm(sum(lengths))
    splits = []
    offset = 0
    for length in lengths:
        offset += length
        splits.append(Subset(dataset, indices[offset - length: offset]))
    return splits
# Expose random_split under the paddle.io namespace so downstream code can use
# the familiar torch-style `paddle.io.random_split(...)` API.
setattr(paddle.io, "random_split", random_split)
| 4,237 | 29.489209 | 119 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/train_kiba.py | """Training scripts for GraphDTA backbone."""
import rdkit
import torch
import sklearn
import numpy as np
import pandas as pd
import sys, os
import os.path
from os import path
import random
from random import shuffle
from time import time
from rdkit import Chem
import torch.nn as nn
from argparse import ArgumentParser
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *
from processing import process_data
from get_len import get_kiba_len
# Set random seed everywhere (numpy, python's random, torch CPU and all GPUs)
# and force deterministic cuDNN kernels so training runs are reproducible.
seed = 1
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Set loss function (pointwise regression on affinity values)
loss_fn = nn.MSELoss()
# Basic settings
# Print training progress every LOG_INTERVAL mini-batches.
LOG_INTERVAL = 20
# Training script
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch for a GraphDTA backbone model.

    Args:
        model: GraphDTA backbone model being optimized.
        device: Device the batches are moved to.
        train_loader: Dataloader over the training set.
        optimizer: Optimizer updating the model parameters.
        epoch: Current epoch number (used for logging only).

    Returns:
        The loss of the last mini-batch, as a Python float.
    """
    print('Training on {} samples...'.format(len(train_loader.dataset)))
    model.train()
    for step, batch in enumerate(train_loader):
        batch = batch.to(device)
        optimizer.zero_grad()
        prediction = model(batch)
        loss = loss_fn(prediction, batch.y.view(-1, 1).float().to(device))
        loss.backward()
        optimizer.step()
        if step % LOG_INTERVAL == 0:
            print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch,
                                                                           step * len(batch.x),
                                                                           len(train_loader.dataset),
                                                                           100. * step / len(train_loader),
                                                                           loss.item()))
    return loss.item()
def predicting(model, device, loader):
    """Collect model predictions over an entire dataloader.

    Args:
        model: GraphDTA backbone model.
        device: Device.
        loader: Dataloader of validation/testing set.

    Returns:
        res_label: 1d numpy array of ground-truth labels for ALL batches.
        res_pred: 1d numpy array of predictions for ALL batches.
    """
    model.eval()
    total_preds = torch.Tensor()
    total_labels = torch.Tensor()
    print('Make prediction for {} samples...'.format(len(loader.dataset)))
    with torch.no_grad():
        for data in loader:
            data = data.to(device)
            output = model(data)
            # Bug fix: accumulate across batches. The previous code rebuilt
            # the result from the (still empty) initial tensors on every
            # iteration, so only the last batch survived the loop.
            total_preds = torch.cat((total_preds, output.cpu()), 0)
            total_labels = torch.cat((total_labels, data.y.view(-1, 1).cpu()), 0)
    return total_labels.numpy().flatten(), total_preds.numpy().flatten()
def cal_len(path):
    """Calculate length of each group.

    Args:
        path: Text file with one integer group size per line.

    Returns:
        li: List of per-group sizes, in file order.
        lens: Total number of samples (sum of the sizes).
    """
    # Use a context manager so the file handle is closed promptly
    # (the previous version leaked the handle returned by open()).
    with open(path, 'r') as handle:
        li = [int(line.strip()) for line in handle]
    lens = np.sum(li)
    return li, lens
def main(args):
    """Train and validate a GraphDTA backbone on KIBA, then test the best model.

    Args:
        args: Parsed command-line arguments (batchsize, epochs, rounds, lr,
            cudanum, model).
    """
    # Basic settings
    best_ci = 0
    best_epoch = 0
    best_train_loss = 10000
    rounds = args.rounds
    # Set CUDA device
    cuda_name = "cuda:" + str(args.cudanum)
    device = torch.device(cuda_name if torch.cuda.is_available() else "cpu")
    # Select the backbone architecture by index.
    modeling = [GINConvNet, GATNet, GAT_GCN, GCNNet][args.model]
    model_st = modeling.__name__
    print(model_st)
    model = modeling().to(device)
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)  # Adam
    # Load data
    train_data = pd.read_csv("../../Data/KIBA/CV"+str(rounds)+"/CV"+str(rounds)+"_KIBA_unseenP_seenD_train.csv")
    val_data = pd.read_csv("../../Data/KIBA/CV"+str(rounds)+"/CV"+str(rounds)+"_KIBA_unseenP_seenD_val.csv")
    test_data = pd.read_csv("../../Data/KIBA/test_KIBA_unseenP_seenD.csv")
    train_set = process_data(train_data, 'train')
    val_set = process_data(val_data, 'val')
    test_set = process_data(test_data, 'test')
    train_generator = TestbedDataset(root = 'dataset', dataset = 'KIBA_train' + str(rounds), xd = train_set[0],
                        xt = train_set[1], y = train_set[2], smile_graph = train_set[3])
    val_generator = TestbedDataset(root = 'dataset', dataset = 'KIBA_val' + str(rounds), xd = val_set[0],
                        xt = val_set[1], y = val_set[2], smile_graph = val_set[3])
    test_generator = TestbedDataset(root = 'dataset', dataset = 'KIBA_test', xd = test_set[0],
                        xt = test_set[1], y = test_set[2], smile_graph = test_set[3])
    # Make mini-batch processing
    train_loader = DataLoader(train_generator, batch_size = args.batchsize, shuffle = True)
    val_loader = DataLoader(val_generator, batch_size = args.batchsize, shuffle = False)
    test_loader = DataLoader(test_generator, batch_size = args.batchsize, shuffle = False)
    result_path = "bestResult/GraphDTA_"+model_st+"_kiba_result"+str(args.rounds)+".txt"
    model_name = "bestModel/GraphDTA_"+model_st+"_kiba_"+str(rounds)+".model"
    # Training...
    print("Training.....")
    for epoch in range(args.epochs):
        print("===============Go for Training===============")
        train_loss = train(model, device, train_loader, optimizer, epoch+1)
        # Validation...
        G, P = predicting(model, device, val_loader)
        val_ci = ci(G, P)
        val_path = "../../Data/KIBA/CV"+str(rounds)+"/CV"+str(rounds)+"_val.txt"
        # Check if kiba len file exists
        if(path.exists(val_path) == False):
            get_kiba_len()
        # Calculate Weighted CI, Average CI of validation set
        li,lens = cal_len(val_path)
        s = 0
        w_ci,a_ci = [],[]
        for l in li:
            try:
                w_ci.append(l*ci(G[s:s+l],P[s:s+l]))
                a_ci.append(ci(G[s:s+l],P[s:s+l]))
            except:
                # Best effort: skip groups where CI is undefined
                # (e.g. all labels tied within the group).
                pass
            s += l
        weight_ci, average_ci = np.sum(w_ci)/np.sum(li), np.mean(a_ci)
        print("===============Go for Validation===============")
        print("Weighted CI:",weight_ci)
        print("Average CI:",average_ci)
        print("Overall CI:",val_ci)
        # Append this epoch's metrics; close per epoch to avoid leaking one
        # file handle per epoch (the previous code never closed them).
        files = open(result_path,'a')
        files.write("val_averageCI: "+str(average_ci)+", val_weightedCI: "+str(weight_ci)+", val_overallCI: "+str(val_ci)+", train_loss: "+str(train_loss)+'\n')
        files.close()
        # Save the best result
        if average_ci > best_ci:
            best_ci = average_ci
            best_epoch = epoch
            best_train_loss = train_loss
            # Save best model
            print("Saving the best model...")
            torch.save(model.state_dict(), model_name)
    print("===============Go for Testing===============")
    # Load the best checkpoint saved during validation
    model.load_state_dict(torch.load(model_name))
    # Testing...
    test_G, test_P = predicting(model, device, test_loader)
    test_CI, test_MSE = ci(test_G,test_P), mse(test_G,test_P)
    test_path = "../../Data/KIBA/kiba_len.txt"
    # Check if kiba len file exists
    if(path.exists(test_path) == False):
        get_kiba_len()
    # Calculate Weighted CI, Average CI of testing set
    t_li ,t_lens = cal_len(test_path)
    s = 0
    w_ci,a_ci = [],[]
    for l in t_li:
        try:
            # Bug fix: score the TEST split with ci(); the previous code
            # reused the validation arrays G/P and called the undefined name
            # concordance_index, whose NameError was silently swallowed by
            # the bare except below, leaving the test metrics empty.
            w_ci.append(l*ci(test_G[s:s+l],test_P[s:s+l]))
            a_ci.append(ci(test_G[s:s+l],test_P[s:s+l]))
        except:
            pass
        s += l
    test_weight_ci, test_average_ci = np.sum(w_ci)/t_lens, np.mean(a_ci)
    # Save the testing result
    files = open(result_path,'a')
    files.write("test_MSE:" + str(test_MSE) + ", test_averageCI:" + str(test_average_ci) +
                ", test_weightedCI:" + str(test_weight_ci) + ", test_overallCI:" + str(test_CI) + "\n")
    files.write("best_epoch:" + str(best_epoch + 1) + ", best_train_loss:" + str(best_train_loss) + "\n")
    files.close()
if __name__ == "__main__":
    # Command-line interface for a single KIBA training run.
    parser = ArgumentParser(description='Starting...')
    parser.add_argument('--batchsize', default=512, type=int, metavar='N', help='Batch size')
    parser.add_argument('--epochs', default=200, type=int, metavar='N', help='Number of total epochs')
    parser.add_argument('--rounds', default=1, type=int, metavar='N', help='The Nth round')
    parser.add_argument('--lr', default=5e-4, type=float, metavar='LR', help='Initial learning rate', dest='lr')
    parser.add_argument('--cudanum', default=0, type=int, metavar='N', help='The Nth CUDA device')
    parser.add_argument('--model', default=0, type=int, metavar='N', help='Select from GINConvNet, GATNet, GAT_GCN, GCNNet')
    args = parser.parse_args()
    # Wall-clock timing around the whole run (timestamps are epoch seconds).
    beginT = time()
    print("Starting Time: {}".format(beginT))
    main(args)
    endT = time()
    print("Ending Time: {}".format(endT))
    print("Duration is: {}".format(endT - beginT))
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/utils_bindingDB.py | """Utils scripts for GraphDTA."""
import os
import numpy as np
from math import sqrt
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric import data as DATA
import torch
class TestbedDataset(InMemoryDataset):
    """In-memory PyTorch-Geometric dataset of drug-target pairs: per sample,
    the molecular graph of a SMILES string, the encoded target sequence, a
    group id, and the affinity label."""
    def __init__(self, root='/tmp', dataset='BindingDB', groups=None,
                 xd=None, xt=None, y=None, transform=None,
                 pre_transform=None,smile_graph=None):
        # Root is required for save preprocessed data, default is '/tmp'
        # NOTE(review): self.dataset is assigned only AFTER super().__init__,
        # yet processed_file_names reads it; this appears to rely on the
        # overridden _process() below keeping torch_geometric from touching
        # the processed paths eagerly -- confirm with the installed version.
        super(TestbedDataset, self).__init__(root, transform, pre_transform)
        self.dataset = dataset
        if os.path.isfile(self.processed_paths[0]):
            print('Pre-processed data found: {}, loading ...'.format(self.processed_paths[0]))
            self.data, self.slices = torch.load(self.processed_paths[0])
        else:
            # NOTE(review): process() below never writes processed_paths[0]
            # despite its "Saving to file" message, so this cache-hit branch
            # can seemingly never trigger on later runs -- verify.
            print('Pre-processed data {} not found, doing pre-processing...'.format(self.processed_paths[0]))
            self.process(groups,xd, xt, y,smile_graph)
    @property
    def processed_file_names(self):
        # Single cache file named after the dataset split.
        return [self.dataset + '.pt']
    def _process(self):
        # Override the base hook: only ensure the directory exists; the
        # actual processing is driven explicitly from __init__.
        if not os.path.exists(self.processed_dir):
            os.makedirs(self.processed_dir)
    def process(self, groups, xd, xt, y, smile_graph):
        """Customize the process method to fit the task of drug-target affinity prediction.
        Args:
            groups: List of group ids, one per sample.
            xd: List of SMILES.
            xt: List of encoded target (categorical or one-hot).
            y: List of labels.
            smile_graph: Dict mapping SMILES -> (c_size, features, edge_index).
        Returns:
            PyTorch-Geometric format processed data.
        """
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            group = groups[i]
            # Convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # Make the graph ready for PyTorch Geometrics GCN algorithms
            GCNData = DATA.Data(g=torch.FloatTensor([group]),
                                x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            # Append graph, label and target sequence to data list
            data_list.append(GCNData)
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        print('Graph construction done. Saving to file.')
        self.data, self.slices = self.collate(data_list)
def rmse(y,f):
    """Root-mean-squared error between labels y and predictions f."""
    return sqrt(((y - f) ** 2).mean(axis=0))
def mse(y,f):
    """Mean squared error between labels y and predictions f."""
    return ((y - f) ** 2).mean(axis=0)
def pearson(y,f):
    """Pearson correlation coefficient between labels y and predictions f."""
    return np.corrcoef(y, f)[0, 1]
def spearman(y,f):
    """Spearman rank correlation between labels y and predictions f."""
    return stats.spearmanr(y, f)[0]
def ci(y,f):
    """Concordance index of predictions f against labels y.

    Over all pairs with strictly different labels, counts pairs whose
    predictions are ordered the same way as the labels (ties in the
    prediction count half). Raises ZeroDivisionError when no pair of
    labels differs, matching the original behavior.
    """
    order = np.argsort(y)
    y = y[order]
    f = f[order]
    n = len(y)
    z = 0.0
    S = 0.0
    # After sorting ascending, every pair (j < i) with y[i] > y[j] is a
    # comparable pair; ties in y are skipped by the strict comparison.
    for i in range(1, n):
        for j in range(i):
            if y[i] > y[j]:
                z = z + 1
                diff = f[i] - f[j]
                if diff > 0:
                    S = S + 1
                elif diff == 0:
                    S = S + 0.5
    return S / z
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/processing.py | """Preprocessing scripts for GraphDTA."""
import pandas as pd
import numpy as np
import os
import rdkit
import sklearn
import torch
import json,pickle
from collections import OrderedDict
from rdkit import Chem
from rdkit.Chem import MolFromSmiles
import networkx as nx
from utils import *
# Global setting
# Protein sequence vocabulary (25 letters; note 'J' is absent -- presumably
# the amino-acid alphabet plus ambiguity codes; confirm against the data).
seq_voc = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# 1-based character -> integer id mapping; 0 is implicitly the padding value
# left in place by seq_cat's zero-initialized buffer.
seq_dict = {v:(i+1) for i,v in enumerate(seq_voc)}
seq_dict_len = len(seq_dict)
# Sequences longer than this are truncated; shorter ones are zero-padded.
max_seq_len = 1000
def one_of_k_encoding(x, allowable_set):
    """One-hot encode ``x`` over ``allowable_set``; unknown values raise."""
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
    return [x == item for item in allowable_set]
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode ``x``; values outside the set map to the last element."""
    target = x if x in allowable_set else allowable_set[-1]
    return [target == item for item in allowable_set]
def atom_features(atom):
    """Encode one RDKit atom as a numpy feature vector.

    Concatenates one-hot encodings of the atom symbol (44 options, with
    unknowns mapped to 'Unknown'), degree (0-10), total H count (0-10) and
    implicit valence (0-10), plus an aromaticity flag: 78 features total,
    matching ``num_features_xd=78`` expected by the backbone models.
    """
    return np.array(one_of_k_encoding_unk(atom.GetSymbol(),['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na','Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb','Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H','Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr','Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
                    one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    [atom.GetIsAromatic()])
def smile_to_graph(smile):
    """Convert a SMILES string to (atom count, node features, edge list).

    Returns:
        c_size: Number of atoms in the molecule.
        features: Per-atom feature vectors, each normalised to sum to 1.
        edge_index: Directed edge list [[src, dst], ...] holding both
            directions of every bond.
    """
    # NOTE(review): MolFromSmiles returns None for invalid SMILES, which
    # makes the next line raise AttributeError — callers must pass valid input.
    mol = Chem.MolFromSmiles(smile)
    c_size = mol.GetNumAtoms()
    features = []
    for atom in mol.GetAtoms():
        feature = atom_features(atom)
        features.append( feature / sum(feature) )
    edges = []
    for bond in mol.GetBonds():
        edges.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])
    # Building the graph from the bond list alone: a molecule with no bonds
    # yields an empty edge_index (isolated atoms carry no edges).
    g = nx.Graph(edges).to_directed()
    edge_index = []
    for e1, e2 in g.edges:
        edge_index.append([e1, e2])
    return c_size, features, edge_index
def seq_cat(prot):
    """Label-encode a protein string into a fixed-length numeric vector.

    Residues beyond ``max_seq_len`` are dropped; unused tail positions
    stay zero (padding).
    """
    encoded = np.zeros(max_seq_len)
    for pos in range(min(len(prot), max_seq_len)):
        encoded[pos] = seq_dict[prot[pos]]
    return encoded
def process_data(df, target_name):
    """Turn a DAVIS dataframe into model-ready arrays plus a SMILES-graph cache.

    Args:
        df: Dataframe whose column 1 is the protein sequence (fed to
            seq_cat below), column 2 the SMILES string, and column 3 the
            affinity label.
        target_name: Unused; kept for the callers that pass 'train'/'val'/'test'.

    Returns:
        Tuple (drug SMILES array, encoded protein array, label array,
        {smiles: graph} dict).
    """
    pairs=[]
    for _,row in df.iterrows():
        pair = []
        # Canonicalise the SMILES via an RDKit round-trip.
        lg = Chem.MolToSmiles(Chem.MolFromSmiles(row[2]), isomericSmiles=True)
        pair.append(lg)
        pair.append(row[1])
        pair.append(row[3])
        pairs.append(pair)
    pairs=pd.DataFrame(pairs)
    # Drug: build the molecular graph once per unique SMILES.
    compound_iso_smiles = pairs.iloc[:,0]
    compound_iso_smiles = set(compound_iso_smiles)
    smile_graph = {}
    for smile in compound_iso_smiles:
        g = smile_to_graph(smile)
        smile_graph[smile] = g
    train_drugs, train_prots, train_Y = list(pairs.iloc[:,0]),list(pairs.iloc[:,1]),list(pairs.iloc[:,2])
    XT = [seq_cat(t) for t in train_prots]
    train_drugs, train_prots, train_Y = np.asarray(train_drugs), np.asarray(XT), np.asarray(train_Y)
    return (train_drugs,train_prots,train_Y,smile_graph)
| 3,178 | 33.554348 | 316 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/train_davis.py | """Training scripts for GraphDTA backbone."""
import rdkit
import torch
import sklearn
import numpy as np
import pandas as pd
import sys, os
import random
from random import shuffle
from time import time
from rdkit import Chem
import torch.nn as nn
from argparse import ArgumentParser
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *
from processing import process_data
# Set random seed for reproducibility across numpy, python and torch.
seed = 1
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Disable cuDNN autotuning / force deterministic kernels so runs repeat exactly.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Set loss function (affinity regression).
loss_fn = nn.MSELoss()
# Log training progress every LOG_INTERVAL batches.
LOG_INTERVAL = 20
# Training script
def train(model, device, train_loader, optimizer, epoch):
    """Train the GraphDTA backbone model for one epoch.

    Args:
        model: GraphDTA backbone model.
        device: Device.
        train_loader: Dataloader of training set.
        optimizer: Optimizer.
        epoch: Epoch number (used only for logging).

    Returns:
        loss: Training loss of the last processed batch (assumes the
            loader yields at least one batch).
    """
    print('Training on {} samples...'.format(len(train_loader.dataset)))
    model.train()
    for batch_idx, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = loss_fn(output, data.y.view(-1, 1).float().to(device))
        loss.backward()
        optimizer.step()
        if batch_idx % LOG_INTERVAL == 0:
            # len(data.y) is the number of samples in the batch; the original
            # used len(data.x), which counts graph *nodes* in the PyG batch
            # and wildly over-reports progress.
            print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch,
                                                                           batch_idx * len(data.y),
                                                                           len(train_loader.dataset),
                                                                           100. * batch_idx / len(train_loader),
                                                                           loss.item()))
    return loss.item()
def predicting(model, device, loader):
    """Run inference over a dataloader and collect labels and predictions.

    Args:
        model: GraphDTA backbone model.
        device: Device.
        loader: Dataloader of validation/testing set.

    Returns:
        res_label: Ground-truth labels for every sample, in loader order.
        res_pred: Model predictions for every sample, in loader order.
    """
    model.eval()
    total_preds = torch.Tensor()
    total_labels = torch.Tensor()
    print('Make prediction for {} samples...'.format(len(loader.dataset)))
    with torch.no_grad():
        for data in loader:
            data = data.to(device)
            output = model(data)
            # Accumulate every batch. The original concatenated onto the
            # *empty* tensors each iteration, so it silently returned only
            # the final batch (and raised NameError on an empty loader).
            total_preds = torch.cat((total_preds, output.cpu()), 0)
            total_labels = torch.cat((total_labels, data.y.view(-1, 1).cpu()), 0)
    return total_labels.numpy().flatten(), total_preds.numpy().flatten()
def main(args):
    """End-to-end train/validate/test loop for one DAVIS CV round.

    Loads the CSV splits, builds PyG datasets, trains the selected backbone,
    tracks the best epoch by validation average CI, then evaluates that
    checkpoint on the test split and appends all metrics to a result file.
    """
    # Basic settings
    best_ci = 0
    best_epoch = 0
    best_train_loss = 10000
    rounds = args.rounds
    # Set CUDA device
    cuda_name = "cuda:" + str(args.cudanum)
    device = torch.device(cuda_name if torch.cuda.is_available() else "cpu")
    # Pick the backbone class by index (0..3).
    modeling = [GINConvNet, GATNet, GAT_GCN, GCNNet][args.model]
    model_st = modeling.__name__
    print(model_st)
    model = modeling().to(device)
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) # Adam
    # Load data
    train_data = pd.read_csv("../../Data/DAVIS/CV"+str(rounds)+"/CV"+str(rounds)+"_DAVIS_unseenP_seenD_train.csv")
    val_data = pd.read_csv("../../Data/DAVIS/CV"+str(rounds)+"/CV"+str(rounds)+"_DAVIS_unseenP_seenD_val.csv")
    test_data = pd.read_csv("../../Data/DAVIS/test_DAVIS_unseenP_seenD.csv")
    train_set = process_data(train_data, 'train')
    val_set = process_data(val_data, 'val')
    test_set = process_data(test_data, 'test')
    train_generator = TestbedDataset(root = 'dataset', dataset = 'DAVIS_train' + str(rounds), xd = train_set[0],
                                     xt = train_set[1], y = train_set[2], smile_graph = train_set[3])
    val_generator = TestbedDataset(root = 'dataset', dataset = 'DAVIS_val' + str(rounds), xd = val_set[0],
                                   xt = val_set[1], y = val_set[2], smile_graph = val_set[3])
    test_generator = TestbedDataset(root = 'dataset', dataset = 'DAVIS_test', xd = test_set[0],
                                    xt = test_set[1], y = test_set[2], smile_graph = test_set[3])
    # Make mini-batch processing
    train_loader = DataLoader(train_generator, batch_size = args.batchsize, shuffle = True)
    val_loader = DataLoader(val_generator, batch_size = args.batchsize, shuffle = False)
    test_loader = DataLoader(test_generator, batch_size = args.batchsize, shuffle = False)
    # Training...
    print("Training.....")
    for epoch in range(args.epochs):
        print("===============Go for Training===============")
        train_loss = train(model, device, train_loader, optimizer, epoch+1)
        # Validation...
        G, P = predicting(model, device, val_loader)
        val_ci = ci(G, P)
        # Per-group CI over blocks of 68 consecutive samples — presumably the
        # 68 DAVIS drugs per protein target; confirm against the data layout.
        lens = int(len(G)/68)
        average_ci = np.mean([ci(G[x*68:(x+1)*68],P[x*68:(x+1)*68]) for x in range(0,lens)])
        print("===============Go for Validation===============")
        # NOTE(review): no separate weighted CI is computed here — both lines
        # below print average_ci (with equal group sizes the two coincide).
        print("Weighted CI:",average_ci)
        print("Average CI:",average_ci)
        print("Overall CI:",val_ci)
        # NOTE(review): the result file is re-opened in append mode every
        # epoch and never closed — a file-handle leak.
        files = open("bestResult/GraphDTA_"+model_st+"_davis_result"+str(args.rounds)+".txt",'a')
        files.write("val_averageCI: "+str(average_ci)+", val_weightedCI: "+str(average_ci)+", val_overallCI: "+str(val_ci)+", train_loss: "+str(train_loss)+'\n')
        model_name = "bestModel/GraphDTA_"+model_st+"_davis_"+str(rounds)+".model"
        # Save the best result (checkpoint on improved validation average CI).
        if average_ci > best_ci:
            best_ci = average_ci
            best_epoch = epoch
            best_train_loss = train_loss
            # Save best model
            print("Saving the best model...")
            torch.save(model.state_dict(), model_name)
    print("===============Go for Testing===============")
    # Reload the best checkpoint (assumes at least one epoch ran, so that
    # model_name/files are bound).
    model.load_state_dict(torch.load(model_name))
    # Testing...
    test_G, test_P = predicting(model, device, test_loader)
    test_CI, test_MSE = ci(test_G,test_P), mse(test_G,test_P)
    # Calculate Weighted CI, Average CI of testing set
    t_lens = int(len(test_G)/68)
    test_average_ci = np.mean([ci(test_G[x*68:(x+1)*68],test_P[x*68:(x+1)*68]) for x in range(0,t_lens)])
    # Save the testing result
    files.write("test_MSE:" + str(test_MSE) + ", test_averageCI:" +
                str(test_average_ci) + ", test_weightedCI:" + str(test_average_ci) + ", test_overallCI:" + str(test_CI) + "\n")
    files.write("best_epoch:" + str(best_epoch + 1) + ", best_train_loss:" + str(best_train_loss) + "\n")
if __name__ == "__main__":
    # Command-line entry point: parse arguments, run training, report wall time.
    parser = ArgumentParser(description='Starting...')
    parser.add_argument('--batchsize', default=512, type=int, metavar='N', help='Batch size')
    parser.add_argument('--epochs', default=100, type=int, metavar='N', help='Number of total epochs')
    parser.add_argument('--rounds', default=1, type=int, metavar='N', help='The Nth round')
    parser.add_argument('--lr', default=5e-4, type=float, metavar='LR', help='Initial learning rate', dest='lr')
    parser.add_argument('--cudanum', default=0, type=int, metavar='N', help='The Nth CUDA device')
    parser.add_argument('--model', default=0, type=int, metavar='N', help='Select from GINConvNet, GATNet, GAT_GCN, GCNNet')
    args = parser.parse_args()
    beginT = time()
    print("Starting Time: {}".format(beginT))
    main(args)
    endT = time()
    print("Ending Time: {}".format(endT))
    print("Duration is: {}".format(endT - beginT))
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/utils.py | """Utils scripts for GraphDTA."""
import os
import numpy as np
from math import sqrt
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric import data as DATA
import torch
class TestbedDataset(InMemoryDataset):
    """In-memory PyG dataset pairing drug graphs with encoded protein targets.

    Each item is a torch_geometric Data object with node features ``x``,
    ``edge_index``, affinity label ``y``, the encoded protein in ``target``
    and the atom count in ``c_size``.
    """
    def __init__(self, root='/tmp', dataset='DAVIS',
                 xd=None, xt=None, y=None, transform=None,
                 pre_transform=None,smile_graph=None):
        # Root is required for save preprocessed data, default is '/tmp'.
        # NOTE(review): self.dataset is assigned only *after* super().__init__;
        # this works here solely because _process below is overridden and does
        # not touch processed_file_names during base-class initialisation.
        super(TestbedDataset, self).__init__(root, transform, pre_transform)
        self.dataset = dataset
        if os.path.isfile(self.processed_paths[0]):
            print('Pre-processed data found: {}, loading ...'.format(self.processed_paths[0]))
            self.data, self.slices = torch.load(self.processed_paths[0])
        else:
            print('Pre-processed data {} not found, doing pre-processing...'.format(self.processed_paths[0]))
            self.process(xd, xt, y,smile_graph)
    @property
    def raw_file_names(self):
        # No raw files: data arrives pre-parsed through the constructor.
        pass
    @property
    def processed_file_names(self):
        return [self.dataset + '.pt']
    def download(self):
        # Download to `self.raw_dir`. Nothing to download here.
        pass
    def _download(self):
        pass
    def _process(self):
        # Only ensure the processed directory exists; overrides the base
        # implementation so __init__ does not trigger a full process() run.
        if not os.path.exists(self.processed_dir):
            os.makedirs(self.processed_dir)
    def process(self, xd, xt, y,smile_graph):
        """Customize the process method to fit the task of drug-target affinity prediction.

        Args:
            xd: List of SMILES.
            xt: List of encoded target (categorical or one-hot).
            y: List of labels.
            smile_graph: Dict mapping SMILES -> (c_size, features, edge_index).

        Builds the in-memory (data, slices) pair used by InMemoryDataset.
        NOTE(review): despite the 'Saving to file' message below, nothing is
        written to processed_paths[0] here, so the load branch in __init__
        only fires if the file was produced elsewhere.
        """
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # Convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # Make the graph ready for PyTorch Geometrics GCN algorithms
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            # Append graph, label and target sequence to data list
            data_list.append(GCNData)
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        print('Graph construction done. Saving to file.')
        self.data, self.slices = self.collate(data_list)
def rmse(y,f):
    """Compute the root-mean-squared error of predictions ``f`` vs labels ``y``."""
    deviation = y - f
    return sqrt((deviation * deviation).mean(axis=0))
def mse(y,f):
    """Compute the mean squared error of predictions ``f`` vs labels ``y``."""
    return ((y - f) ** 2).mean(axis=0)
def pearson(y,f):
    """Compute the Pearson correlation of ``y`` and ``f``."""
    corr_matrix = np.corrcoef(y, f)
    return corr_matrix[0, 1]
def spearman(y,f):
    """Compute the Spearman rank correlation of ``y`` and ``f``."""
    result = stats.spearmanr(y, f)
    return result[0]
def ci(y,f):
    """Concordance index of predictions ``f`` against labels ``y``.

    Over all pairs whose labels differ (after sorting by label), a pair
    contributes 1 when the predictions are ordered like the labels and
    0.5 when the predictions tie; the result is the mean contribution.

    Note: raises ZeroDivisionError when no two labels differ (z == 0).
    """
    ind = np.argsort(y)
    y = y[ind]
    f = f[ind]
    i = len(y)-1
    j = i-1
    z = 0.0  # number of comparable (label-distinct) pairs
    S = 0.0  # accumulated concordance score
    while i > 0:
        while j >= 0:
            if y[i] > y[j]:
                z = z+1
                u = f[i] - f[j]
                if u > 0:
                    S = S + 1
                elif u == 0:
                    S = S + 0.5
            j = j - 1
        i = i - 1
        j = i-1
    ci = S/z
    return ci
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/train_bindingDB.py | """Training scripts for GraphDTA backbone."""
import rdkit
import torch
import sklearn
import numpy as np
import pandas as pd
import sys, os
import random
from random import shuffle
from time import time
from rdkit import Chem
import torch.nn as nn
from argparse import ArgumentParser
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils_bindingDB import *
from preprocess import process_data
# Set random seed for reproducibility across numpy, python and torch.
seed = 1
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Disable cuDNN autotuning / force deterministic kernels so runs repeat exactly.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Set loss function (affinity regression).
loss_fn = nn.MSELoss()
# Log training progress every LOG_INTERVAL batches.
LOG_INTERVAL = 20
# Training script
def train(model, device, train_loader, optimizer, epoch):
    """Train the GraphDTA backbone model for one epoch.

    Args:
        model: GraphDTA backbone model.
        device: Device.
        train_loader: Dataloader of training set.
        optimizer: Optimizer.
        epoch: Epoch number (used only for logging).

    Returns:
        loss: Training loss of the last processed batch (assumes the
            loader yields at least one batch).
    """
    print('Training on {} samples...'.format(len(train_loader.dataset)))
    model.train()
    for batch_idx, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = loss_fn(output, data.y.view(-1, 1).float().to(device))
        loss.backward()
        optimizer.step()
        if batch_idx % LOG_INTERVAL == 0:
            # len(data.y) is the number of samples in the batch; the original
            # used len(data.x), which counts graph *nodes* in the PyG batch
            # and wildly over-reports progress.
            print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch,
                                                                           batch_idx * len(data.y),
                                                                           len(train_loader.dataset),
                                                                           100. * batch_idx / len(train_loader),
                                                                           loss.item()))
    return loss.item()
def predicting(model, device, loader):
    """Run inference over a dataloader; collect labels, predictions and groups.

    Args:
        model: GraphDTA backbone model.
        device: Device.
        loader: Dataloader of validation/testing set.

    Returns:
        res_label: Ground-truth labels for every sample, in loader order.
        res_pred: Model predictions for every sample, in loader order.
        res_group: Group id of every sample, in loader order.
    """
    model.eval()
    total_preds = torch.Tensor()
    total_labels = torch.Tensor()
    total_groups = torch.Tensor()
    print('Make prediction for {} samples...'.format(len(loader.dataset)))
    with torch.no_grad():
        for data in loader:
            data = data.to(device)
            output = model(data)
            # Accumulate every batch. The original concatenated onto the
            # *empty* tensors each iteration, so it silently returned only
            # the final batch (and raised NameError on an empty loader).
            total_preds = torch.cat((total_preds, output.cpu()), 0)
            total_labels = torch.cat((total_labels, data.y.view(-1, 1).cpu()), 0)
            total_groups = torch.cat((total_groups, data.g.view(-1, 1).cpu()), 0)
    return total_labels.numpy().flatten(), total_preds.numpy().flatten(), total_groups.numpy().flatten()
def main(args):
    """End-to-end train/validate/test loop on BindingDB (Ki).

    Loads the CSV splits, builds PyG datasets (each sample tagged with a
    group id), trains the selected backbone, tracks the best epoch by
    validation average per-group CI, then evaluates that checkpoint on the
    test split and appends all metrics to a result file.
    """
    # Basic settings
    best_ci = 0
    best_epoch = 0
    best_train_loss = 10000
    rounds = args.rounds
    # Set CUDA device
    cuda_name = "cuda:" + str(args.cudanum)
    device = torch.device(cuda_name if torch.cuda.is_available() else "cpu")
    # Pick the backbone class by index (0..3).
    modeling = [GINConvNet, GATNet, GAT_GCN, GCNNet][args.model]
    model_st = modeling.__name__
    print(model_st)
    model = modeling().to(device)
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) # Adam
    # Load data
    train_data = pd.read_csv("../../Data/BindingDB/BindingDB_values_mixed_train_ki_filter.csv")
    val_data = pd.read_csv("../../Data/BindingDB/BindingDB_values_mixed_val_ki_filter.csv")
    test_data = pd.read_csv("../../Data/BindingDB/BindingDB_values_mixed_test_ki_filter.csv")
    train_set = process_data(train_data)
    val_set = process_data(val_data)
    test_set = process_data(test_data)
    train_generator = TestbedDataset(root = 'dataset', dataset = 'BindingDB_train', groups=train_set[0], xd = train_set[1],
                                     xt = train_set[2], y = train_set[3], smile_graph = train_set[4])
    val_generator = TestbedDataset(root = 'dataset', dataset = 'BindingDB_val', groups=val_set[0], xd = val_set[1],
                                   xt = val_set[2], y = val_set[3], smile_graph = val_set[4])
    test_generator = TestbedDataset(root = 'dataset', dataset = 'BindingDB_test', groups=test_set[0], xd = test_set[1],
                                    xt = test_set[2], y = test_set[3], smile_graph = test_set[4])
    # Make mini-batch processing
    train_loader = DataLoader(train_generator, batch_size = args.batchsize, shuffle = True)
    val_loader = DataLoader(val_generator, batch_size = args.batchsize, shuffle = False)
    test_loader = DataLoader(test_generator, batch_size = args.batchsize, shuffle = False)
    # Training...
    print("Training.....")
    for epoch in range(args.epochs):
        print("===============Go for Training===============")
        train_loss = train(model, device, train_loader, optimizer, epoch+1)
        # Validation...
        G, P, group_li = predicting(model, device, val_loader)
        val_ci = ci(G, P)
        # Count samples per group id. NOTE(review): the slicing below assumes
        # same-group samples are contiguous in loader order (shuffle=False)
        # and relies on dict insertion order — confirm the CSVs are grouped.
        result = {}
        for gl in group_li:
            if result.get(gl) == None:
                result[gl] = 1
            else:
                result[gl] += 1
        lens = []
        lens.extend(result.values())
        # Skip len=1 data (a single-sample group has no comparable pairs).
        k = 0
        new_G, new_P, new_lens = [], [], []
        for ll in lens:
            if ll == 1:
                k += 1
            else:
                new_G.extend(G[k:k+ll])
                new_P.extend(P[k:k+ll])
                new_lens.append(ll)
                k += ll
        new_G, new_P = np.array(new_G), np.array(new_P)
        # Calculate Weighted CI, Average CI of validation set
        s = 0
        w_ci,a_ci = [],[]
        for l in new_lens:
            # NOTE(review): the bare except silently drops groups whose CI
            # computation fails (e.g. all-equal labels -> division by zero).
            try:
                w_ci.append(l*ci(new_G[s:s+l],new_P[s:s+l]))
                a_ci.append(ci(new_G[s:s+l],new_P[s:s+l]))
            except:
                pass
            s += l
        weight_ci, average_ci = np.sum(w_ci)/np.sum(new_lens), np.mean(a_ci)
        print("===============Go for Validation===============")
        print("Weighted CI:",weight_ci)
        print("Average CI:",average_ci)
        print("Overall CI:",val_ci)
        # NOTE(review): the result file is re-opened in append mode every
        # epoch and never closed — a file-handle leak.
        files = open("bestResult/GraphDTA_"+model_st+"_BindingDB_ki_result"+str(args.rounds)+".txt",'a')
        files.write("val_averageCI: "+str(average_ci)+", val_weightedCI: "+str(weight_ci)+", val_overallCI: "+str(val_ci)+", train_loss: "+str(train_loss)+'\n')
        model_name = "bestModel/GraphDTA_"+model_st+"_BindingDB_ki_"+str(rounds)+".model"
        # Save the best result (checkpoint on improved validation average CI).
        if average_ci > best_ci:
            best_ci = average_ci
            best_epoch = epoch
            best_train_loss = train_loss
            # Save best model
            print("Saving the best model...")
            torch.save(model.state_dict(), model_name)
    print("===============Go for Testing===============")
    # Reload the best checkpoint (assumes at least one epoch ran, so that
    # model_name/files are bound).
    model.load_state_dict(torch.load(model_name))
    # Testing...
    test_G, test_P, test_group_li = predicting(model, device, test_loader)
    test_CI, test_MSE = ci(test_G,test_P), mse(test_G,test_P)
    # Get length of testing set (same per-group bookkeeping as validation).
    t_result = {}
    for t_gl in test_group_li:
        if t_result.get(t_gl)==None:
            t_result[t_gl]=1
        else:
            t_result[t_gl]+=1
    t_lens = []
    t_lens.extend(t_result.values())
    # Skip len=1 data
    t_k = 0
    t_new_G,t_new_P,t_new_lens = [],[],[]
    for t_ll in t_lens:
        if t_ll == 1:
            t_k += 1
        else:
            t_new_G.extend(test_G[t_k:t_k+t_ll])
            t_new_P.extend(test_P[t_k:t_k+t_ll])
            t_new_lens.append(t_ll)
            t_k += t_ll
    t_new_G, t_new_P = np.array(t_new_G), np.array(t_new_P)
    # Calculate Weighted CI, Average CI of testing set
    t_s = 0
    t_w_ci,t_a_ci = [],[]
    for t_l in t_new_lens:
        try:
            t_w_ci.append(t_l*ci(t_new_G[t_s:t_s+t_l],t_new_P[t_s:t_s+t_l]))
            t_a_ci.append(ci(t_new_G[t_s:t_s+t_l],t_new_P[t_s:t_s+t_l]))
        except:
            pass
        t_s += t_l
    test_weight_ci, test_average_ci = np.sum(t_w_ci)/np.sum(t_new_lens), np.mean(t_a_ci)
    # Save the testing result
    files.write("test_MSE:" + str(test_MSE) + ", test_averageCI:" +
                str(test_average_ci) + ", test_weightedCI:" + str(test_weight_ci) + ", test_overallCI:" + str(test_CI) + "\n")
    files.write("best_epoch:" + str(best_epoch + 1) + ", best_train_loss:" + str(best_train_loss) + "\n")
if __name__ == "__main__":
    # Command-line entry point: parse arguments, run training, report wall time.
    parser = ArgumentParser(description='Starting...')
    parser.add_argument('--batchsize', default=512, type=int, metavar='N', help='Batch size')
    parser.add_argument('--epochs', default=50, type=int, metavar='N', help='Number of total epochs')
    parser.add_argument('--rounds', default=1, type=int, metavar='N', help='The Nth round')
    parser.add_argument('--lr', default=5e-4, type=float, metavar='LR', help='Initial learning rate', dest='lr')
    parser.add_argument('--cudanum', default=0, type=int, metavar='N', help='The Nth CUDA device')
    parser.add_argument('--model', default=0, type=int, metavar='N', help='Select from GINConvNet, GATNet, GAT_GCN, GCNNet')
    args = parser.parse_args()
    beginT = time()
    print("Starting Time: {}".format(beginT))
    main(args)
    endT = time()
    print("Ending Time: {}".format(endT))
    print("Duration is: {}".format(endT - beginT))
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/preprocess.py | """Preprocessing scripts for GraphDTA."""
import pandas as pd
import numpy as np
import os
import rdkit
import sklearn
import torch
import json,pickle
from collections import OrderedDict
from rdkit import Chem
from rdkit.Chem import MolFromSmiles
import networkx as nx
from utils import *
# Global protein-sequence encoding settings.
seq_voc = "ABCDEFGHIKLMNOPQRSTUVWXYZ"  # amino-acid alphabet used for label encoding
seq_dict = {v:(i+1) for i,v in enumerate(seq_voc)}  # residue -> integer id (1-based; 0 is padding)
seq_dict_len = len(seq_dict)  # vocabulary size (25)
max_seq_len = 1000  # proteins are truncated / zero-padded to this length in seq_cat()
def one_of_k_encoding(x, allowable_set):
    """Strict one-hot encoding of ``x`` over ``allowable_set``; unknown values raise."""
    if x in allowable_set:
        return [x == s for s in allowable_set]
    raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode ``x``; inputs outside the set map to the last element."""
    if x not in allowable_set:
        x = allowable_set[-1]
    return [x == s for s in allowable_set]
def atom_features(atom):
    """Encode one RDKit atom as a numpy feature vector.

    Concatenates one-hot encodings of the atom symbol (44 options, with
    unknowns mapped to 'Unknown'), degree (0-10), total H count (0-10) and
    implicit valence (0-10), plus an aromaticity flag: 78 features total,
    matching ``num_features_xd=78`` expected by the backbone models.
    """
    return np.array(one_of_k_encoding_unk(atom.GetSymbol(),['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na','Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb','Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H','Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr','Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
                    one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
                    [atom.GetIsAromatic()])
def smile_to_graph(smile):
    """Convert a SMILES string to (atom count, node features, edge list).

    Returns:
        c_size: Number of atoms in the molecule.
        features: Per-atom feature vectors, each normalised to sum to 1.
        edge_index: Directed edge list [[src, dst], ...] holding both
            directions of every bond.
    """
    # NOTE(review): MolFromSmiles returns None for invalid SMILES, which
    # makes the next line raise AttributeError — callers must pass valid input.
    mol = Chem.MolFromSmiles(smile)
    c_size = mol.GetNumAtoms()
    features = []
    for atom in mol.GetAtoms():
        feature = atom_features(atom)
        features.append( feature / sum(feature) )
    edges = []
    for bond in mol.GetBonds():
        edges.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])
    # Building the graph from the bond list alone: a molecule with no bonds
    # yields an empty edge_index (isolated atoms carry no edges).
    g = nx.Graph(edges).to_directed()
    edge_index = []
    for e1, e2 in g.edges:
        edge_index.append([e1, e2])
    return c_size, features, edge_index
def seq_cat(prot):
    """Encode a protein string as integer ids, zero-padded/truncated to max_seq_len."""
    vec = np.zeros(max_seq_len)
    for idx, residue in enumerate(prot[:max_seq_len]):
        vec[idx] = seq_dict[residue]
    return vec
def process_data(df):
    """Turn a BindingDB dataframe into model-ready arrays plus a graph cache.

    Args:
        df: Dataframe whose column 0 is the protein sequence, column 1 the
            SMILES string, column 2 the target name (group id) and column 4
            the affinity label.

    Returns:
        Tuple (group ids, drug SMILES, encoded proteins, labels,
        {smiles: graph} dict), with rows removed when the SMILES fails to
        parse or its molecular graph has no edges.
    """
    pairs=[]
    i = 0
    for _,row in df.iterrows():
        # NOTE(review): the bare except counts *any* failure (bad SMILES,
        # unknown residue in seq_cat, ...) as a discarded row.
        try:
            pair = []
            lg = Chem.MolToSmiles(Chem.MolFromSmiles(row[1]), isomericSmiles=True) # smiles
            pair.append(lg)
            pair.append(seq_cat(row[0]))
            pair.append(row[4]) # label
            pair.append(row[2]) # target name
            pairs.append(pair)
        except:
            i += 1
    print('discard {} SMILES'.format(i))
    pairs=pd.DataFrame(pairs)
    # Drug: build the molecular graph once per unique SMILES, flagging
    # molecules whose edge list is empty (edge-less graphs break PyG batching).
    compound_iso_smiles = pairs.iloc[:,0]
    compound_iso_smiles = set(compound_iso_smiles)
    smile_graph = {}
    outlier_smiles = []
    for smile in compound_iso_smiles:
        g = smile_to_graph(smile)
        smile_graph[smile] = g
        _, _, edge_index = g
        edge_index=torch.LongTensor(edge_index)
        if len(edge_index.shape) == 1:
            outlier_smiles.append(smile)
    print('we discard smiles sequence : {}'.format(outlier_smiles))
    train_drugs, train_prots, train_Y, target_name= list(pairs.iloc[:,0]),list(pairs.iloc[:,1]),list(pairs.iloc[:,2]), list(pairs.iloc[:,3])
    target_name, train_drugs, train_prots, train_Y = np.asarray(target_name), np.asarray(train_drugs), np.asarray(train_prots), np.asarray(train_Y)
    # Mask out every row whose drug is an outlier SMILES.
    mask = np.full(len(train_drugs),True)
    for i in outlier_smiles:
        temp = train_drugs != i
        mask = mask & temp
    target_name = target_name[mask]
    train_drugs = train_drugs[mask]
    train_prots = train_prots[mask]
    train_Y = train_Y[mask]
    return (target_name, train_drugs, train_prots, train_Y, smile_graph)
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/models/ginconv.py | """GraphDTA_GIN backbone model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv, global_add_pool
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
# GINConv backbone model
class GINConvNet(torch.nn.Module):
    """GraphDTA backbone using five GIN convolutions for the drug graph.

    Combines a GIN encoder over the SMILES molecular graph with a 1-D
    convolutional encoder over the label-encoded protein sequence, then
    regresses the binding affinity from the concatenated embeddings.
    """
    def __init__(self, n_output=1, num_features_xd=78, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):
        # n_output: output dimension (1 for affinity regression);
        # num_features_xd: per-atom feature size; num_features_xt: protein
        # vocabulary size; the rest size the protein branch and dropout.
        super(GINConvNet, self).__init__()
        # Basic config
        dim = 32
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.n_output = n_output
        # SMILES graph branch: five stacked GINConv blocks (MLP + batch norm each).
        nn1 = Sequential(Linear(num_features_xd, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)
        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)
        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)
        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)
        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)
        self.fc1_xd = Linear(dim, output_dim)
        # Protein sequence branch (1d conv): the 1000-residue axis is treated
        # as Conv1d channels, giving output shape (batch, 32, 128-8+1 = 121).
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc1_xt = nn.Linear(32*121, output_dim)
        # Combined layers (256 = drug embedding 128 + protein embedding 128).
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, self.n_output)        # n_output = 1 for regression task
    def forward(self, data):
        """Predict affinity for a PyG batch carrying x/edge_index/batch/target."""
        # Get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Get protein input
        target = data.target
        x = F.relu(self.conv1(x, edge_index))
        x = self.bn1(x)
        x = F.relu(self.conv2(x, edge_index))
        x = self.bn2(x)
        x = F.relu(self.conv3(x, edge_index))
        x = self.bn3(x)
        x = F.relu(self.conv4(x, edge_index))
        x = self.bn4(x)
        x = F.relu(self.conv5(x, edge_index))
        x = self.bn5(x)
        # Sum node embeddings per graph to get one vector per molecule.
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1_xd(x))
        x = F.dropout(x, p=0.2, training=self.training)
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        # Flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc1_xt(xt)
        # Concat drug and protein embeddings.
        xc = torch.cat((x, xt), 1)
        # Add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
| 3,247 | 34.304348 | 91 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/models/gcn.py | """GraphDTA_GCN backbone model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_max_pool as gmp
# GCN backbone model
class GCNNet(torch.nn.Module):
    """GraphDTA backbone using three GCN convolutions for the drug graph.

    Combines a GCN encoder over the SMILES molecular graph with a 1-D
    convolutional encoder over the label-encoded protein sequence, then
    regresses the binding affinity from the concatenated embeddings.
    """
    def __init__(self, n_output=1, n_filters=32, embed_dim=128,num_features_xd=78, num_features_xt=25, output_dim=128, dropout=0.2):
        super(GCNNet, self).__init__()
        # Basic config
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        # SMILES graph branch: channel width grows 78 -> 156 -> 312.
        self.n_output = n_output
        self.conv1 = GCNConv(num_features_xd, num_features_xd)
        self.conv2 = GCNConv(num_features_xd, num_features_xd*2)
        self.conv3 = GCNConv(num_features_xd*2, num_features_xd * 4)
        self.fc_g1 = torch.nn.Linear(num_features_xd*4, 1024)
        self.fc_g2 = torch.nn.Linear(1024, output_dim)
        # Protein sequence branch (1d conv): the 1000-residue axis is treated
        # as Conv1d channels, giving output shape (batch, 32, 128-8+1 = 121).
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc1_xt = nn.Linear(32*121, output_dim)
        # Combined layers
        self.fc1 = nn.Linear(2*output_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)
    def forward(self, data):
        """Predict affinity for a PyG batch carrying x/edge_index/batch/target."""
        # Get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Get protein input
        target = data.target
        x = self.conv1(x, edge_index)
        x = self.relu(x)
        x = self.conv2(x, edge_index)
        x = self.relu(x)
        x = self.conv3(x, edge_index)
        x = self.relu(x)
        x = gmp(x, batch)       # global max pooling
        # Flatten
        x = self.relu(self.fc_g1(x))
        x = self.dropout(x)
        x = self.fc_g2(x)
        x = self.dropout(x)
        # 1d conv layers
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        # Flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc1_xt(xt)
        # Concat drug and protein embeddings.
        xc = torch.cat((x, xt), 1)
        # Add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
| 2,487 | 30.897436 | 132 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/models/gat.py | """GraphDTA_GAT backbone model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GATConv
from torch_geometric.nn import global_max_pool as gmp
# GAT backbone model
class GATNet(torch.nn.Module):
    """GraphDTA backbone using two GAT convolutions for the drug graph.

    Combines a graph-attention encoder over the SMILES molecular graph with
    a 1-D convolutional encoder over the label-encoded protein sequence,
    then regresses the binding affinity from the concatenated embeddings.
    """
    def __init__(self, num_features_xd=78, n_output=1, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):
        super(GATNet, self).__init__()
        # Basic config
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        # SMILES graph branch: 10 attention heads widen the first layer's
        # output to num_features_xd * 10. (Attributes are named gcn1/gcn2
        # but are GAT layers.)
        self.gcn1 = GATConv(num_features_xd, num_features_xd, heads=10, dropout=dropout)
        self.gcn2 = GATConv(num_features_xd * 10, output_dim, dropout=dropout)
        self.fc_g1 = nn.Linear(output_dim, output_dim)
        # Protein sequence branch (1d conv): the 1000-residue axis is treated
        # as Conv1d channels, giving output shape (batch, 32, 128-8+1 = 121).
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc_xt1 = nn.Linear(32*121, output_dim)
        # Combined layers (256 = drug embedding 128 + protein embedding 128).
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, n_output)
    def forward(self, data):
        """Predict affinity for a PyG batch carrying x/edge_index/batch/target."""
        # Get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Get protein input
        target = data.target
        x = F.dropout(x, p=0.2, training=self.training)
        x = F.elu(self.gcn1(x, edge_index))
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.gcn2(x, edge_index)
        x = self.relu(x)
        x = gmp(x, batch)          # global max pooling
        x = self.fc_g1(x)
        x = self.relu(x)
        # 1d conv layers
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt1(embedded_xt)
        conv_xt = self.relu(conv_xt)
        # Flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc_xt1(xt)
        # Concat drug and protein embeddings.
        xc = torch.cat((x, xt), 1)
        # Add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
| 2,428 | 31.386667 | 90 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/GraphDTA/models/gat_gcn.py | """GraphDTA_GATGCN backbone model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GCNConv, GATConv, GINConv, global_add_pool
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
# GATGCN backbone model
class GAT_GCN(torch.nn.Module):
    """GraphDTA backbone combining one GAT layer and one GCN layer.

    Encodes the SMILES molecular graph with a GAT layer followed by a GCN
    layer, pools with both global max and mean pooling, and combines the
    result with a 1-D convolutional protein-sequence encoder to regress
    the binding affinity.
    """
    def __init__(self, n_output=1, num_features_xd=78, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):
        super(GAT_GCN, self).__init__()
        # Basic config
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.n_output = n_output
        # SMILES graph branch: 10 attention heads widen the GAT output to
        # num_features_xd * 10; the *2 below accounts for concatenated
        # max-pool + mean-pool readouts.
        self.conv1 = GATConv(num_features_xd, num_features_xd, heads=10)
        self.conv2 = GCNConv(num_features_xd*10, num_features_xd*10)
        self.fc_g1 = torch.nn.Linear(num_features_xd*10*2, 1500)
        self.fc_g2 = torch.nn.Linear(1500, output_dim)
        # Protein sequence branch (1d conv): the 1000-residue axis is treated
        # as Conv1d channels, giving output shape (batch, 32, 128-8+1 = 121).
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc1_xt = nn.Linear(32*121, output_dim)
        # Combined layers (256 = drug embedding 128 + protein embedding 128).
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)        # n_output = 1 for regression task
    def forward(self, data):
        """Predict affinity for a PyG batch carrying x/edge_index/batch/target."""
        # Get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Get protein input
        target = data.target
        x = self.conv1(x, edge_index)
        x = self.relu(x)
        x = self.conv2(x, edge_index)
        x = self.relu(x)
        # Apply global max pooling (gmp) and global mean pooling (gap)
        x = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = self.relu(self.fc_g1(x))
        x = self.dropout(x)
        x = self.fc_g2(x)
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        # Flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc1_xt(xt)
        # Concat drug and protein embeddings.
        xc = torch.cat((x, xt), 1)
        # Add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
| 2,577 | 33.373333 | 91 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pointwise/Moltrans/helper/utils/paddle_io.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paddle IO
"""
from paddle.io import Dataset, IterableDataset
import paddle
import warnings
import bisect
class ConcatDataset(Dataset):
    r"""Dataset formed by concatenating several datasets end to end.

    Arguments:
        datasets (sequence): datasets to concatenate, in order.
    """

    @staticmethod
    def cumsum(sequence):
        """Return the running totals of ``len(d)`` over *sequence*."""
        totals = []
        running = 0
        for dataset in sequence:
            running += len(dataset)
            totals.append(running)
        return totals

    def __init__(self, datasets):
        """Store *datasets* and precompute cumulative sizes for global indexing."""
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        for d in self.datasets:
            assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        """Total number of samples across all concatenated datasets."""
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        """Map a global index onto the owning dataset and fetch the sample."""
        if idx < 0:
            # Support negative indexing the same way a list does.
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = 0 if dataset_idx == 0 else self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][idx - offset]

    @property
    def cummulative_sizes(self):
        """Deprecated misspelled alias of :attr:`cumulative_sizes`."""
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
def _accumulate(iterable, fn=lambda x, y: x + y):
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = fn(total, element)
yield total
class Subset(Dataset):
    r"""View over *dataset* restricted to the given *indices*.

    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """

    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        # Translate the subset-local position into the parent dataset's index.
        parent_idx = self.indices[idx]
        return self.dataset[parent_idx]

    def __len__(self):
        return len(self.indices)
def random_split(dataset, lengths, generator=None):
    r"""Randomly partition *dataset* into non-overlapping subsets.

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced; must sum to
            ``len(dataset)``
        generator (Generator): accepted for API parity with torch; unused by
            the paddle implementation
    """
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    # One global shuffle, then carve consecutive slices of the permutation.
    perm = paddle.randperm(sum(lengths))
    splits = []
    for end, length in zip(_accumulate(lengths), lengths):
        splits.append(Subset(dataset, perm[end - length: end]))
    return splits
# Monkey-patch paddle.io so callers can use paddle.io.random_split directly.
setattr(paddle.io, "random_split", random_split)
| 4,237 | 29.489209 | 119 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/run_pairwise_GraphDTA_BindingDB.py | from itertools import combinations
import itertools
import argparse
from random import *
import random
import pdb
from lifelines.utils import concordance_index
import functools
import random
import time
import pandas as pd
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch
import torch.nn as nn
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *
from processing import *
# Force flushed output so logs appear promptly under distributed launchers.
print = functools.partial(print, flush=True)
def group_by(data):
    """Group record positions by their query id.

    The original docstring documented a nonexistent ``qid_index`` parameter;
    the function actually receives the per-record query ids directly.

    :param data: iterable of query ids, one per document/record
    :return: dict mapping each query id to the list of row indices at which
        it occurs, in order of appearance
    """
    qid_doc_map = {}
    # enumerate replaces the original hand-maintained idx counter
    for idx, qid in enumerate(data):
        qid_doc_map.setdefault(qid, []).append(idx)
    return qid_doc_map
def sample_index(pairs, sampling_method=None):
    '''
    Split score pairs into two aligned index arrays.

    pairs: per-query lists of (x1, x2, ...) pair rows
    sampling_method: None keeps every pair; '500 times' draws 500 pairs per
        query with replacement.
    return:
        index arrays of x1 and x2, one entry per query
    '''
    x1_index = []
    x2_index = []
    for i_data in pairs:
        if sampling_method == '500 times':
            sampled_data = pd.DataFrame(i_data).sample(n=500, replace=True)
        elif sampling_method is None:  # identity check is the correct None test (was == None)
            sampled_data = pd.DataFrame(i_data)
        x1_index.append(sampled_data.iloc[:, 0].values)
        x2_index.append(sampled_data.iloc[:, 1].values)
    return x1_index, x2_index
def get_pairs(scores, K, eps=0.2, seed=0):
    """Build ordered score pairs for one query by random partner sampling.

    :param scores: score list of the documents for a particular query
    :param K: partners drawn per document; a fractional K (< 1) draws one
        partner per document and then keeps a random K-fraction of the pairs
    :param eps: pairs whose absolute score difference is <= eps are discarded
    :param seed: seed for the random generator
    :return: list of (i, j, score_i - score_j, n_docs) tuples
    """
    random.seed(seed)
    n_docs = len(scores)
    draws = 1 if K < 1 else K
    pairs = []
    for i in range(n_docs):
        for _ in range(draws):
            j = random.randint(0, n_docs - 1)
            diff = float(scores[i]) - float(scores[j])
            if abs(diff) > eps:
                pairs.append((i, j, diff, n_docs))
    if K < 1:
        # Sub-sample down to a K-fraction of the generated pairs.
        pairs = sample(pairs, int(len(pairs) * K))
    return pairs
def split_pairs(order_pairs, true_scores):
    """Flatten per-query pairs into lists of global document indices.

    relevant_doc[i] should be ranked above irrelevant_doc[i].

    :param order_pairs: per-query lists of (d1, d2, score_diff, n_docs)
    :param true_scores: per-query score lists (only their lengths are used,
        to compute the global index offsets)
    :return: (relevant_doc, irrelevant_doc, score_diff, N_smiles)
    """
    relevant_doc = []
    irrelevant_doc = []
    score_diff = []
    N_smiles = []
    base = 0
    for query_pairs, query_scores in zip(order_pairs, true_scores):
        for d1, d2, diff, n in query_pairs:
            # Shift the within-query indices to global positions.
            relevant_doc.append(d1 + base)
            irrelevant_doc.append(d2 + base)
            score_diff.append(diff)
            N_smiles.append(n)
        base += len(query_scores)
    return relevant_doc, irrelevant_doc, score_diff, N_smiles
def filter_pairs(data, order_paris, threshold):
    """Drop pairs whose score difference lies within +/- threshold.

    :param data: array whose column 1 holds the per-row scores
    :param order_paris: per-query lists of (idx1, idx2) pairs
    :param threshold: minimum absolute score gap for a pair to survive
    :return: per-query lists of surviving pairs (queries left empty are removed)
    """
    filtered = []
    for query_pairs in order_paris:
        frame = pd.DataFrame(query_pairs)
        s1 = data[frame.iloc[:, 0].values][:, 1].astype('float32')
        s2 = data[frame.iloc[:, 1].values][:, 1].astype('float32')
        # Keep only pairs whose gap exceeds the threshold (e.g. 0.2).
        keep = abs(s1 - s2) > threshold
        survivors = np.array(query_pairs)[keep].tolist()
        if len(survivors) > 0:
            filtered.append(survivors)
    return filtered
class hinge_loss(nn.Module):
    """Squared hinge loss on the product ``predicted_score * true_score``.

    loss = 0.5 * mean(weight * clip(threshold - p * t, min=0) ** 2)
    """

    def __init__(self, threshold=1, weight=None):
        """
        :param threshold: hinge margin. BUG FIX: the argument was previously
            ignored (``self.threshold = 1`` was hard-coded); the default is
            unchanged, so existing callers behave identically.
        :param weight: optional per-element weight applied to the loss
        """
        super().__init__()
        self.threshold = threshold
        self.weight = weight

    def forward(self, predicted_score, true_score, n=None):
        """Compute the loss; *n* is accepted for interface parity and unused."""
        margin = self.threshold - predicted_score * true_score
        loss = torch.square(torch.clip(margin, min=0))
        if self.weight is not None:
            loss = loss * self.weight
        return 0.5 * loss.mean()
def sample_pairs(true_scores, K, eps, seed):
    """Sample and binarise ranking pairs across all queries.

    :param true_scores: per-query lists of ground-truth scores
    :param K: sampling multiplicity forwarded to :func:`get_pairs`
    :param eps: minimum score gap forwarded to :func:`get_pairs`
    :param seed: RNG seed forwarded to :func:`get_pairs`
    :return: (x1 indices, x2 indices, raw score diffs, binary labels)
    """
    per_query_pairs = [get_pairs(scores, K=K, eps=eps, seed=seed) for scores in true_scores]
    x1_index, x2_index, train_scores, N_smiles = split_pairs(per_query_pairs, true_scores)
    print('Number of training dataset is {}'.format(len(x1_index)))
    # Positive score diff -> label 1, negative -> label 0.
    Y = np.array(train_scores).astype('float32')
    Y[Y < 0] = 0
    Y[Y > 0] = 1
    return x1_index, x2_index, train_scores, Y
def distributed_concat(tensor, num_total_examples):
    """All-gather *tensor* across ranks, concatenate, and trim sampler padding."""
    world_size = torch.distributed.get_world_size()
    gathered = [tensor.clone() for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor)
    merged = torch.cat(gathered, dim=0)
    # Drop the dummy tail elements added by SequentialDistributedSampler.
    return merged[:num_total_examples]
def model_eval(model,val_dataloader,device):
    """Evaluate *model* on a grouped dataloader with the concordance index (CI).

    :param model: network exposing ``forward_single`` for scoring one batch
    :param val_dataloader: yields (batch, group_ids) tuples
    :param device: device the batches are moved to
    :return: (average_CI, weighted_CI) — the unweighted mean CI over groups
        and the group-size-weighted mean CI
    """
    model.eval()
    ## validation
    CI_list = []
    weighted_CI_list = []
    all_true_label = []  # NOTE(review): never filled below — appears to be dead state
    all_predicted_label = []  # NOTE(review): never filled below — appears to be dead state
    weights_len = []
    with torch.no_grad():
        # Accumulate predictions, labels and group ids over the whole loader.
        target_pred_scores = []
        target_y_label = []
        target_groups = []
        for batch_id, data in enumerate(val_dataloader):
            i_data, groups = data
            i_data = i_data.to(device)
            # predict
            pred_scores = model.forward_single(i_data)
            # true label
            true_label = i_data.y
            # NOTE(review): squeeze() on a size-1 batch yields a 0-d array whose
            # tolist() is a scalar, so extend() would fail; presumably every
            # batch has >= 2 elements — TODO confirm.
            target_pred_scores.extend(pred_scores.cpu().numpy().squeeze().tolist())
            target_y_label.extend(true_label.cpu().numpy().tolist())
            target_groups.extend(groups.numpy().tolist())
        target_pred_scores = np.array(target_pred_scores)
        target_y_label = np.array(target_y_label)
        target_groups = np.array(target_groups)
        group_names = np.unique(target_groups)
        # loop over all the groups
        for i in group_names:
            pos = np.where(target_groups == i)
            i_target_len = len(pos[0])
            i_target_pred_scores = target_pred_scores[pos]
            i_target_y_label = target_y_label[pos]
            # compute CI
            try:
                CI = concordance_index(i_target_y_label,i_target_pred_scores)
                CI_list.append(CI)
                weighted_CI_list.append(i_target_len*CI)
                weights_len.append(i_target_len)
            except:
                # Best effort: concordance_index raises when a group has no
                # admissible pairs (e.g. all labels equal); such groups are skipped.
                pass
    average_CI = np.mean(CI_list)
    weighted_CI = np.sum(weighted_CI_list)/np.sum(weights_len)
    return average_CI, weighted_CI
def dist_run(rank, args, world_size, data_path, model, fold):
    """Per-GPU worker for one training fold (spawned via torch.multiprocessing).

    Loads the primary split (args.index) plus two auxiliary measurement types
    (args.mixed_index / args.mixed_index1), resamples ranking pairs every
    epoch, trains the pairwise model with BCE-with-logits, and on rank 0
    checkpoints the weights with the best validation weighted CI.
    """
    dist.init_process_group('nccl', rank=rank, world_size=world_size)
    print(rank)
    ##################### load the data ############################
    train_file = 'BindingDB_values_mixed_' + 'train_' + args.index + '_filter' + '.csv'
    val_file = 'BindingDB_values_mixed_' + 'val_' + args.index + '_filter' + '.csv'
    test_file = 'BindingDB_values_mixed_' + 'test_' + args.index + '_filter' + '.csv'
    # load the data
    train_set = pd.read_csv(data_path + '/' + train_file)
    val_set = pd.read_csv(data_path + '/' + val_file)
    test_set = pd.read_csv(data_path + '/' + test_file)
    # load the auxiliary ("mixed") measurement types
    mixed_data_file = 'BindingDB_values_mixed_' + 'train_' + args.mixed_index + '_filter.csv'
    mixed_set = pd.read_csv(data_path + mixed_data_file)
    mixed_data_file1 = 'BindingDB_values_mixed_' + 'train_' + args.mixed_index1 + '_filter.csv'
    mixed_set1 = pd.read_csv(data_path + mixed_data_file1)
    # pre-processing: column 2 is the group name (column 3 would be the target name)
    train_set = process_data_BindingDB(train_set, 2)
    val_set = process_data_BindingDB(val_set, 2)
    mixed_set = process_data_BindingDB(mixed_set, 2)
    mixed_set1 = process_data_BindingDB(mixed_set1, 2)
    test_set = process_data_BindingDB(test_set, 2)
    # unpack the processed tuples: (groups, drugs, targets, labels, graph cache)
    # train
    train_t = train_set[2]
    train_d = train_set[1]
    train_groups = train_set[0]
    train_y = train_set[3]
    train_smiles_graph = train_set[4]
    # val
    val_t = val_set[2]
    val_d = val_set[1]
    val_groups = val_set[0]
    val_y = val_set[3]
    val_smiles_graph = val_set[4]
    # mixed
    mixed_t = mixed_set[2]
    mixed_d = mixed_set[1]
    mixed_groups = mixed_set[0]
    mixed_y = mixed_set[3]
    mixed_smiles_graph = mixed_set[4]
    # mixed1
    mixed_t1 = mixed_set1[2]
    # BUG FIX: drugs must come from mixed_set1 (was mixed_set[1]), otherwise the
    # drug column is misaligned with mixed_set1's targets and labels.
    mixed_d1 = mixed_set1[1]
    mixed_groups1 = mixed_set1[0]
    mixed_y1 = mixed_set1[3]
    mixed_smiles_graph1 = mixed_set1[4]
    # test
    test_t = test_set[2]
    test_d = test_set[1]
    test_groups = test_set[0]
    test_y = test_set[3]
    test_smiles_graph = test_set[4]
    ##################### assemble the training pool ############################
    # concatenate primary + both auxiliary sets (the index offsets below match this order)
    train_t_data = np.concatenate((train_t, mixed_t, mixed_t1))
    train_d_data = np.concatenate((train_d, mixed_d, mixed_d1))
    train_smiles_graph_data = {**train_smiles_graph, **mixed_smiles_graph, **mixed_smiles_graph1}
    # group the row indices of each split by query (protein group)
    qid_doc_map_train = group_by(train_groups)
    query_idx_train = qid_doc_map_train.keys()
    train_keys = np.array(list(query_idx_train))
    qid_doc_map_val = group_by(val_groups)
    query_idx_val = qid_doc_map_val.keys()
    val_keys = np.array(list(query_idx_val))
    id_doc_map_mixed = group_by(mixed_groups)
    query_idx_mixed = id_doc_map_mixed.keys()
    mixed_keys = np.array(list(query_idx_mixed))
    id_doc_map_mixed1 = group_by(mixed_groups1)
    query_idx_mixed1 = id_doc_map_mixed1.keys()
    mixed_keys1 = np.array(list(query_idx_mixed1))
    qid_doc_map_test = group_by(test_groups)
    query_idx_test = qid_doc_map_test.keys()
    test_keys = np.array(list(query_idx_test))
    # per-query ground-truth score lists used for pair sampling
    true_scores = [train_y[qid_doc_map_train[qid]] for qid in query_idx_train]
    true_scores_mixed = [mixed_y[id_doc_map_mixed[qid]] for qid in query_idx_mixed]
    true_scores_mixed1 = [mixed_y1[id_doc_map_mixed1[qid]] for qid in query_idx_mixed1]
    # ###### get val/test dataloader
    val_index = []
    for qid in val_keys:
        val_index.append(qid_doc_map_val[qid])
    val_dataset = TestDataset1(groupID=val_groups, xd=val_d, xt=val_t, y=val_y, smile_graph=val_smiles_graph)
    val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False)
    test_index = []
    for qid in test_keys:
        test_index.append(qid_doc_map_test[qid])
    test_dataset = TestDataset1(groupID=test_groups, xd=test_d, xt=test_t, y=test_y, smile_graph=test_smiles_graph)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False)
    ###### wrap the model for distributed training
    model = model.to(rank)
    model_dist = DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)
    # define the optimizer and loss (loss hoisted out of the batch loop)
    optimizer = torch.optim.Adam(model_dist.parameters(), lr=args.learning_rate)
    loss_fn = nn.BCEWithLogitsLoss()
    print('start to train the model...')
    for epoch in range(args.N_epoch):
        ##################### resampling the pairs for each epoch #####################
        start_time = time.time()
        train_x1_index, train_x2_index, train_scores, Y_train = sample_pairs(true_scores, K=args.sampling_N_train, eps=args.filter_threshold, seed=epoch)
        mixed_x1_index, mixed_x2_index, mixed_scores, Y_mixed = sample_pairs(true_scores_mixed, K=args.sampling_N_mixed, eps=args.filter_threshold, seed=epoch)
        # BUG FIX: sample the second auxiliary set from its own score lists
        # (was true_scores_mixed, producing indices/labels for the wrong dataset).
        mixed_x1_index1, mixed_x2_index1, mixed_scores1, Y_mixed1 = sample_pairs(true_scores_mixed1, K=args.sampling_N_mixed1, eps=args.filter_threshold, seed=epoch)
        # one-hot flag marking which pairs come from the auxiliary sets
        len_train = len(train_x1_index)
        len_mixed = len(mixed_x1_index)
        len_mixed1 = len(mixed_x1_index1)
        onehot_train = np.zeros(len_train)
        onehot_mixed = np.ones(len_mixed)
        onehot_mixed1 = np.ones(len_mixed1)
        onehot_train_mixed = np.concatenate((onehot_train, onehot_mixed, onehot_mixed1))
        # shift the auxiliary indices into the concatenated arrays' coordinates
        temp = len(train_d)
        temp1 = len(mixed_d)
        mixed_x1_index = [i + temp for i in mixed_x1_index]
        mixed_x2_index = [i + temp for i in mixed_x2_index]
        mixed_x1_index1 = [i + temp + temp1 for i in mixed_x1_index1]
        mixed_x2_index1 = [i + temp + temp1 for i in mixed_x2_index1]
        train_x1_index = train_x1_index + mixed_x1_index + mixed_x1_index1
        train_x2_index = train_x2_index + mixed_x2_index + mixed_x2_index1
        Y_train_data = np.concatenate((Y_train, Y_mixed, Y_mixed1))
        # rebuild the training dataloader over the freshly sampled pairs
        train_dataset = TrainDataset(train_x1_index=train_x1_index, train_x2_index=train_x2_index, train_d=train_d_data, train_t=train_t_data, y=Y_train_data, onehot_train_mixed=onehot_train_mixed, smile_graph=train_smiles_graph_data)
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, sampler=train_sampler)
        end_time = time.time()
        print('make pairs + sampling, take time {}'.format(end_time-start_time))
        ##################### train one epoch #####################
        print('***************train')
        LOSS = []
        model.train()
        start_time = time.time()
        for batch_id, data in enumerate(train_dataloader):
            data1 = data[0].to(rank)
            data2 = data[1].to(rank)
            batch_train_mixed = data1.train_mixed
            optimizer.zero_grad()
            output = model_dist(data1, data2)
            true_labels = data1.y.view(-1, 1).float()
            loss = loss_fn(output, true_labels)
            loss.backward()
            optimizer.step()
            LOSS.append(loss.cpu().detach().numpy())
        end_time = time.time()
        print('take time {}'.format(end_time-start_time), flush=True)
        print('epoch {}: loss: {} '.format(epoch, np.mean(LOSS)), flush=True)
        if rank == 0:
            # rank 0 evaluates and checkpoints the best model by validation weighted CI
            print('***************test')
            val_average_CI, val_weighted_CI = model_eval(model, val_dataloader, device=rank)
            print("val_Average CI is {}".format(val_average_CI), flush=True)
            print("val_weighted CI is {}".format(val_weighted_CI), flush=True)
            # epoch 0 always initialises the best score (the duplicated
            # epoch==0 / epoch!=0 branches had identical bodies and are merged)
            if epoch == 0 or val_weighted_CI >= best_average_CI:
                best_average_CI = val_weighted_CI
                test_average_CI, test_weighted_CI = model_eval(model, test_dataloader, device=rank)
                # save the best epoch
                torch.save(model.state_dict(), args.save_direct + 'train_model_best' + str(fold))
                with open(args.save_direct + "best_results" + str(fold) + ".txt", "w") as text_file:
                    text_file.write('epoch {}: loss: {} '.format(epoch, np.mean(LOSS)) + '\n')
                    text_file.write("val Average CI is {}".format(val_average_CI) + '\n')
                    text_file.write("val weighted CI is {}".format(val_weighted_CI) + '\n')
                    text_file.write("test Average CI is {}".format(test_average_CI) + '\n')
                    text_file.write("test weighted CI is {}".format(test_weighted_CI) + '\n')
                    text_file.write('##############################################' + '\n')
if __name__ == '__main__':
    ##################### set parameters #####################
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_direct", default='./output/')
    parser.add_argument("--data_path", default='../../Data_for_ALL/')
    parser.add_argument("--dataset", default='BindingDB_new')
    parser.add_argument("--model_name", default='GAT_GCN', help='[GATNet, GAT_GCN , GCNNet, GINConvNet]')
    # BUG FIX: without type=int a CLI-supplied rank stayed a string.
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--index", default='ki')
    parser.add_argument("--mixed_index", default='kd')
    parser.add_argument("--mixed_index1", default='IC50')
    # BUG FIX: without type=int a CLI-supplied value stayed a string and broke range(args.N_runs).
    parser.add_argument("--N_runs", type=int, default=5)
    parser.add_argument("--sampling_N_train", type=int, default=10)
    parser.add_argument("--sampling_N_mixed", type=int, default=5)
    parser.add_argument("--sampling_N_mixed1", type=int, default=1)
    # BUG FIX: the threshold is fractional; type=int rejected CLI values such as 0.2.
    parser.add_argument("--filter_threshold", type=float, default=0.2)
    parser.add_argument("--train_batch_size", type=int, default=512)
    parser.add_argument("--test_batch_size", type=int, default=512)
    parser.add_argument("--learning_rate", type=float, default=1e-5)
    parser.add_argument("--N_epoch", type=int, default=200)
    args = parser.parse_args()
    ##################### set parameters #####################
    data_path = args.data_path + args.dataset + '/'
    print('><<><><><><><><><><><><><><><><><><><><><><><><><<><><><><><>')
    # One independent run (fresh model) per fold, trained on all visible GPUs.
    for fold in range(args.N_runs):
        ###### load model
        model = eval(args.model_name)()  # NOTE: eval on a CLI string — restrict to the listed model names
        world_size = torch.cuda.device_count()
        print('Let\'s use', world_size, 'GPUs!')
        mp.spawn(dist_run, args=(args, world_size, data_path, model, fold), nprocs=world_size, join=True)
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/processing.py | import pandas as pd
import numpy as np
import os
import rdkit
import sklearn
import torch
import json,pickle
from collections import OrderedDict
from rdkit import Chem
from rdkit.Chem import MolFromSmiles
import networkx as nx
from torch_geometric.data import InMemoryDataset, DataLoader
from utils import *
import pdb
import torch
def atom_features(atom):
    """Encode an RDKit atom as a feature vector (symbol, degree, H count, implicit valence, aromaticity)."""
    symbol_choices = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'Unknown']
    counts = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    encoded = (one_of_k_encoding_unk(atom.GetSymbol(), symbol_choices)
               + one_of_k_encoding(atom.GetDegree(), counts)
               + one_of_k_encoding_unk(atom.GetTotalNumHs(), counts)
               + one_of_k_encoding_unk(atom.GetImplicitValence(), counts)
               + [atom.GetIsAromatic()])
    return np.array(encoded)
def one_of_k_encoding(x, allowable_set):
    """One-hot encode *x* against *allowable_set*; raise if *x* is not a member."""
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
    return [x == item for item in allowable_set]
def one_of_k_encoding_unk(x, allowable_set):
    """One-hot encode *x*; inputs outside the set map to the last ('unknown') slot."""
    target = x if x in allowable_set else allowable_set[-1]
    return [target == item for item in allowable_set]
def smile_to_graph(smile):
    """Convert a SMILES string into (atom count, node features, directed edge list)."""
    mol = Chem.MolFromSmiles(smile)
    num_atoms = mol.GetNumAtoms()
    # Per-atom feature vectors, normalised to sum to 1.
    node_features = []
    for atom in mol.GetAtoms():
        vec = atom_features(atom)
        node_features.append(vec / sum(vec))
    # Undirected bond list, expanded to both directions via networkx.
    bonds = [[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] for b in mol.GetBonds()]
    directed = nx.Graph(bonds).to_directed()
    edge_index = [[u, v] for u, v in directed.edges]
    return num_atoms, node_features, edge_index
def seq_cat(prot):
    """Encode a protein sequence as a fixed-length integer vector (truncated / zero-padded to max_seq_len)."""
    encoded = np.zeros(max_seq_len)
    for pos, residue in enumerate(prot[:max_seq_len]):
        encoded[pos] = seq_dict[residue]
    return encoded
# Vocabulary for protein sequence encoding: each residue letter maps to a
# 1-based integer index (0 is left for the zero padding used by seq_cat).
seq_voc = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
seq_dict = {v:(i+1) for i,v in enumerate(seq_voc)}
seq_dict_len = len(seq_dict)
# Protein sequences are truncated / zero-padded to this length.
max_seq_len = 1000
def results_prepare_pairwise(data, groupID='Target ID', label='Label', BPE='BPE_dt'):
    """Stack [group id, label, *BPE tokens] rows into one numpy matrix."""
    rows = []
    for i in range(data.shape[0]):
        # One flat row per record: group id, label, then the BPE token sequence.
        row = [data[groupID][i], data[label][i]]
        row.extend(data[BPE][i])
        rows.append(row)
    return np.array(rows)
def process_data(df):
    """Canonicalise SMILES, build the graph cache, and return training arrays.

    Returns (target names, drugs, encoded proteins, labels, smiles->graph dict).
    """
    rows = []
    for _, row in df.iterrows():
        # Round-trip through RDKit to get the canonical isomeric SMILES.
        canonical = Chem.MolToSmiles(Chem.MolFromSmiles(row[2]), isomericSmiles=True)
        rows.append([canonical, row[1], row[3], row[0]])  # smiles, protein, label, target name
    rows = pd.DataFrame(rows)
    # Build the SMILES -> molecular-graph cache (one entry per unique drug).
    smile_graph = {}
    for smile in set(rows.iloc[:, 0]):
        smile_graph[smile] = smile_to_graph(smile)
    train_drugs = list(rows.iloc[:, 0])
    train_prots = list(rows.iloc[:, 1])
    train_Y = list(rows.iloc[:, 2])
    target_name = list(rows.iloc[:, 3])
    # Encode every protein sequence to its fixed-length integer vector.
    XT = [seq_cat(t) for t in train_prots]
    return (np.asarray(target_name), np.asarray(train_drugs), np.asarray(XT), np.asarray(train_Y), smile_graph)
def process_data_BindingDB(df,k):
    """Parse a BindingDB frame into training arrays, discarding bad rows.

    :param df: frame whose columns are [target seq, smiles, group, target, label]
    :param k: which column supplies the grouping key (2 = group name, 3 = target name)
    :return: (group names, drugs, encoded proteins, labels, smiles->graph dict),
        with rows whose SMILES yields a degenerate (edge-less) graph removed
    """
    pairs=[]
    i = 0
    for _,row in df.iterrows():
        # Rows whose SMILES fails RDKit parsing (or whose columns are malformed)
        # are counted and skipped rather than aborting the whole load.
        try:
            pair = []
            lg = Chem.MolToSmiles(Chem.MolFromSmiles(row[1]), isomericSmiles=True) #smiles
            pair.append(lg)
            # pair.append(row[0]) #target Sequence
            pair.append(seq_cat(row[0]))
            pair.append(row[4]) # label
            pair.append(row[k]) # group name[2], target name[3]
            pairs.append(pair)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt; kept as-is.
            i += 1
    print('discard {} SMILES'.format(i))
    pairs=pd.DataFrame(pairs)
    #Drug
    compound_iso_smiles = pairs.iloc[:,0]
    compound_iso_smiles = set(compound_iso_smiles)
    smile_graph = {}
    outlier_smiles = []
    for smile in compound_iso_smiles:
        g = smile_to_graph(smile)
        smile_graph[smile] = g
        _, _, edge_index = g
        edge_index=torch.LongTensor(edge_index)
        # A 1-D edge tensor means the molecule has no bonds (e.g. a single
        # atom); such drugs cannot form a graph and are filtered out below.
        if len(edge_index.shape) == 1:
            outlier_smiles.append(smile)
    print('we discard smiles sequence : {}'.format(outlier_smiles))
    train_drugs, train_prots, train_Y, target_name= list(pairs.iloc[:,0]),list(pairs.iloc[:,1]),list(pairs.iloc[:,2]), list(pairs.iloc[:,3])
    target_name, train_drugs, train_prots, train_Y = np.asarray(target_name), np.asarray(train_drugs), np.asarray(train_prots), np.asarray(train_Y)
    # Boolean mask dropping every row whose drug is one of the outlier SMILES.
    mask = np.full(len(train_drugs),True)
    for i in outlier_smiles:
        temp = train_drugs != i
        mask = mask & temp
    target_name = target_name[mask]
    train_drugs = train_drugs[mask]
    train_prots = train_prots[mask]
    train_Y = train_Y[mask]
    return (target_name, train_drugs, train_prots, train_Y, smile_graph)
def process_data_BindingDB_2_df(df):
    """Filter a raw BindingDB frame down to rows with usable SMILES.

    Known single-atom SMILES (which produce bond-less graphs) are dropped,
    and each remaining SMILES is round-tripped through RDKit purely as a
    validity check; unparseable rows are skipped silently. The dead
    statement ``'F' not in outlier_smiles`` from the original is removed.

    :param df: frame with columns [target seq, smiles, groupID, targetID, label]
    :return: DataFrame with columns Target/SMILES/groupID/targetID/Label
    """
    outlier_smiles = ['F', '[SH-]', '[I-]', 'S', 'I', '[F-]']  # single atoms: no bonds, no graph
    rows = []
    for _, row in df.iterrows():
        smiles = row[1]
        if smiles in outlier_smiles:
            continue
        try:
            # Validation only: MolToSmiles fails when the SMILES is invalid.
            Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True)
            rows.append([row[0], smiles, row[2], row[3], row[4]])
        except Exception:  # narrowed from bare except; invalid row -> skip (best effort)
            pass
    pairs = pd.DataFrame(rows)
    pairs.columns = ['Target', 'SMILES', 'groupID', 'targetID', 'Label']
    return pairs
def filter_protein(train_data):
filterset = [1,2,3,4,5,6,7,8,9,0]
mask_train = [True] * len(train_data)
for i in filterset:
temp = [not (str(i) in T) for T in train_data['Target']]
mask_train = [a&b for a,b in zip(mask_train,temp)]
train_data = train_data[mask_train]
return train_data
| 6,546 | 29.593458 | 316 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/utils.py | import os
import numpy as np
from math import sqrt
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric.data import Dataset
from torch_geometric import data as DATA
import torch
import pdb
class TrainDataset(Dataset):
    """Pairwise training dataset carrying a primary/mixed origin flag.

    Each item is a pair of PyTorch-Geometric ``Data`` graphs (one per drug)
    that share the same pairwise label and the same origin flag.
    """
    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, onehot_train_mixed=None,smile_graph=None,transform=None,
                 pre_transform=None):
        super(TrainDataset, self).__init__(root,transform, pre_transform)
        #root is required for save preprocessed data, default is '/tmp'
        self.train_x1_index = train_x1_index  # row index of the first drug of each pair
        self.train_x2_index = train_x2_index  # row index of the second drug of each pair
        self.train_d = train_d  # drug SMILES per row
        self.train_t = train_t  # encoded protein target per row
        self.y = y  # binary pairwise label per pair
        self.onehot_train_mixed = onehot_train_mixed  # 0 = primary data, 1 = mixed data
        self.smile_graph = smile_graph  # SMILES -> (c_size, features, edge_index) cache
    # torch_geometric Dataset hooks: everything lives in memory, so the
    # file-based hooks are intentionally no-ops.
    @property
    def raw_file_names(self):
        pass
    @property
    def processed_file_names(self):
        pass
    def download(self):
        pass
    def _download(self):
        pass
    def _process(self):
        pass
    def process(self, xd1, xd2, xt1, xt2, y, train_mixed, smile_graph):
        """Build the two PyG ``Data`` graphs for one training pair.

        Both graphs carry the shared label *y* and origin flag *train_mixed*.
        """
        smiles1 = xd1
        target1 = xt1
        smiles2 = xd2
        target2 = xt2
        labels = y
        # convert SMILES to molecular representation using rdkit
        c_size1, features1, edge_index1 = smile_graph[smiles1]
        c_size2, features2, edge_index2 = smile_graph[smiles2]
        # make the graph ready for PyTorch Geometrics GCN algorithms:
        GCNData1 = DATA.Data(x=torch.Tensor(features1),
                            edge_index=torch.LongTensor(edge_index1).transpose(1, 0),
                            y=torch.FloatTensor([labels]))
        GCNData1.target = torch.LongTensor([target1])
        GCNData1.train_mixed = torch.LongTensor([train_mixed])
        GCNData1.__setitem__('c_size', torch.LongTensor([c_size1]))
        GCNData2 = DATA.Data(x=torch.Tensor(features2),
                            edge_index=torch.LongTensor(edge_index2).transpose(1, 0),
                            y=torch.FloatTensor([labels]))
        GCNData2.target = torch.LongTensor([target2])
        GCNData2.train_mixed = torch.LongTensor([train_mixed])
        GCNData2.__setitem__('c_size', torch.LongTensor([c_size2]))
        return GCNData1, GCNData2
    def len(self):
        # Number of sampled pairs.
        return len(self.train_x1_index)
    def get(self, idx):
        """Return the (graph1, graph2) pair for position *idx*."""
        x1_index = self.train_x1_index[idx]
        x2_index = self.train_x2_index[idx]
        xd1 = self.train_d[x1_index]
        xd2 = self.train_d[x2_index]
        xt1 = self.train_t[x1_index]
        xt2 = self.train_t[x2_index]
        Y = self.y[idx]
        train_mixed = self.onehot_train_mixed[idx]
        data1, data2 = self.process(xd1, xd2, xt1, xt2, Y, train_mixed, self.smile_graph)
        return data1, data2
class TrainDataset1(Dataset):
    """Pairwise training dataset: each item is a (graph1, graph2) PyG pair sharing one label."""

    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, smile_graph=None,transform=None,
                 pre_transform=None):
        # root is required for saving preprocessed data, default is the current dir
        super(TrainDataset1, self).__init__(root, transform, pre_transform)
        self.train_x1_index = train_x1_index
        self.train_x2_index = train_x2_index
        self.train_d = train_d
        self.train_t = train_t
        self.y = y
        self.smile_graph = smile_graph

    # torch_geometric Dataset hooks: everything is in memory, so the
    # file-based hooks are intentionally no-ops.
    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def _make_graph(self, smiles, target, label, smile_graph):
        # Build one PyG Data object from the cached molecular graph.
        c_size, features, edge_index = smile_graph[smiles]
        item = DATA.Data(x=torch.Tensor(features),
                         edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                         y=torch.FloatTensor([label]))
        item.target = torch.LongTensor([target])
        item.__setitem__('c_size', torch.LongTensor([c_size]))
        return item

    def process(self, xd1, xd2, xt1, xt2, y, smile_graph):
        """Return the two Data objects for one training pair (shared label *y*)."""
        first = self._make_graph(xd1, xt1, y, smile_graph)
        second = self._make_graph(xd2, xt2, y, smile_graph)
        return first, second

    def len(self):
        return len(self.train_x1_index)

    def get(self, idx):
        i1 = self.train_x1_index[idx]
        i2 = self.train_x2_index[idx]
        data1, data2 = self.process(self.train_d[i1], self.train_d[i2],
                                    self.train_t[i1], self.train_t[i2],
                                    self.y[idx], self.smile_graph)
        return data1, data2
class TestDataset(Dataset):
    """Grouped evaluation dataset: each item is the list of all drug-target
    graphs belonging to one group (one protein target), so CI can be
    computed per group downstream.
    """
    def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None,test_index=None,transform=None,
                 pre_transform=None):
        #root is required for save preprocessed data, default is '/tmp'
        super(TestDataset, self).__init__(root,transform, pre_transform)
        # test_index: list of index lists, one per group
        self.test_index = test_index
        # largest group size; stored but not read here — presumably for padding
        # by a caller (TODO confirm)
        self.max_len = max([len(i) for i in self.test_index])
        # eagerly build every graph once; get() then only slices this list
        self.data_list = self.process(xd, xt, y,smile_graph)
    # torch-geometric disk-caching hooks: intentionally no-ops (in-memory only)
    @property
    def raw_file_names(self):
        pass
    @property
    def processed_file_names(self):
        pass
    def download(self):
        pass
    def _download(self):
        pass
    def _process(self):
        pass
    def process(self, xd, xt, y, smile_graph):
        """Convert parallel (SMILES, target, label) lists into graph Data objects."""
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            data_list.append(GCNData)
        return data_list
    def len(self):
        # length = number of groups, not number of samples
        return len(self.test_index)
    def get(self, idx):
        """Return all pre-built graphs of group `idx` as a plain list."""
        return_test_index = self.test_index[idx]
        return_data = [ self.data_list[index] for index in return_test_index]
        return return_data
class TestDataset1(Dataset):
    """Lazy per-sample evaluation dataset: builds one graph on demand in
    get() and returns it together with the sample's group id.

    Fixes over the previous revision:
    * ``smile_graph`` was accepted by ``__init__`` but never stored, so
      ``get()`` had no way to build graphs — it is now kept on ``self``.
    * ``len()`` referenced ``self.test_index``, an attribute this class
      never defines (AttributeError) — it now reports ``len(self.xd)``.
    * ``get()`` called module-level ``process``/``xd``/``xt``/``y``/
      ``smile_graph`` (NameError) — it now calls ``self.process`` with the
      stored arrays, wrapping the single sample in one-element lists
      because ``process()`` expects parallel sequences.
    """
    def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None, groupID=None, transform=None,
                 pre_transform=None):
        # root is required for saving preprocessed data, default is '/tmp'
        super(TestDataset1, self).__init__(root, transform, pre_transform)
        self.xd = xd                    # drug SMILES strings
        self.xt = xt                    # encoded protein targets
        self.y = y                      # affinity labels
        self.groupID = groupID          # group (target) id per sample
        # bug fix: previously dropped, leaving get() unable to build graphs
        self.smile_graph = smile_graph  # SMILES -> (num_atoms, node_features, edge_index)
    # torch-geometric disk-caching hooks: intentionally no-ops (in-memory only)
    @property
    def raw_file_names(self):
        pass
    @property
    def processed_file_names(self):
        pass
    def download(self):
        pass
    def _download(self):
        pass
    def _process(self):
        pass
    def process(self, xd, xt, y, smile_graph):
        """Convert parallel (SMILES, target, label) lists into graph Data objects."""
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            data_list.append(GCNData)
        return data_list
    def len(self):
        # bug fix: self.test_index never existed on this class
        return len(self.xd)
    def get(self, idx):
        """Return ([graph], group_id) for sample `idx`."""
        # bug fix: use self.* and wrap the scalars so process() sees sequences
        return_data = self.process([self.xd[idx]], [self.xt[idx]], [self.y[idx]], self.smile_graph)
        return_group = self.groupID[idx]
        return (return_data, return_group)
class Data_Encoder(Dataset):
    """Thin torch-geometric-style wrapper that serves pre-built samples as-is."""
    def __init__(self, root='./', data=None, transform=None, pre_transform=None):
        # root is where torch-geometric would cache processed data; nothing is
        # written there because the samples arrive ready-made
        super(Data_Encoder, self).__init__(root, transform, pre_transform)
        self.data = data
    # Disk-caching hooks required by the base class — all deliberate no-ops.
    @property
    def raw_file_names(self):
        pass
    @property
    def processed_file_names(self):
        pass
    def download(self):
        pass
    def _download(self):
        pass
    def _process(self):
        pass
    def len(self):
        """Number of wrapped samples."""
        return len(self.data)
    def get(self, idx):
        """Hand back the stored sample untouched."""
        return self.data[idx]
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/run_pairwise_GraphDTA_CV.py | from itertools import combinations
import itertools
from random import *
import random
import pdb
from lifelines.utils import concordance_index
from sklearn import preprocessing
import functools
import random
import time
import pandas as pd
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch
import torch.nn as nn
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *
from processing import process_data
import argparse
print = functools.partial(print, flush=True)
def group_by(data):
    """Group positions by query id.

    :param data: iterable of query ids, one per document row
    :return: dict mapping each query id to the list of row positions
             (in encounter order) that carry it
    """
    grouped = {}
    for position, qid in enumerate(data):
        grouped.setdefault(qid, []).append(position)
    return grouped
def sample_index(pairs, sampling_method=None):
    """Split score pairs into first-/second-element index arrays.

    :param pairs: list of pair groups; each group is a sequence of
                  (idx1, idx2, ...) tuples
    :param sampling_method: None to keep every pair, or '500 times' to
                            resample 500 pairs per group with replacement
    :return: (x1_index, x2_index) — per-group numpy arrays of the first
             and second pair members
    :raises ValueError: for an unrecognized sampling_method (previously
             this left `sampled_data` unbound and crashed with NameError)
    """
    x1_index = []
    x2_index = []
    for pair_group in pairs:
        if sampling_method == '500 times':
            frame = pd.DataFrame(pair_group).sample(n=500, replace=True)
        elif sampling_method is None:  # idiom fix: was `== None`
            frame = pd.DataFrame(pair_group)
        else:
            raise ValueError("unknown sampling_method: {!r}".format(sampling_method))
        x1_index.append(frame.iloc[:, 0].values)
        x2_index.append(frame.iloc[:, 1].values)
    return x1_index, x2_index
def get_pairs(scores, K, eps=0.2, seed=0):
    """Sample ordered score pairs for one query.

    For every document, K partners are drawn uniformly at random; a pair is
    kept only when the score gap exceeds `eps`.  Each kept tuple is
    (anchor_idx, partner_idx, score_gap, num_docs).

    :param scores: score list (string or numeric) for one query
    :param K: partners sampled per document
    :param eps: minimum absolute score gap to keep a pair
    :param seed: RNG seed for reproducible sampling
    """
    random.seed(seed)
    n_docs = len(scores)
    sampled_pairs = []
    for anchor in range(n_docs):
        for _ in range(K):
            partner = random.randint(0, n_docs - 1)
            gap = float(scores[anchor]) - float(scores[partner])
            if abs(gap) <= eps:
                continue
            sampled_pairs.append((anchor, partner, gap, n_docs))
    return sampled_pairs
def split_pairs(order_pairs, true_scores):
    """Flatten per-query pairs into global-index lists.

    Local document indices are shifted by a running offset so they address
    the concatenation of all queries' documents.

    :param order_pairs: per-query lists of (d1, d2, score_gap, group_size)
    :param true_scores: per-query score lists (only their lengths are used,
                        to advance the offset)
    :return: (winners, losers, gaps, group_sizes) as four parallel lists
    """
    winners, losers, gaps, group_sizes = [], [], [], []
    offset = 0
    for query_no, per_query_pairs in enumerate(order_pairs):
        for first, second, gap, size in per_query_pairs:
            winners.append(first + offset)
            losers.append(second + offset)
            gaps.append(gap)
            group_sizes.append(size)
        offset += len(true_scores[query_no])
    return winners, losers, gaps, group_sizes
def filter_pairs(data, order_paris, threshold):
    """Drop pairs whose absolute score gap is at or below `threshold`.

    :param data: 2-D array whose column 1 holds each sample's score
    :param order_paris: list of pair groups (sequences of (idx1, idx2) rows)
    :param threshold: minimum |score1 - score2| a pair must exceed
    :return: surviving pair groups (empty groups are omitted entirely)
    """
    kept_groups = []
    for group in order_paris:
        pair_table = pd.DataFrame(group)
        left_scores = data[pair_table.iloc[:, 0].values][:, 1].astype('float32')
        right_scores = data[pair_table.iloc[:, 1].values][:, 1].astype('float32')
        # filtered |score|<threshold
        keep_mask = abs(left_scores - right_scores) > threshold
        surviving = np.array(group)[keep_mask].tolist()
        if surviving:
            kept_groups.append(surviving)
    return kept_groups
class hinge_loss(nn.Module):
    """Squared hinge loss on the product of prediction and signed label.

    loss = 0.5 * mean(max(0, threshold - pred * label)^2), optionally
    scaled elementwise by `weight` before averaging.

    Bug fix: __init__ previously hard-coded ``self.threshold = 1`` and
    silently ignored the ``threshold`` argument.
    """
    def __init__(self, threshold=1, weight=None):
        super().__init__()
        self.threshold = threshold  # was: self.threshold = 1 (argument ignored)
        self.weight = weight
    def forward(self, predicted_score, true_score, n=None):
        # margin on the product (ranking-style), not on the difference
        score_diff = predicted_score * true_score
        loss = self.threshold - score_diff
        loss = torch.clip(loss, min=0)
        loss = torch.square(loss)
        if self.weight is not None:
            loss = loss * self.weight
        return 0.5 * loss.mean()
def sample_pairs(true_scores, K, eps, seed):
    """Sample and flatten training pairs across all queries.

    Returns (x1_index, x2_index, score_gaps, Y) where Y is the binarized
    gap sign: 1 where the first document scores higher, 0 where lower.
    """
    per_query_pairs = [get_pairs(scores, K=K, eps=eps, seed=seed) for scores in true_scores]
    x1_index, x2_index, train_scores, _ = split_pairs(per_query_pairs, true_scores)
    print('Number of training dataset is {}'.format(len(x1_index)))
    # change labels to binary
    Y = np.array(train_scores).astype('float32')
    Y[Y < 0] = 0
    Y[Y > 0] = 1
    return x1_index, x2_index, train_scores, Y
def distributed_concat(tensor, num_total_examples):
    """All-gather `tensor` from every distributed worker and concatenate
    along dim 0, trimmed to `num_total_examples` rows.

    Must be called on every rank (all_gather is a collective operation).
    """
    output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(output_tensors, tensor)
    concat = torch.cat(output_tensors, dim=0)
    # truncate the dummy elements added by SequentialDistributedSampler
    return concat[:num_total_examples]
def model_eval(model,val_dataloader,device):
    """Evaluate ranking quality per target group.

    Each dataloader batch is one group (all drug-target pairs of one
    protein); the concordance index (CI) is computed within the group.

    :return: (average_CI, weighted_CI) — plain mean over groups, and mean
             weighted by group size
    """
    model.eval()
    ## validation
    CI_list = []
    weighted_CI_list = []
    weights_len = []
    with torch.no_grad():
        for batch_id, data in enumerate(val_dataloader):
            i_target_len = len(data)
            i_target_pred_scores = []
            i_target_y_label = []
            # loop over all the D-T pairs in one group(T group)
            for i_data in data:
                i_data = i_data.to(device)
                pred_scores = model.forward_single(i_data)
                # get the predicted labels
                i_target_pred_scores.append(float(pred_scores))
                # get the true labels
                i_target_y_label.append(float(i_data.y.cpu()))
            i_target_pred_scores = np.array(i_target_pred_scores)
            i_target_y_label = np.array(i_target_y_label)
            # compute CI within this group only
            CI = concordance_index(i_target_y_label,i_target_pred_scores)
            CI_list.append(CI)
            weighted_CI_list.append(i_target_len*CI)
            weights_len.append(i_target_len)
    average_CI = np.mean(CI_list)
    weighted_CI = np.sum(weighted_CI_list)/np.sum(weights_len)
    return average_CI, weighted_CI
def dist_run(rank, args, world_size, train_set,mixed_set,val_set,test_set,model,CV):
    """Per-process DDP training loop for one cross-validation fold.

    Spawned once per GPU by mp.spawn in run().  Each *_set is a tuple of
    (groups, drugs, targets, labels, smiles_graph) as produced by
    process_data.  Pairs are resampled every epoch; rank 0 evaluates and
    checkpoints the best model by validation average CI.
    """
    dist.init_process_group('nccl', rank=rank, world_size=world_size)
    print(rank)
    # prepare the processed data
    #train
    train_t = train_set[2]
    train_d = train_set[1]
    train_groups = train_set[0]
    train_y = train_set[3]
    train_smiles_graph = train_set[4]
    if args.is_mixed:
        #mixed
        mixed_t = mixed_set[2]
        mixed_d = mixed_set[1]
        mixed_groups = mixed_set[0]
        mixed_y = mixed_set[3]
        mixed_smiles_graph = mixed_set[4]
        # free the large tuples once unpacked
        del train_set
        del mixed_set
    # val
    val_t = val_set[2]
    val_d = val_set[1]
    val_groups = val_set[0]
    val_y = val_set[3]
    val_smiles_graph = val_set[4]
    # test
    test_t = test_set[2]
    test_d = test_set[1]
    test_groups = test_set[0]
    test_y = test_set[3]
    test_smiles_graph = test_set[4]
    ##################### load the data ############################
    if args.is_mixed:
        # concatenate the data (mixed samples are appended after train)
        train_t_data = np.concatenate((train_t,mixed_t))
        train_d_data = np.concatenate((train_d,mixed_d))
        train_smiles_graph_data = {**train_smiles_graph, **mixed_smiles_graph}
    else:
        train_t_data = train_t
        train_d_data = train_d
        train_smiles_graph_data = train_smiles_graph
    # get the group
    qid_doc_map_train = group_by(train_groups)
    query_idx_train = qid_doc_map_train.keys()
    train_keys = np.array(list(query_idx_train))
    if args.is_mixed:
        id_doc_map_mixed = group_by(mixed_groups)
        query_idx_mixed = id_doc_map_mixed.keys()
        mixed_keys = np.array(list(query_idx_mixed))
    qid_doc_map_val = group_by(val_groups)
    query_idx_val = qid_doc_map_val.keys()
    val_keys = np.array(list(query_idx_val))
    qid_doc_map_test = group_by(test_groups)
    query_idx_test = qid_doc_map_test.keys()
    test_keys = np.array(list(query_idx_test))
    ###### get the protein group and index for train/val/test
    # get the true scores of train
    true_scores = [train_y[qid_doc_map_train[qid]] for qid in query_idx_train]
    if args.is_mixed:
        true_scores_mixed = [mixed_y[id_doc_map_mixed[qid]] for qid in query_idx_mixed]
    # ###### get val/test dataloader (grouped per protein; batch_size counts groups)
    val_index = []
    for qid in val_keys:
        val_index.append(qid_doc_map_val[qid])
    val_dataset = TestDataset(test_index=val_index,xd=val_d,xt=val_t,y=val_y,smile_graph=val_smiles_graph)
    val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size,shuffle=False)
    test_index = []
    for qid in test_keys:
        test_index.append(qid_doc_map_test[qid])
    test_dataset = TestDataset(test_index=test_index,xd=test_d,xt=test_t,y=test_y,smile_graph=test_smiles_graph)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size,shuffle=False)
    ###### load model
    model = model.to(rank)
    model_dist = DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)
    # define the optimizer
    optimizer = torch.optim.Adam(model_dist.parameters(), lr=args.learning_rate)
    print('start to train the model...')
    for epoch in range(args.N_epoch):
        ##################### resampling the pairs for each epoch #####################
        start_time = time.time()
        # seed=epoch keeps sampling reproducible yet different each epoch
        train_x1_index, train_x2_index, train_scores, Y_train = sample_pairs(true_scores,K=args.sampling_N_train,eps=args.filter_threshold,seed=epoch)
        if args.is_mixed:
            mixed_x1_index, mixed_x2_index, mixed_scores, Y_mixed = sample_pairs(true_scores_mixed,K=args.sampling_N_mixed,eps=args.filter_threshold,seed=epoch)
        # mixed all pairs from train and mixed dataset
        len_train = len(train_x1_index)
        onehot_train = np.zeros(len_train)
        if args.is_mixed:
            # flag vector: 0 = in-domain pair, 1 = mixed-dataset pair
            len_mixed1 = len(mixed_x1_index)
            onehot_mixed = np.ones(len_mixed1)
            onehot_train_mixed = np.concatenate((onehot_train,onehot_mixed))
        else:
            onehot_train_mixed = onehot_train
        if args.is_mixed:
            # shift mixed indices past the train block in the concatenated arrays
            temp = len(train_d)
            mixed_x1_index = [i + temp for i in mixed_x1_index]
            mixed_x2_index = [i + temp for i in mixed_x2_index]
            train_x1_index = train_x1_index + mixed_x1_index
            train_x2_index = train_x2_index + mixed_x2_index
            Y_train_data = np.concatenate((Y_train,Y_mixed))
        else:
            Y_train_data = Y_train
        # get dataloader
        train_dataset = TrainDataset(train_x1_index=train_x1_index,train_x2_index=train_x2_index,train_d=train_d_data, train_t=train_t_data, y=Y_train_data,onehot_train_mixed=onehot_train_mixed,smile_graph=train_smiles_graph_data)
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True)
        train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size,sampler=train_sampler)
        end_time = time.time()
        print('make pairs + sampling, take time {}'.format(end_time-start_time))
        ##################### resampling the pairs for each epoch #####################
        print('***************train')
        LOSS = []
        model.train()
        start_time = time.time()
        for batch_id, data in enumerate(train_dataloader):
            data1 = data[0].to(rank)
            data2 = data[1].to(rank)
            batch_train_mixed = data1.train_mixed
            optimizer.zero_grad()
            output = model_dist(data1,data2)
            ture_labels = data1.y.view(-1, 1).float()
            # NOTE(review): the four split tensors below are computed but never
            # used — the loss is taken over the whole batch. Confirm intent.
            output_train = output[batch_train_mixed==0]
            output_mixed = output[batch_train_mixed==1]
            ture_labels_train = ture_labels[batch_train_mixed==0]
            ture_labels_test = ture_labels[batch_train_mixed==1]
            ###### define loss and optimization function
            # NOTE(review): hinge_loss defined above is unused; BCE is used here.
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(output, ture_labels)
            loss.backward()
            optimizer.step()
            if batch_id % 20 == 0:
                print('batch {} loss {}'.format(batch_id,loss.item()))
            LOSS.append(loss.cpu().detach().numpy())
        end_time = time.time()
        print('take time {}'.format(end_time-start_time))
        print('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)))
        if rank == 0:
            # validation
            print('***************validation')
            val_average_CI, val_weighted_CI = model_eval(model,val_dataloader,device='cuda:0')
            print("val_Average CI is {}".format(val_average_CI))
            print("val_weighted CI is {}".format(val_weighted_CI))
            # test
            print('***************test')
            test_average_CI, test_weighted_CI = model_eval(model,test_dataloader,device='cuda:0')
            print("test_Average CI is {}".format(test_average_CI))
            print("test_weighted CI is {}".format(test_weighted_CI))
            if epoch == 0:
                best_average_CI = val_average_CI
                # save the best epoch
                torch.save(model.state_dict(), args.save_direct + CV + '_' + 'train_model_best' )
                with open(args.save_direct + CV + '_' + "best_results.txt", "w") as text_file:
                    text_file.write('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)) + '\n')
                    text_file.write("val Average CI is {}".format(val_average_CI) + '\n')
                    text_file.write("val weighted CI is {}".format(val_weighted_CI) + '\n')
                    text_file.write("test Average CI is {}".format(test_average_CI) + '\n')
                    text_file.write("test weighted CI is {}".format(test_weighted_CI) + '\n')
                    text_file.write('##############################################' + '\n')
            if (epoch != 0) & (val_average_CI >= best_average_CI):
                best_average_CI = val_average_CI
                # save the best epoch
                torch.save(model.state_dict(), args.save_direct + CV + '_' + 'train_model_best' )
                with open(args.save_direct + CV + '_' + "best_results.txt", "w") as text_file:
                    text_file.write('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)) + '\n')
                    text_file.write("val Average CI is {}".format(val_average_CI) + '\n')
                    text_file.write("val weighted CI is {}".format(val_weighted_CI) + '\n')
                    text_file.write("test Average CI is {}".format(test_average_CI) + '\n')
                    text_file.write("test weighted CI is {}".format(test_weighted_CI) + '\n')
                    text_file.write('##############################################' + '\n')
def run(args):
    """Drive 5-fold cross-validation: load each fold's CSVs, preprocess,
    then spawn one dist_run worker per available GPU.
    """
    print('Load data...')
    ###### load model
    # NOTE(review): eval() on a CLI string — acceptable for the documented
    # model names but unsafe for arbitrary input; a name->class dict would
    # be safer.
    model = eval(args.model_name)()
    CVs = ['CV1','CV2','CV3','CV4','CV5']
    data_path = args.data_path + args.dataset + '/'
    for CV in CVs:
        print('><<><><><><><><><><><><><><><><><><><><><><><><><<><><><><><>')
        print('start {}'.format(CV))
        ##################### load the data ############################
        train_file = CV + '_' + args.dataset + '_' + args.split +'_' + 'train' + '.csv'
        val_file = CV + '_' + args.dataset + '_' + args.split + '_' + 'val' + '.csv'
        test = 'test_' + args.dataset + '_' + args.split + '.csv'
        # load the data
        train_data = pd.read_csv(data_path + CV + '/' + train_file)
        val_data = pd.read_csv(data_path + CV + '/' + val_file)
        test_data = pd.read_csv(data_path + test)
        if args.is_mixed:
            # load the mixed data (the *other* benchmark dataset)
            if args.dataset == 'DAVIS':
                mixed_dataset = 'KIBA'
            if args.dataset == 'KIBA':
                mixed_dataset = 'DAVIS'
            # laod the mixed data
            mixed_data_file = mixed_dataset + '_mixed_train_unseenP_seenD.csv'
            mixed_data = pd.read_csv(data_path + mixed_data_file)
            # remove the repeated protein sequence (avoid val leakage)
            val_t = val_data['Target Sequence'].unique()
            mixed_t = mixed_data['Target Sequence'].unique()
            filter1 = list((set(val_t).intersection(set(mixed_t))))
            mixed_data = mixed_data[~mixed_data['Target Sequence'].isin(filter1)]
            mixed_set = process_data(mixed_data)
        else:
            mixed_set = None
        # pre-processing the data
        train_set = process_data(train_data)
        val_set = process_data(val_data)
        test_set = process_data(test_data)
        world_size = torch.cuda.device_count()
        print('Let\'s use', world_size, 'GPUs!')
        # one process per GPU; dist_run handles DDP setup and training
        mp.spawn(dist_run, args=(args, world_size, train_set,mixed_set,val_set,test_set,model,CV), nprocs=world_size, join=True)
if __name__ == '__main__':
    ##################### set parameters #####################
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_direct", default='./output/')
    parser.add_argument("--data_path", default='../Data_for_ALL/')
    parser.add_argument("--dataset", default='DAVIS',help=' DAVIS | KIBA')
    parser.add_argument("--model_name", default='GAT_GCN',help='[GATNet, GAT_GCN , GCNNet, GINConvNet]')
    parser.add_argument("--split", default='unseenP_seenD')
    parser.add_argument("--local_rank", default=0)
    parser.add_argument("--is_mixed", default=False)
    parser.add_argument("--sampling_N_train", type=int,default=10)
    parser.add_argument("--sampling_N_mixed", type=int,default=5)
    # bug fix: this threshold is fractional (default 0.2); the old type=int
    # silently truncated any CLI-supplied value to an integer (e.g. 0.2 -> error,
    # "0" -> no filtering), so it must parse as float
    parser.add_argument("--filter_threshold", type=float, default=0.2)
    parser.add_argument("--train_batch_size", type=int, default=256)
    parser.add_argument("--test_batch_size", type=int, default=1)
    parser.add_argument("--learning_rate", type=float, default=1e-3)
    parser.add_argument("--N_epoch", type=int,default=200)
    args = parser.parse_args()
    ##################### set parameters #####################
    run(args)
| 18,993 | 36.243137 | 230 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/models/ginconv.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv, global_add_pool
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
# GINConv model
# GINConv model
class GINConvNet(torch.nn.Module):
    """DTA scorer: five GIN layers encode the drug graph, a 1-D convolution
    encodes the protein sequence, and dense layers map the concatenated
    embeddings to one score. forward() returns a pairwise score difference.
    """
    def __init__(self, n_output=1,num_features_xd=78, num_features_xt=25,
                 n_filters=32, embed_dim=32, output_dim=32, dropout=0.2, kernel_size=32):
        super(GINConvNet, self).__init__()
        dim = 32
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.n_output = n_output
        # convolution layers (each GIN layer wraps a 2-layer MLP + batch norm)
        nn1 = Sequential(Linear(num_features_xd, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)
        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)
        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)
        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)
        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)
        self.fc1_xd = Linear(dim, output_dim)
        # 1D convolution on protein sequence
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=kernel_size)
        # flattened size after the 1-D conv over the embedding dimension
        self.protein_repr_size = n_filters * (embed_dim - kernel_size + 1)
        self.fc1_xt = nn.Linear(self.protein_repr_size, output_dim)
        # combined layers
        '''
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, self.n_output) # n_output = 1 for regression task
        '''
        self.fc1 = nn.Linear(output_dim + output_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.out = nn.Linear(256, self.n_output) # n_output = 1 for regression task
    def forward_single(self, data):
        """Score one drug-target pair packed in a torch-geometric batch."""
        x, edge_index, batch = data.x, data.edge_index, data.batch
        target = data.target
        x = F.relu(self.conv1(x, edge_index))
        x = self.bn1(x)
        x = F.relu(self.conv2(x, edge_index))
        x = self.bn2(x)
        x = F.relu(self.conv3(x, edge_index))
        x = self.bn3(x)
        x = F.relu(self.conv4(x, edge_index))
        x = self.bn4(x)
        x = F.relu(self.conv5(x, edge_index))
        x = self.bn5(x)
        # sum-pool node embeddings per graph
        x = global_add_pool(x, batch)
        x = F.relu(self.fc1_xd(x))
        x = F.dropout(x, p=0.2, training=self.training)
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        # flatten
        xt = conv_xt.view(-1, self.protein_repr_size)
        xt = self.fc1_xt(xt)
        # concat
        xc = torch.cat((x, xt), 1)
        # add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
    def forward(self, data1, data2):
        """Pairwise ranking: score(data1) - score(data2)."""
        out1 = self.forward_single(data1)
        out2 = self.forward_single(data2)
        return out1 - out2
| 3,499 | 34.353535 | 101 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/models/gcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_max_pool as gmp
# GCN based model
# GCN based model
class GCNNet(torch.nn.Module):
    """DTA scorer: three GCN layers encode the drug graph, a 1-D convolution
    encodes the protein sequence, and dense layers map the concatenated
    embeddings to one score. forward() returns a pairwise score difference.
    """
    def __init__(self, n_output=1, n_filters=32, embed_dim=128,num_features_xd=78, num_features_xt=25, output_dim=128, dropout=0.2):
        super(GCNNet, self).__init__()
        # SMILES graph branch
        self.n_output = n_output
        self.conv1 = GCNConv(num_features_xd, num_features_xd)
        self.conv2 = GCNConv(num_features_xd, num_features_xd*2)
        self.conv3 = GCNConv(num_features_xd*2, num_features_xd * 4)
        self.fc_g1 = torch.nn.Linear(num_features_xd*4, 1024)
        self.fc_g2 = torch.nn.Linear(1024, output_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        # protein sequence branch (1d conv)
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        # 32*121 = n_filters * (embed_dim - kernel_size + 1) for the defaults
        self.fc1_xt = nn.Linear(32*121, output_dim)
        # combined layers
        self.fc1 = nn.Linear(2*output_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)
    def forward_single(self, data):
        """Score one drug-target pair packed in a torch-geometric batch."""
        # get graph input
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # get protein input
        target = data.target
        x = self.conv1(x, edge_index)
        x = self.relu(x)
        x = self.conv2(x, edge_index)
        x = self.relu(x)
        x = self.conv3(x, edge_index)
        x = self.relu(x)
        x = gmp(x, batch)       # global max pooling
        # flatten
        x = self.relu(self.fc_g1(x))
        x = self.dropout(x)
        x = self.fc_g2(x)
        x = self.dropout(x)
        # 1d conv layers
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt_1(embedded_xt)
        # flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc1_xt(xt)
        # concat
        xc = torch.cat((x, xt), 1)
        # add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
    def forward(self, data1, data2):
        """Pairwise ranking: score(data1) - score(data2)."""
        out1 = self.forward_single(data1)
        out2 = self.forward_single(data2)
        return out1 - out2
| 2,456 | 30.101266 | 132 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/models/gat.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GATConv
from torch_geometric.nn import global_max_pool as gmp
# GAT model
# GAT model
class GATNet(torch.nn.Module):
    """DTA scorer: two GAT layers encode the drug graph, a 1-D convolution
    encodes the protein sequence, and dense layers map the concatenated
    embeddings to one score. forward() returns a pairwise score difference.
    """
    def __init__(self, num_features_xd=78, n_output=1, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):
        super(GATNet, self).__init__()
        # graph layers (first layer has 10 attention heads, concatenated)
        self.gcn1 = GATConv(num_features_xd, num_features_xd, heads=10, dropout=dropout)
        self.gcn2 = GATConv(num_features_xd * 10, output_dim, dropout=dropout)
        self.fc_g1 = nn.Linear(output_dim, output_dim)
        # 1D convolution on protein sequence
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        # 32*121 = n_filters * (embed_dim - kernel_size + 1) for the defaults
        self.fc_xt1 = nn.Linear(32*121, output_dim)
        # combined layers
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, n_output)
        # activation and regularization
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
    def forward_single(self, data):
        """Score one drug-target pair packed in a torch-geometric batch."""
        # graph input feed-forward
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.dropout(x, p=0.2, training=self.training)
        x = F.elu(self.gcn1(x, edge_index))
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.gcn2(x, edge_index)
        x = self.relu(x)
        x = gmp(x, batch)          # global max pooling
        x = self.fc_g1(x)
        x = self.relu(x)
        # protein input feed-forward:
        target = data.target
        embedded_xt = self.embedding_xt(target)
        conv_xt = self.conv_xt1(embedded_xt)
        conv_xt = self.relu(conv_xt)
        # flatten
        xt = conv_xt.view(-1, 32 * 121)
        xt = self.fc_xt1(xt)
        # concat
        xc = torch.cat((x, xt), 1)
        # add some dense layers
        xc = self.fc1(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        xc = self.fc2(xc)
        xc = self.relu(xc)
        xc = self.dropout(xc)
        out = self.out(xc)
        return out
    def forward(self, data1, data2):
        """Pairwise ranking: score(data1) - score(data2)."""
        out1 = self.forward_single(data1)
        out2 = self.forward_single(data2)
        return out1 - out2
| 2,418 | 31.689189 | 90 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/GraphDTA/models/gat_gcn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GCNConv, GATConv, GINConv, global_add_pool
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
# GCN-CNN based model
# GCN-CNN based model
class GAT_GCN(torch.nn.Module):
    """Pairwise DTA scorer: a GAT layer followed by a GCN layer encodes the
    drug graph, a 1-D convolution encodes the protein sequence, and dense
    layers map the concatenated embeddings to a single affinity score.
    forward() returns the score difference between two drug-target pairs.
    """
    def __init__(self, n_output=1, num_features_xd=78, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):
        super(GAT_GCN, self).__init__()
        self.n_output = n_output
        # drug-graph branch (creation order kept: it fixes RNG-based init)
        self.conv1 = GATConv(num_features_xd, num_features_xd, heads=10)
        self.conv2 = GCNConv(num_features_xd*10, num_features_xd*10)
        self.fc_g1 = torch.nn.Linear(num_features_xd*10*2, 1500)
        self.fc_g2 = torch.nn.Linear(1500, output_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        # protein-sequence branch: embedding then 1-D convolution
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc1_xt = nn.Linear(32*121, output_dim)
        # joint head over the concatenated drug/protein representations
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output) # n_output = 1 for regression task
    def forward_single(self, data):
        """Score one drug-target pair packed in a torch-geometric batch."""
        node_feats, edge_index, batch = data.x, data.edge_index, data.batch
        protein = data.target
        node_feats = self.relu(self.conv1(node_feats, edge_index))
        node_feats = self.relu(self.conv2(node_feats, edge_index))
        # readout: concatenation of global max- and mean-pooled node states
        pooled = torch.cat([gmp(node_feats, batch), gap(node_feats, batch)], dim=1)
        pooled = self.relu(self.fc_g1(pooled))
        pooled = self.dropout(pooled)
        drug_repr = self.fc_g2(pooled)
        seq_emb = self.embedding_xt(protein)
        seq_conv = self.conv_xt_1(seq_emb)
        # flatten the conv output before the dense projection
        prot_repr = self.fc1_xt(seq_conv.view(-1, 32 * 121))
        merged = torch.cat((drug_repr, prot_repr), 1)
        merged = self.dropout(self.relu(self.fc1(merged)))
        merged = self.dropout(self.relu(self.fc2(merged)))
        return self.out(merged)
    def forward(self, data1, data2):
        """Pairwise ranking: score(data1) - score(data2)."""
        return self.forward_single(data1) - self.forward_single(data2)
| 2,495 | 33.666667 | 91 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/drug_target_interaction/batchdta/pairwise/Moltrans/helper/utils/paddle_io.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paddle IO
"""
from paddle.io import Dataset, IterableDataset
import paddle
import warnings
import bisect
class ConcatDataset(Dataset):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets.
    Global index i is mapped to (dataset_idx, sample_idx) via a cumulative
    size table built once in __init__.

    Arguments:
        datasets (sequence): List of datasets to be concatenated
    """
    @staticmethod
    def cumsum(sequence):
        """Return the running total of the datasets' lengths,
        e.g. lengths [3, 2, 4] -> [3, 5, 9]."""
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r
    def __init__(self, datasets):
        """Store the datasets and precompute the cumulative size table."""
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        for d in self.datasets:
            # iterable datasets have no __len__/__getitem__, so they cannot
            # be indexed through the cumulative table
            assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        """Total number of samples across all constituent datasets."""
        return self.cumulative_sizes[-1]
    def __getitem__(self, idx):
        """Look up the dataset containing global index `idx` (binary search)
        and delegate with the dataset-local index. Supports negative idx."""
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]
    @property
    def cummulative_sizes(self):
        """Deprecated misspelled alias kept for backward compatibility."""
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
def _accumulate(iterable, fn=lambda x, y: x + y):
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = fn(total, element)
yield total
class Subset(Dataset):
    r"""Read-only view over `dataset` restricted to the given indices.

    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """
    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices
    def __getitem__(self, idx):
        # translate the subset-local index into the parent dataset's index
        mapped = self.indices[idx]
        return self.dataset[mapped]
    def __len__(self):
        return len(self.indices)
def random_split(dataset, lengths, generator=None):
    r"""Randomly split *dataset* into non-overlapping Subsets of the given
    lengths.

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced
        generator (Generator): accepted for torch API compatibility only;
            it is not used by this paddle implementation.
    """
    total = sum(lengths)
    if total != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    # one shared permutation, carved into consecutive chunks
    indices = paddle.randperm(total)
    subsets = []
    for offset, length in zip(_accumulate(lengths), lengths):
        subsets.append(Subset(dataset, indices[offset - length: offset]))
    return subsets
setattr(paddle.io, "random_split", random_split)
| 4,237 | 29.489209 | 119 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/fewshot_molecular_property/chem_lib/models/relation.py | from collections import OrderedDict
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class MLP(nn.Layer):
    """Stack of Linear layers with optional BatchNorm1D / LeakyReLU / Dropout
    between hidden layers.

    With ``num_layers == 0`` the module degenerates to ``nn.Identity``.
    """

    def __init__(self, inp_dim, hidden_dim, num_layers, batch_norm=False, dropout=0.):
        super(MLP, self).__init__()
        stages = OrderedDict()
        cur_dim = inp_dim
        for idx in range(num_layers):
            stages['fc{}'.format(idx)] = nn.Linear(cur_dim, hidden_dim)
            # no norm/activation/dropout after the final Linear
            if idx < num_layers - 1:
                if batch_norm:
                    stages['norm{}'.format(idx)] = nn.BatchNorm1D(num_features=hidden_dim)
                stages['relu{}'.format(idx)] = nn.LeakyReLU()
                if dropout > 0:
                    stages['drop{}'.format(idx)] = nn.Dropout(p=dropout)
            cur_dim = hidden_dim
        if num_layers > 0:
            self.network = nn.Sequential()
            for name in stages:
                self.network.add_sublayer(name, stages[name])
        else:
            self.network = nn.Identity()

    def forward(self, emb):
        """Project *emb* through the configured network."""
        return self.network(emb)
class Attention(nn.Layer):
    """
    Multi-head self-attention (single fused QKV projection, no output
    projection). Obtained from: github.com:rwightman/pytorch-image-models
    """
    def __init__(self, dim, num_heads=1, attention_dropout=0.1, projection_dropout=0.1):
        super().__init__()
        self.num_heads = num_heads
        # scale scores by 1/sqrt(head_dim)
        self.scale = (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias_attr=False)
        self.attn_drop = nn.Dropout(attention_dropout)

    def forward(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # fused projection -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape([batch, tokens, 3, self.num_heads, head_dim])
        qkv = paddle.transpose(qkv, (2, 0, 3, 1, 4))
        q, k, v = qkv[0], qkv[1], qkv[2]
        scores = paddle.matmul(q, paddle.transpose(k, [0, 1, 3, 2])) * self.scale
        scores = F.softmax(scores, axis=-1)
        scores = self.attn_drop(scores)
        out = paddle.matmul(scores, v)
        out = out.transpose((0, 2, 1, 3)).reshape((batch, tokens, channels))
        return out
class ContextMLP(nn.Layer):
    # Builds context-aware pair embeddings for few-shot classification:
    # each query is stacked with every support sample and the pair is
    # attended against class-prototype embeddings before/after an MLP
    # projection (order controlled by pre_fc).
    def __init__(self, inp_dim, hidden_dim, num_layers,pre_fc=0,batch_norm=False, dropout=0.,ctx_head=1,):
        super(ContextMLP, self).__init__()
        self.pre_fc = pre_fc #0, 1
        if self.pre_fc:
            # project first (to half the hidden width), then attend
            hidden_dim=int(hidden_dim//2)
            self.attn_layer = Attention(hidden_dim,num_heads=ctx_head,attention_dropout=dropout)
            self.mlp_proj = MLP(inp_dim=inp_dim, hidden_dim=hidden_dim, num_layers=num_layers,
                                batch_norm=batch_norm, dropout=dropout)
        else:
            # attend on raw embeddings; the MLP then consumes the
            # [attended || original] concatenation (hence inp_dim * 2)
            self.attn_layer = Attention(inp_dim)
            inp_dim=int(inp_dim*2)
            self.mlp_proj = MLP(inp_dim=inp_dim, hidden_dim=hidden_dim, num_layers=num_layers,
                        batch_norm=batch_norm, dropout=dropout)
    def forward(self, s_emb, q_emb):
        # s_emb: support embeddings, q_emb: query embeddings.
        # Assumes supports are ordered [n_shot negatives, n_shot positives]
        # (see the n_shot slicing below) -- TODO confirm against the caller.
        if self.pre_fc:
            s_emb = self.mlp_proj(s_emb)
            q_emb = self.mlp_proj(q_emb)
        n_support = s_emb.shape[0]
        n_query = q_emb.shape[0]
        # replicate the support set once per query and append the query row:
        # all_emb is (n_query, n_support + 1, dim)
        s_emb_rep = paddle.expand(s_emb,[n_query, s_emb.shape[0], s_emb.shape[1]])
        q_emb_rep = q_emb.unsqueeze(1)
        all_emb = paddle.concat([s_emb_rep, q_emb_rep], 1)
        orig_all_emb =  all_emb
        # class prototypes: mean of the first n_shot rows (negative class)
        # and of the next n_shot rows (positive class)
        n_shot=int(n_support//2)
        all_emb_meann = all_emb[:,:n_shot].mean(1)
        all_emb_meanp = all_emb[:,n_shot:2*n_shot].mean(1)
        neg_proto_emb = paddle.transpose(paddle.expand(all_emb_meann,[n_support + 1, all_emb_meann.shape[0], all_emb_meann.shape[1]]),(1,0,2))
        pos_proto_emb = paddle.transpose(paddle.expand(all_emb_meanp,[n_support + 1, all_emb_meanp.shape[0], all_emb_meanp.shape[1]]),(1,0,2))
        # stack [sample, neg prototype, pos prototype] as a 3-token sequence
        # per (query, support-row) pair and run self-attention over it
        all_emb = paddle.stack([all_emb, neg_proto_emb,pos_proto_emb], 2)
        q,s,n,d = all_emb.shape
        x=all_emb.reshape((q*s, n, d))
        attn_x =self.attn_layer(x)
        attn_x=attn_x.reshape((q, s, n, d))
        # keep only the attended sample token (drop the prototype tokens)
        all_emb = attn_x[:,:,0,]
        all_emb = paddle.concat([all_emb, orig_all_emb],axis = -1)
        if not self.pre_fc:
            all_emb = self.mlp_proj(all_emb)
        # second return slot is unused by design; kept for API compatibility
        return all_emb, None
class NodeUpdateNetwork(nn.Layer):
    # GNN node update: aggregates neighbour features through each adjacency
    # channel, concatenates with the node's own features, and projects the
    # result with a stack of 1x1 Conv2D layers.
    def __init__(self, inp_dim, out_dim, n_layer=2, edge_dim=2, batch_norm=False, dropout=0.0):
        super(NodeUpdateNetwork, self).__init__()
        # set size
        self.edge_dim = edge_dim
        num_dims_list = [out_dim] * n_layer # [num_features * r for r in ratio]
        if n_layer > 1:
            # first layer is twice as wide when the stack has depth > 1
            num_dims_list[0] = 2 * out_dim
        # layers: 1x1 convolutions act as a per-node MLP; the input width is
        # (edge_dim + 1) * inp_dim because the node's own features are
        # concatenated with one aggregate per adjacency channel (see forward)
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            layer_list['conv{}'.format(l)] = nn.Conv2D(
                in_channels=num_dims_list[l - 1] if l > 0 else (self.edge_dim + 1) * inp_dim,
                out_channels=num_dims_list[l],
                kernel_size=1,
                bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()
            # dropout only after the last layer of the stack
            if dropout > 0 and l == (len(num_dims_list) - 1):
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)
        self.network = nn.Sequential()
        for i in layer_list:
            self.network.add_sublayer(i, layer_list[i])
    def forward(self, node_feat, edge_feat):
        # node_feat: (num_tasks, num_data, feat); edge_feat: adjacency with
        # self.edge_dim channels, (num_tasks, edge_dim, num_data, num_data)
        # get size
        num_tasks = node_feat.shape[0]
        num_data = node_feat.shape[1]
        # get eye matrix (batch_size x 2 x node_size x node_size)
        diag_mask = 1.0 - paddle.expand(paddle.eye(num_data),[num_tasks, self.edge_dim, num_data, num_data])
        # set diagonal as zero and normalize each row to sum to 1 (L1)
        edge_feat = F.normalize(edge_feat * diag_mask, p=1, axis=-1)
        # compute attention and aggregate: the two adjacency channels are
        # laid side by side so a single bmm aggregates both at once
        aggr_feat = paddle.bmm(paddle.concat(paddle.split(edge_feat, 2, 1), self.edge_dim).squeeze(1), node_feat)
        # concat [self features || per-channel aggregates], move feature dim
        # to the channel axis expected by Conv2D
        node_feat = paddle.transpose(paddle.concat([node_feat, paddle.concat(paddle.split(aggr_feat,2, 1), -1)], -1),(0, 2, 1))
        # non-linear transform
        node_feat = paddle.transpose(self.network(node_feat.unsqueeze(-1)),(0, 2, 1, 3)).squeeze(-1)
        return node_feat
class EdgeUpdateNetwork(nn.Layer):
    """Scores a soft adjacency over all node pairs.

    Each pair is scored by a 1x1-Conv2D network applied to |x_i - x_j|
    (optionally exp(-|x_i - x_j|) when ``adj_type == 'sim'``). The scores are
    normalized by a masked softmax or a sigmoid, optionally duplicated into a
    complementary dissimilarity channel (``edge_dim == 2``), and optionally
    sparsified to the top-k entries per row.
    """

    def __init__(self, in_features, hidden_features, n_layer=3, top_k=-1,
                 edge_dim=2, batch_norm=False, dropout=0.0, adj_type='dist', activation='softmax'):
        super(EdgeUpdateNetwork, self).__init__()
        self.top_k = top_k
        self.adj_type = adj_type
        self.edge_dim = edge_dim
        self.activation = activation
        # widen the first (and, for deep stacks, the second) hidden layer
        num_dims_list = [hidden_features] * n_layer
        if n_layer > 1:
            num_dims_list[0] = 2 * hidden_features
        if n_layer > 3:
            num_dims_list[1] = 2 * hidden_features
        # 1x1 Conv2D stack scoring every node pair independently
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            layer_list['conv{}'.format(l)] = nn.Conv2D(in_channels=num_dims_list[l - 1] if l > 0 else in_features,
                                                       out_channels=num_dims_list[l],
                                                       kernel_size=1,
                                                       bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()
            if dropout > 0:
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)
        layer_list['conv_out'] = nn.Conv2D(in_channels=num_dims_list[-1],
                                           out_channels=1,
                                           kernel_size=1)
        self.sim_network = nn.Sequential()
        for i in layer_list:
            self.sim_network.add_sublayer(i, layer_list[i])

    def softmax_with_mask(self, adj, mask=None):
        """Row-wise softmax over the last axis; masked entries get ~0 weight."""
        if mask is not None:
            # push masked-out logits to -1e8 so softmax assigns ~0 probability
            adj_new = adj - (1 - mask.expand_as(adj)) * 1e8
        else:
            adj_new = adj
        n_q, n_edge, n1, n2 = adj_new.shape
        adj_new = adj_new.reshape((n_q * n_edge * n1, n2))
        # BUG FIX: paddle.nn.functional.softmax takes `axis`, not `dim`
        # (the original torch-style keyword raised TypeError at runtime;
        # compare F.softmax(..., axis=-1) used elsewhere in this file).
        adj_new = F.softmax(adj_new, axis=-1)
        adj_new = adj_new.reshape((n_q, n_edge, n1, n2))
        return adj_new

    def forward(self, node_feat, edge_feat=None):  # x: bs*N*num_feat
        # pairwise absolute differences, rearranged to bs x fs x N x N
        x_i = node_feat.unsqueeze(2)
        x_j = paddle.transpose(x_i, (0, 2, 1, 3))
        x_ij = paddle.abs(x_i - x_j)
        x_ij = paddle.transpose(x_ij, (0, 3, 2, 1))
        if self.adj_type == 'sim':
            x_ij = paddle.exp(-x_ij)
        sim_val = self.sim_network(x_ij)
        # mask out the diagonal: no self-edges
        diag_mask = 1.0 - paddle.expand(paddle.eye(node_feat.shape[1]),
                                        [node_feat.shape[0], 1, node_feat.shape[1], node_feat.shape[1]])
        if self.activation == 'softmax':
            sim_val = self.softmax_with_mask(sim_val, diag_mask)
        elif self.activation == 'sigmoid':
            sim_val = F.sigmoid(sim_val) * diag_mask
        else:
            sim_val = sim_val * diag_mask
        if self.edge_dim == 2:
            # second channel carries the complementary (dissimilarity) weights
            if self.activation == 'softmax':
                dsim_val = self.softmax_with_mask(1 - sim_val, diag_mask)
            else:
                dsim_val = (1 - sim_val) * diag_mask
            adj_val = paddle.concat([sim_val, dsim_val], 1)
        else:
            adj_val = sim_val
        if self.top_k > 0:
            # keep only the k strongest entries per row, renormalizing when
            # softmax activation is used
            n_q, n_edge, n1, n2 = adj_val.shape
            k = min(self.top_k, n1)
            adj_temp = adj_val.reshape((n_q * n_edge * n1, n2))
            topk, indices = paddle.topk(adj_temp, k)
            mask = F.one_hot(indices, adj_temp.shape[1]).sum(1)
            mask = mask.reshape((n_q, n_edge, n1, n2))
            if self.activation == 'softmax':
                adj_val = self.softmax_with_mask(adj_val, mask)
            else:
                adj_val = adj_val * mask
        return adj_val, edge_feat
class TaskAwareRelation(nn.Layer):
    """GNN relation head for few-shot classification.

    Alternates EdgeUpdateNetwork (adjacency refinement) and NodeUpdateNetwork
    (feature aggregation) layers, then classifies the support rows and the
    final (query) row with a shared linear head. ``res_alpha`` blends the
    refined features back with the input embeddings.
    """

    def __init__(self, inp_dim, hidden_dim, num_layers, edge_n_layer, num_class=2,
                 res_alpha=0., top_k=-1, node_concat=True, batch_norm=False, dropout=0.0,
                 edge_dim=2, adj_type='sim', activation='softmax', pre_dropout=0.0):
        super(TaskAwareRelation, self).__init__()
        self.inp_dim = inp_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.node_concat = node_concat
        self.res_alpha = res_alpha
        self.dropout_rate = dropout
        self.pre_dropout = pre_dropout
        self.adj_type = adj_type
        # node network depth: half the edge depth, clamped to [1, 2]
        node_n_layer = max(1, min(int(edge_n_layer // 2), 2))
        gnn_inp_dim = self.inp_dim
        if self.pre_dropout > 0:
            self.predrop1 = nn.Dropout(p=self.pre_dropout)
        self.layer_edge = nn.LayerList()
        self.layer_node = nn.LayerList()
        for i in range(self.num_layers):
            # dropout only on non-final GNN layers
            module_w = EdgeUpdateNetwork(in_features=gnn_inp_dim, hidden_features=hidden_dim, n_layer=edge_n_layer,
                                         top_k=top_k,
                                         edge_dim=edge_dim, batch_norm=batch_norm, adj_type=adj_type,
                                         activation=activation, dropout=dropout if i < self.num_layers - 1 else 0.0)
            module_l = NodeUpdateNetwork(inp_dim=gnn_inp_dim, out_dim=hidden_dim, n_layer=node_n_layer,
                                         edge_dim=edge_dim, batch_norm=batch_norm,
                                         dropout=dropout if i < self.num_layers - 1 else 0.0)
            self.layer_edge.append(module_w)
            self.layer_node.append(module_l)
            # with node_concat the next layer sees [old || new] features
            if self.node_concat:
                gnn_inp_dim = gnn_inp_dim + hidden_dim
            else:
                gnn_inp_dim = hidden_dim
        self.fc1 = nn.Sequential(nn.Linear(gnn_inp_dim, inp_dim), nn.LeakyReLU())
        if self.pre_dropout > 0:
            self.predrop2 = nn.Dropout(p=self.pre_dropout)
        self.fc2 = nn.Linear(inp_dim, num_class)
        assert 0 <= res_alpha <= 1

    def forward(self, all_emb, q_emb=None, return_adj=False, return_emb=False):
        node_feat = all_emb
        if self.pre_dropout > 0:
            node_feat = self.predrop1(node_feat)
        edge_feat_list = []
        if return_adj:
            # initial adjacency from raw pairwise distances, mirroring
            # EdgeUpdateNetwork.forward
            x_i = node_feat.unsqueeze(2)
            # BUG FIX: paddle.transpose requires a full permutation; the
            # original torch-style transpose(x, (1, 2)) / (1, 3) calls were
            # invalid for these 4-D tensors.
            x_j = paddle.transpose(x_i, (0, 2, 1, 3))
            init_adj = paddle.abs(x_i - x_j)
            init_adj = paddle.transpose(init_adj, (0, 3, 2, 1))  # bs x fs x N x N
            if self.adj_type == 'sim':
                init_adj = paddle.exp(-init_adj)
            # BUG FIX: expand the eye mask to the full bs x 1 x N x N shape
            # (paddle.expand cannot shrink an N x N tensor to [bs, 1, 1, 1]);
            # this matches the diagonal mask used in EdgeUpdateNetwork.
            diag_mask = 1.0 - paddle.expand(paddle.eye(node_feat.shape[1]),
                                            [node_feat.shape[0], 1, node_feat.shape[1], node_feat.shape[1]])
            init_adj = init_adj * diag_mask
            edge_feat_list.append(init_adj)
        for i in range(self.num_layers):
            adj, _ = self.layer_edge[i](node_feat)
            node_feat_new = self.layer_node[i](node_feat, adj)
            if self.node_concat:
                node_feat = paddle.concat([node_feat, node_feat_new], 2)
            else:
                node_feat = node_feat_new
            edge_feat_list.append(adj)
        if self.pre_dropout > 0:
            node_feat = self.predrop2(node_feat)
        node_feat = self.fc1(node_feat)
        # residual blend with the input embeddings
        node_feat = self.res_alpha * all_emb + node_feat
        # last row is the query, the rest are support samples
        s_feat = node_feat[:, :-1, :]
        q_feat = node_feat[:, -1, :]
        s_logits = self.fc2(s_feat)
        q_logits = self.fc2(q_feat)
        if return_emb:
            return s_logits, q_logits, edge_feat_list, s_feat, q_feat
        else:
            return s_logits, q_logits, edge_feat_list
| 13,630 | 40.685015 | 142 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/fewshot_molecular_property/chem_lib/models/maml.py |
import paddle.nn as nn
import paddlefsl.utils as utils
class MAML(nn.Layer):
    """Wraps a Paddle model so it can be cloned for MAML-style inner-loop
    adaptation; ``forward`` delegates straight to the wrapped model."""

    def __init__(
            self,
            model,
            lr,
            first_order=False,
            allow_unused=None,
            allow_nograd=False,
            anil=False,
    ):
        super(MAML, self).__init__()
        self.layers = model
        self.lr = lr
        self.first_order = first_order
        self.allow_nograd = allow_nograd
        # unless told otherwise, unused-parameter differentiation follows
        # the allow_nograd setting
        self.allow_unused = allow_nograd if allow_unused is None else allow_unused
        self.anil = anil

    def forward(self, *args, **kwargs):
        """Delegate to the wrapped model."""
        return self.layers(*args, **kwargs)

    def clone(self, first_order=None, allow_unused=None, allow_nograd=None, anil=None):
        """Return a new MAML wrapping a parameter-cloned copy of the model.

        Back-propagating losses on the clone populates the buffers of the
        original module. Each flag defaults to this instance's setting when
        left as None:

        * **first_order** - use first-order (no second derivative) updates.
        * **allow_unused** - allow differentiation of unused parameters.
        * **allow_nograd** - allow adapting parameters with gradients disabled.
        * **anil** - clone configured for ANIL-style adaptation.
        """
        resolved = {
            'first_order': self.first_order if first_order is None else first_order,
            'allow_unused': self.allow_unused if allow_unused is None else allow_unused,
            'allow_nograd': self.allow_nograd if allow_nograd is None else allow_nograd,
            'anil': self.anil if anil is None else anil,
        }
        return MAML(
            utils.clone_model(self.layers),
            lr=self.lr,
            **resolved,
        )
| 1,819 | 26.575758 | 89 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/fewshot_molecular_property/chem_lib/datasets/loader.py | import os
import json
import numpy as np
import paddle
import pgl.graph as G
from pahelix.datasets import InMemoryDataset
try:
from rdkit import Chem
from rdkit.Chem import AllChem
allowable_features = {
'possible_atomic_num_list' : list(range(1, 119)),
'possible_formal_charge_list' : [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
'possible_chirality_list' : [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
],
'possible_hybridization_list' : [
Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2, Chem.rdchem.HybridizationType.UNSPECIFIED
],
'possible_numH_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8],
'possible_implicit_valence_list' : [0, 1, 2, 3, 4, 5, 6],
'possible_degree_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'possible_bonds' : [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC
],
'possible_bond_dirs' : [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
]
}
except:
print('Error rdkit:')
Chem, AllChem, allowable_features=None,None, None
def mol_to_graph_data_obj_simple(mol):
    """
    Convert an rdkit mol object into a plain dict graph representation.

    Node features are [atomic-number index, chirality index]; edge features
    are [bond-type index, bond-direction index]. Every bond is stored in both
    directions and every atom gets a self-loop encoded as [4, 0] (index 4 lies
    outside the 0-3 range used for real bond types).
    :param mol: rdkit mol object
    :return: dict with keys 'x', 'edge_index', 'edge_attr' (int64 numpy arrays)
    """
    # atoms: one [atomic num index, chirality index] row per atom
    atom_features_list = [
        [allowable_features['possible_atomic_num_list'].index(atom.GetAtomicNum()),
         allowable_features['possible_chirality_list'].index(atom.GetChiralTag())]
        for atom in mol.GetAtoms()
    ]
    x = np.array(atom_features_list, dtype='int64')

    if len(mol.GetBonds()) > 0:  # mol has bonds
        edges_list = []
        edge_features_list = []
        for bond in mol.GetBonds():
            u = bond.GetBeginAtomIdx()
            v = bond.GetEndAtomIdx()
            feat = [allowable_features['possible_bonds'].index(bond.GetBondType()),
                    allowable_features['possible_bond_dirs'].index(bond.GetBondDir())]
            # store both directions so the graph is effectively undirected
            edges_list.append((u, v))
            edge_features_list.append(feat)
            edges_list.append((v, u))
            edge_features_list.append(feat)
        # add a self-loop for every atom
        for node in range(len(x)):
            edges_list.append((node, node))
            edge_features_list.append([4, 0])
        edge_index = np.array(edges_list, dtype='int64')
        edge_attr = np.array(edge_features_list, dtype='int64')
    else:  # mol has no bonds: single placeholder self-loop on atom 0
        edge_index = np.array([[0, 0]], dtype='int64')
        edge_attr = np.array([[4, 0]], dtype='int64')

    return {'x': x, 'edge_index': edge_index, 'edge_attr': edge_attr}
class MoleculeDataset(InMemoryDataset):
    def __init__(self,
                 root,
                 transform=None,
                 pre_transform=None,
                 pre_filter=None,
                 dataset='zinc250k',
                 empty=False):
        """
        Adapted from qm9.py. Disabled the download functionality.

        :param root: directory of the dataset, containing a raw and processed
            dir. The raw dir should contain <dataset>.json; processed tensors
            are cached under <root>/new.
        :param dataset: name of the dataset; read() currently supports
            tox21, muv, sider and toxcast.
        :param empty: if True, then will not load any data obj. For
            initializing empty dataset
        """
        self.dataset = dataset
        self.root = root
        super(MoleculeDataset, self).__init__()
        # build the processed cache on first use
        if not os.path.exists(self.root + "/new/data.pdparams"):
            self.read()
        self.smiles_list = paddle.load(self.root + "/new/smiles.pdparams")[1]
        data_list = paddle.load(self.root + "/new/data.pdparams")[1]
        # wrap each raw graph dict in a pgl Graph, carrying labels alongside
        self.data_list = [G.Graph(i['edge_index'], i['x'].shape[0],
                                  {'feature': i['x']}, {'feature': i['edge_attr']}) for i in data_list]
        for i in range(len(self.data_list)):
            self.data_list[i].y = data_list[i]['y']

    def read(self):
        """Parse <root>/raw/<dataset>.json into graph dicts and cache them.

        Raises ValueError for unsupported dataset names. The four supported
        datasets share the same two-list json layout, so they all go through
        the same loader; molecules whose smiles fail to parse are skipped.
        """
        # NOTE: the original code had one near-identical branch per dataset,
        # all calling the same loader; they are consolidated here.
        if self.dataset not in ('tox21', 'muv', 'sider', 'toxcast'):
            raise ValueError('Invalid dataset name')
        smiles_list, rdkit_mol_objs, labels = \
            _load_tox21_dataset(self.root + "/raw/" + self.dataset + ".json")
        data_smiles_list = []
        data_list = []
        # the original implementation printed progress indices for every
        # dataset except toxcast; preserved for identical console output
        verbose = self.dataset != 'toxcast'
        for i in range(len(smiles_list)):
            if verbose:
                print(i)
            rdkit_mol = rdkit_mol_objs[i]
            if rdkit_mol is None:
                # invalid smiles (observed in toxcast): skip the molecule
                continue
            data = mol_to_graph_data_obj_simple(rdkit_mol)
            # manually add mol id and label row
            data['id'] = np.array([i])
            data['y'] = np.array(labels[i, :])
            data_list.append(data)
            data_smiles_list.append(smiles_list[i])
        self.smiles_list = data_smiles_list
        paddle.save({1: data_list}, self.root + "/new/data.pdparams")
        paddle.save({1: self.smiles_list}, self.root + "/new/smiles.pdparams")
def _load_tox21_dataset(input_path):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
# input_df = pd.read_csv(input_path, sep=',')
# smiles_list = input_df['smiles']
# rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
# tasks = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
# 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53']
# labels = input_df[tasks]
# # convert 0 to -1
# labels = labels.replace(0, -1)
# # convert nan to 0
# labels = labels.fillna(0)
# assert len(smiles_list) == len(rdkit_mol_objs_list)
# assert len(smiles_list) == len(labels)
with open(input_path) as json_file:
binary_list = json.load(json_file)
smiles_list = []
for l in binary_list:
for i in l:
smiles_list.append(i)
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = np.zeros((len(smiles_list),1), dtype=int)
labels[len(binary_list[0]):,0] = 1
return smiles_list, rdkit_mol_objs_list, labels
def _load_muv_dataset(input_path):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
# input_df = pd.read_csv(input_path, sep=',')
# smiles_list = input_df['smiles']
# rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
# tasks = ['MUV-466', 'MUV-548', 'MUV-600', 'MUV-644', 'MUV-652', 'MUV-689',
# 'MUV-692', 'MUV-712', 'MUV-713', 'MUV-733', 'MUV-737', 'MUV-810',
# 'MUV-832', 'MUV-846', 'MUV-852', 'MUV-858', 'MUV-859']
# labels = input_df[tasks]
# # convert 0 to -1
# labels = labels.replace(0, -1)
# # convert nan to 0
# labels = labels.fillna(0)
# assert len(smiles_list) == len(rdkit_mol_objs_list)
# assert len(smiles_list) == len(labels)
with open(input_path) as json_file:
binary_list = json.load(json_file)
smiles_list = []
for l in binary_list:
for i in l:
smiles_list.append(i)
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = np.zeros((len(smiles_list),1), dtype=int)
labels[len(binary_list[0]):,0] = 1
return smiles_list, rdkit_mol_objs_list, labels
def _load_sider_dataset(input_path):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
with open(input_path) as json_file:
binary_list = json.load(json_file)
smiles_list = []
for l in binary_list:
for i in l:
smiles_list.append(i)
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = np.zeros((len(smiles_list),1), dtype=int)
labels[len(binary_list[0]):,0] = 1
# print(smiles_list)
# print(labels)
# raise TypeError
# input_df = pd.read_csv(input_path, sep=',')
# smiles_list = input_df['smiles']
# rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
# tasks = ['Hepatobiliary disorders',
# 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders',
# 'Investigations', 'Musculoskeletal and connective tissue disorders',
# 'Gastrointestinal disorders', 'Social circumstances',
# 'Immune system disorders', 'Reproductive system and breast disorders',
# 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)',
# 'General disorders and administration site conditions',
# 'Endocrine disorders', 'Surgical and medical procedures',
# 'Vascular disorders', 'Blood and lymphatic system disorders',
# 'Skin and subcutaneous tissue disorders',
# 'Congenital, familial and genetic disorders',
# 'Infections and infestations',
# 'Respiratory, thoracic and mediastinal disorders',
# 'Psychiatric disorders', 'Renal and urinary disorders',
# 'Pregnancy, puerperium and perinatal conditions',
# 'Ear and labyrinth disorders', 'Cardiac disorders',
# 'Nervous system disorders',
# 'Injury, poisoning and procedural complications']
# labels = input_df[tasks]
# # convert 0 to -1
# labels = labels.replace(0, -1)
# assert len(smiles_list) == len(rdkit_mol_objs_list)
# assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_toxcast_dataset(input_path):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
# NB: some examples have multiple species, some example smiles are invalid
with open(input_path) as json_file:
binary_list = json.load(json_file)
smiles_list = []
for l in binary_list:
for i in l:
smiles_list.append(i)
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = np.zeros((len(smiles_list),1), dtype=int)
labels[len(binary_list[0]):,0] = 1
return smiles_list, rdkit_mol_objs_list, labels
# input_df = pd.read_csv(input_path, sep=',')
# smiles_list = input_df['smiles']
# rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
# # Some smiles could not be successfully converted
# # to rdkit mol object so them to None
# preprocessed_rdkit_mol_objs_list = [m if m != None else None for m in
# rdkit_mol_objs_list]
# preprocessed_smiles_list = [AllChem.MolToSmiles(m) if m != None else
# None for m in preprocessed_rdkit_mol_objs_list]
# tasks = list(input_df.columns)[1:]
# labels = input_df[tasks]
# # convert 0 to -1
# labels = labels.replace(0, -1)
# # convert nan to 0
# labels = labels.fillna(0)
# assert len(smiles_list) == len(preprocessed_rdkit_mol_objs_list)
# assert len(smiles_list) == len(preprocessed_smiles_list)
# assert len(smiles_list) == len(labels)
# return preprocessed_smiles_list, preprocessed_rdkit_mol_objs_list, \
# labels.values
def check_smiles_validity(smiles):
    """Return True iff *smiles* parses into an RDKit molecule.

    Any parser failure (including rdkit being unavailable, in which case
    ``Chem`` is None) is reported as invalid rather than propagated.
    """
    try:
        # MolFromSmiles returns None for unparsable smiles
        return Chem.MolFromSmiles(smiles) is not None
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        return False
| 15,028 | 39.400538 | 91 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold-single/alphafold_paddle/model/modules.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules."""
import numpy as np
import paddle
import paddle.nn as nn
from paddle.fluid.framework import _dygraph_tracer
from paddle.distributed.fleet.utils import recompute
# paddle >= 2.3 renamed the raw op namespace; fall back for older releases.
try:
    from paddle import _legacy_C_ops as _C_ops
except ImportError:
    # BUG FIX: was a bare `except:`; only a failed import should trigger
    # the fallback, not SystemExit/KeyboardInterrupt or unrelated errors.
    from paddle import _C_ops
from alphafold_paddle.common import residue_constants
from alphafold_paddle.model.utils import mask_mean, subbatch
from alphafold_paddle.model import folding, lddt, quat_affine, all_atom
from alphafold_paddle.model.utils import init_gate_linear, init_final_linear
from alphafold_paddle.distributed import dap, bp
from alphafold_paddle.distributed.comm_group import scg
# Map head name in config to head name in model params.
# Heads absent from this table keep their config name unchanged: lookups go
# through Head_names.get(head_name, head_name) when head modules are attached.
Head_names = {
    'masked_msa': 'masked_msa_head',
    'distogram': 'distogram_head',
    'predicted_lddt': 'predicted_lddt_head',
    'predicted_aligned_error': 'predicted_aligned_error_head',
    'experimentally_resolved': 'experimentally_resolved_head', # finetune loss
}
def recompute_wrapper(func, *args, is_recompute=True):
    """Run ``func(*args)``, routing through paddle's gradient-checkpointing
    ``recompute`` when *is_recompute* is True."""
    if not is_recompute:
        return func(*args)
    return recompute(func, *args)
def softmax_cross_entropy(logits, labels):
    """Computes softmax cross entropy given logits and one-hot class labels."""
    log_probs = paddle.nn.functional.log_softmax(logits)
    return -paddle.sum(labels * log_probs, axis=-1)
def sigmoid_cross_entropy(logits, labels):
    """Computes sigmoid cross entropy given logits and multiple class labels."""
    # log(1 - sigmoid(x)) = log_sigmoid(-x); the latter form is more
    # numerically stable than computing 1 - sigmoid(x) directly
    pos_term = labels * paddle.nn.functional.log_sigmoid(logits)
    neg_term = (1. - labels) * paddle.nn.functional.log_sigmoid(-logits)
    return -(pos_term + neg_term)
class Dropout(nn.Layer):
    """Dropout with optional structured (per-axis) masks.

    Unlike paddle.nn.Dropout, passing ``axis`` drops whole slices shared
    along the given axes via the raw ``dropout_nd`` op.

    Args:
        p: drop probability, must lie in [0, 1].
        axis: int or list of ints naming the axes the dropout mask is shared
            over; None means ordinary element-wise dropout.
        mode: 'upscale_in_train' or 'downscale_in_infer' (internally mapped
            to the legacy op name 'downgrade_in_infer').
        name: optional op name.
    """

    def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
        super(Dropout, self).__init__()
        if not isinstance(p, (float, int)):
            raise TypeError("p argument should be a number")
        if p < 0 or p > 1:
            raise ValueError("p argument should between 0 and 1")
        # BUG FIX: validate *before* translating to the legacy op name.
        # The original code renamed 'downscale_in_infer' to
        # 'downgrade_in_infer' first, so the legal value 'downscale_in_infer'
        # always failed this membership check and raised ValueError.
        if mode not in ('downscale_in_infer', 'upscale_in_train'):
            raise ValueError(
                "mode argument should be 'downscale_in_infer' or 'upscale_in_train'"
            )
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer
        if axis is not None and not isinstance(axis, (int, list, tuple)):
            raise TypeError("datatype of axis argument should be int or list")
        self.p = p
        self.axis = axis
        self.mode = mode
        self.name = name

    def forward(self, input):
        # fast return for p == 0
        if self.p == 0:
            return input
        if self.axis is None:
            # plain element-wise dropout
            out = nn.functional.dropout(input,
                                        p=self.p,
                                        axis=self.axis,
                                        training=self.training,
                                        mode=self.mode,
                                        name=self.name)
        else:
            # structured dropout along self.axis via the raw dropout_nd op;
            # the seed is pinned when a program-level random seed is set so
            # results stay reproducible
            seed = None
            drop_axes = [self.axis] if isinstance(self.axis, int) else list(self.axis)
            if paddle.static.default_main_program().random_seed != 0:
                seed = paddle.static.default_main_program().random_seed
            out, mask = _C_ops.dropout_nd(input, 'dropout_prob', self.p, 'is_test',
                                          not self.training, 'fix_seed', seed
                                          is not None, 'seed',
                                          seed if seed is not None else 0,
                                          'dropout_implementation', self.mode, 'axis',
                                          drop_axes)
        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, axis={}, mode={}{}'.format(self.p, self.axis, self.mode,
                                                 name_str)
class AlphaFold(nn.Layer):
    """AlphaFold model with recycling.
    Jumper et al. (2021) Suppl. Alg. 2 "Inference"
    """
    def __init__(self, channel_num, config):
        super(AlphaFold, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = config.global_config
        self.alphafold_iteration = AlphaFoldIteration(
            self.channel_num, self.config, self.global_config)

    def forward(self,
                batch,
                label,
                ensemble_representations=False,
                return_representations=False,
                compute_loss=True):
        """Run the AlphaFold model.
        Arguments:
          batch: Dictionary with inputs to the AlphaFold model.
          label: Labels passed through to AlphaFoldIteration for loss
            computation.
          ensemble_representations: Whether to use ensembling of representations.
          return_representations: Whether to also return the intermediate
            representations.
          compute_loss: Whether the final iteration computes the loss
            (intermediate recycling iterations never do).
        Returns:
          The output of AlphaFoldIteration is a nested dictionary containing
          predictions from the various heads.
        """
        # batch tensors are laid out (B, inner_batch, N, ...); inner_batch
        # packs the recycling/ensemble copies
        inner_batch, num_residues = batch['aatype'].shape[1:]

        def _get_prev(ret):
            # Extract the features recycled into the next iteration, detached
            # (stop_gradient) so gradients do not flow across recyclings.
            new_prev = {
                'prev_pos': ret['structure_module']['final_atom_positions'],
                'prev_msa_first_row': ret['representations']['msa_first_row'],
                'prev_pair': ret['representations']['pair'],
            }
            for k in new_prev.keys():
                new_prev[k].stop_gradient = True
            return new_prev

        def _run_single_recycling(prev, recycle_idx, compute_loss):
            # One pass of AlphaFoldIteration on the ensemble slice for this
            # recycling index.
            if self.config.resample_msa_in_recycling:
                # (B, (R+1)*E, N, ...)
                # B: batch size, R: recycling number,
                # E: ensemble number, N: residue number
                num_ensemble = inner_batch // (self.config.num_recycle + 1)
                ensembled_batch = dict()
                for k in batch.keys():
                    start = recycle_idx * num_ensemble
                    end = start + num_ensemble
                    ensembled_batch[k] = batch[k][:, start:end]
            else:
                # (B, E, N, ...): every recycling reuses the same ensemble
                num_ensemble = inner_batch
                ensembled_batch = batch

            non_ensembled_batch = prev
            return self.alphafold_iteration(
                ensembled_batch, label, non_ensembled_batch,
                compute_loss=compute_loss,
                ensemble_representations=ensemble_representations)

        if self.config.num_recycle:
            # aatype: (B, E, N), zeros_bn: (B, N)
            # Initial recycled features are all-zero tensors of the shapes
            # produced by _get_prev.
            zeros_bn = paddle.zeros_like(batch['aatype'][:, 0], dtype='float32')

            emb_config = self.config.embeddings_and_evoformer
            prev = {
                'prev_pos': paddle.tile(
                    zeros_bn[..., None, None],
                    [1, 1, residue_constants.atom_type_num, 3]),
                'prev_msa_first_row': paddle.tile(
                    zeros_bn[..., None],
                    [1, 1, emb_config.msa_channel]),
                'prev_pair': paddle.tile(
                    zeros_bn[..., None, None],
                    [1, 1, num_residues, emb_config.pair_channel]),
            }

            if 'num_iter_recycling' in batch:
                # Training trick: dynamic recycling number, capped by the
                # configured maximum
                num_iter = batch['num_iter_recycling'].numpy()[0, 0]
                num_iter = min(int(num_iter), self.config.num_recycle)
            else:
                num_iter = self.config.num_recycle

            # intermediate recyclings never compute the loss
            for recycle_idx in range(num_iter):
                ret = _run_single_recycling(prev, recycle_idx, compute_loss=False)
                prev = _get_prev(ret)
        else:
            prev = {}
            num_iter = 0

        # final pass: loss is computed here if requested
        return _run_single_recycling(prev, num_iter, compute_loss=compute_loss)
class AlphaFoldIteration(nn.Layer):
    """A single recycling iteration of AlphaFold architecture.

    Computes ensembled (averaged) representations from the provided features.
    These representations are then passed to the various heads
    that have been requested by the configuration file. Each head also returns a
    loss which is combined as a weighted sum to produce the total loss.

    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 3-22
    """

    def __init__(self, channel_num, config, global_config):
        super(AlphaFoldIteration, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        # copy these config for later usage
        self.channel_num['extra_msa_channel'] = config.embeddings_and_evoformer.extra_msa_channel
        self.channel_num['msa_channel'] = config.embeddings_and_evoformer.msa_channel
        self.channel_num['pair_channel'] = config.embeddings_and_evoformer.pair_channel
        self.channel_num['seq_channel'] = config.embeddings_and_evoformer.seq_channel

        self.evoformer = EmbeddingsAndEvoformer(
            self.channel_num, self.config.embeddings_and_evoformer,
            self.global_config)

        # Registry mapping config head names to their implementing modules.
        Head_modules = {
            'masked_msa': MaskedMsaHead,
            'distogram': DistogramHead,
            'structure_module': folding.StructureModule,
            'predicted_lddt': PredictedLDDTHead,
            'predicted_aligned_error': PredictedAlignedErrorHead,
            'experimentally_resolved': ExperimentallyResolvedHead,  # finetune loss
        }

        self.used_heads = []
        # Instantiate requested heads in deterministic (sorted) order; head
        # names with no registered module are silently skipped.
        for head_name, head_config in sorted(self.config.heads.items()):
            if head_name not in Head_modules:
                continue

            self.used_heads.append(head_name)
            module = Head_modules[head_name](
                self.channel_num, head_config, self.global_config)

            # NOTE(review): Head_names presumably maps config head names to
            # attribute names — defined elsewhere in this file; verify.
            head_name_ = Head_names.get(head_name, head_name)
            setattr(self, head_name_, module)

    def forward(self,
                ensembled_batch,
                label,
                non_ensembled_batch,
                compute_loss=False,
                ensemble_representations=False):
        """Run the Evoformer trunk, then every configured head.

        Args:
            ensembled_batch: per-ensemble features; each value has an ensemble
                axis at dim 1 (sliced per member below).
            label: dict of training targets; bert_mask / true_msa /
                residue_index from ensemble member 0 are added to it here.
            non_ensembled_batch: recycled features shared by all members.
            compute_loss: when True, also return the weighted total head loss.
            ensemble_representations: average trunk representations over all
                ensemble members (otherwise exactly one member is required).

        Returns:
            `ret` dict of head outputs, or `(ret, total_loss)` when
            compute_loss is True.
        """
        num_ensemble = ensembled_batch['seq_length'].shape[1]

        if not ensemble_representations:
            assert num_ensemble == 1

        def _slice_batch(i):
            # Select ensemble member i and merge in the recycled features.
            b = {k: v[:, i] for k, v in ensembled_batch.items()}
            b.update(non_ensembled_batch)
            return b

        batch0 = _slice_batch(0)
        representations = self.evoformer(batch0)

        # MSA representations are not ensembled
        msa_representation = representations['msa']
        del representations['msa']

        # MaskedMSAHead is apply on batch0
        label['bert_mask'] = batch0['bert_mask']
        label['true_msa'] = batch0['true_msa']
        label['residue_index'] = batch0['residue_index']

        if ensemble_representations:
            # Accumulate the remaining members, then average.
            for i in range(1, num_ensemble):
                batch = _slice_batch(i)
                representations_update = self.evoformer(batch)
                for k in representations.keys():
                    representations[k] += representations_update[k]

            for k in representations.keys():
                representations[k] /= num_ensemble + 0.0

        representations['msa'] = msa_representation
        ret = {'representations': representations}

        def loss(head_name_, head_config, ret, head_name, filter_ret=True):
            # Compute one head's loss, merge its loss outputs into
            # ret[head_name], and return the weighted scalar contribution.
            # filter_ret=False feeds the *whole* ret so the head can read the
            # structure_module results.
            if filter_ret:
                value = ret[head_name]
            else:
                value = ret
            loss_output = getattr(self, head_name_).loss(value, label)
            ret[head_name].update(loss_output)
            loss = head_config.weight * ret[head_name]['loss']
            return loss

        def _forward_heads(representations, ret, batch0):
            total_loss = 0.
            for head_name, head_config in self._get_heads():
                head_name_ = Head_names.get(head_name, head_name)
                # Skip PredictedLDDTHead and PredictedAlignedErrorHead until
                # StructureModule is executed.
                if head_name in ('predicted_lddt', 'predicted_aligned_error'):
                    continue
                else:
                    ret[head_name] = getattr(self, head_name_)(representations, batch0)
                    if 'representations' in ret[head_name]:
                        # Extra representations from the head. Used by the
                        # structure module to provide activations for the PredictedLDDTHead.
                        representations.update(ret[head_name].pop('representations'))
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name)

            if self.config.heads.get('predicted_lddt.weight', 0.0):
                # Add PredictedLDDTHead after StructureModule executes.
                head_name = 'predicted_lddt'
                # Feed all previous results to give access to structure_module result.
                head_name_ = Head_names.get(head_name, head_name)
                head_config = self.config.heads[head_name]
                ret[head_name] = getattr(self, head_name_)(representations, batch0)
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name, filter_ret=False)

            if ('predicted_aligned_error' in self.config.heads
                    and self.config.heads.get('predicted_aligned_error.weight', 0.0)):
                # Add PredictedAlignedErrorHead after StructureModule executes.
                head_name = 'predicted_aligned_error'
                # Feed all previous results to give access to structure_module result.
                head_config = self.config.heads[head_name]
                head_name_ = Head_names.get(head_name, head_name)
                ret[head_name] = getattr(self, head_name_)(representations, batch0)
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name, filter_ret=False)

            return ret, total_loss

        # Under bfloat16 autocast the heads are run in float32: disable
        # autocast and up-cast any bf16 tensors first, preserving each
        # tensor's stop_gradient flag.
        tracer = _dygraph_tracer()
        if tracer._amp_dtype == "bfloat16":
            with paddle.amp.auto_cast(enable=False):
                for key, value in representations.items():
                    if value.dtype in [paddle.fluid.core.VarDesc.VarType.BF16]:
                        temp_value = value.cast('float32')
                        temp_value.stop_gradient = value.stop_gradient
                        representations[key] = temp_value
                for key, value in batch0.items():
                    if value.dtype in [paddle.fluid.core.VarDesc.VarType.BF16]:
                        temp_value = value.cast('float32')
                        temp_value.stop_gradient = value.stop_gradient
                        batch0[key] = temp_value
                ret, total_loss = _forward_heads(representations, ret, batch0)
        else:
            ret, total_loss = _forward_heads(representations, ret, batch0)

        if compute_loss:
            return ret, total_loss
        else:
            return ret

    def _get_heads(self):
        # Yield (head_name, head_config) pairs in instantiation order.
        assert 'structure_module' in self.used_heads
        head_names = [h for h in self.used_heads]
        for k in head_names:
            yield k, self.config.heads[k]
class Attention(nn.Layer):
    """Multihead attention with optional gating and an optional fused CUDA path."""

    def __init__(self, config, global_config, q_dim, kv_dim, output_dim):
        super(Attention, self).__init__()
        self.config = config
        self.global_config = global_config

        num_head = self.config.num_head
        key_dim = self.config.get('key_dim', q_dim)
        value_dim = self.config.get('value_dim', kv_dim)

        # TODO(GuoxiaWang): delete non fuse_attention related code on dcu
        self.fuse_attention = self.global_config.fuse_attention
        # Q/K/V projections can only be merged into one weight for
        # self-attention (query and memory share the channel dimension).
        self.merge_qkv = (q_dim == kv_dim)

        assert key_dim % num_head == 0
        assert value_dim % num_head == 0
        # Per-head channel sizes from here on.
        key_dim = key_dim // num_head
        value_dim = value_dim // num_head

        self.key_dim = key_dim
        self.value_dim = value_dim

        self.qkv_w = None
        self.query_w = None
        self.key_w = None
        self.value_w = None
        if self.merge_qkv and self.fuse_attention:
            # Merged projection consumed by the fused kernel.
            self.qkv_w = paddle.create_parameter(
                [3, num_head, key_dim, q_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
        else:
            self.query_w = paddle.create_parameter(
                [q_dim, num_head, key_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
            self.key_w = paddle.create_parameter(
                [kv_dim, num_head, key_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
            self.value_w = paddle.create_parameter(
                [kv_dim, num_head, value_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())

        self.gating_w = None
        self.gating_b = None
        if self.config.gating:
            # Gate starts near-open: w = 0, b = 1 -> sigmoid(1).
            self.gating_w = paddle.create_parameter(
                [q_dim, num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(0.0))
            self.gating_b = paddle.create_parameter(
                [num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(1.0))

        if self.global_config.zero_init:
            init = nn.initializer.Constant(0.0)
        else:
            init = nn.initializer.XavierUniform()

        self.output_w = paddle.create_parameter(
            [num_head, value_dim, output_dim], 'float32',
            default_initializer=init)
        self.output_b = paddle.create_parameter(
            [output_dim], 'float32',
            default_initializer=nn.initializer.Constant(0.0))

    def forward(self, q_data, m_data, bias, nonbatched_bias=None):
        """Builds Attention module.

        Arguments:
          q_data: A tensor of queries, shape [batch, row_size, N_queries, q_channels].
          m_data: A tensor of memories from which the keys and values are
            projected, shape [batch, row_size, N_keys, m_channels].
          bias: A bias for the attention, shape [batch, row_size, num_head, N_queries, N_keys].
          nonbatched_bias: Shared bias, shape [N_queries, N_keys].

        Returns:
          A float32 tensor of shape [batch_size, row_size, N_queries, output_dim].
        """
        if self.fuse_attention:
            if nonbatched_bias is not None:
                nonbatched_bias = paddle.unsqueeze(nonbatched_bias, axis=1)
            # Fused kernel returns several intermediates; only the final
            # output tensor is used. Argument order is fixed by the op.
            _, _, _, _, _, _, _, output = _C_ops.fused_gate_attention(
                q_data, m_data, self.query_w, self.key_w, self.value_w, self.qkv_w, nonbatched_bias, bias, self.gating_w, self.gating_b,
                self.output_w, self.output_b, 'has_gating', self.config.gating, 'merge_qkv', self.merge_qkv)
        else:
            c = self.key_dim ** (-0.5)  # 1/sqrt(d_k) scaling
            q = paddle.einsum('nbqa,ahc->nbqhc', q_data, self.query_w) * c
            k = paddle.einsum('nbka,ahc->nbkhc', m_data, self.key_w)
            v = paddle.einsum('nbka,ahc->nbkhc', m_data, self.value_w)
            logits = paddle.einsum('nbqhc,nbkhc->nbhqk', q, k) + bias

            if nonbatched_bias is not None:
                logits += paddle.unsqueeze(nonbatched_bias, axis=1)

            weights = nn.functional.softmax(logits)
            weighted_avg = paddle.einsum('nbhqk,nbkhc->nbqhc', weights, v)

            if self.config.gating:
                gate_values = paddle.einsum('nbqc,chv->nbqhv', q_data,
                                            self.gating_w) + self.gating_b
                gate_values = nn.functional.sigmoid(gate_values)
                weighted_avg *= gate_values

            output = paddle.einsum('nbqhc,hco->nbqo', weighted_avg,
                                   self.output_w) + self.output_b
        return output
class GlobalAttention(nn.Layer):
    """Global (averaged-query) attention.

    Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" lines 2-7
    """

    def __init__(self, config, global_config, q_dim, kv_dim, output_dim):
        super(GlobalAttention, self).__init__()
        self.config = config
        self.global_config = global_config

        n_head = self.config.num_head
        key_total = self.config.get('key_dim', q_dim)
        value_total = self.config.get('value_dim', kv_dim)
        assert key_total % n_head == 0
        assert value_total % n_head == 0
        # Per-head channel sizes.
        self.key_dim = key_total // n_head
        self.value_dim = value_total // n_head

        def _xavier(shape):
            # Shared helper: float32 parameter with Xavier-uniform init.
            return paddle.create_parameter(
                shape, 'float32',
                default_initializer=nn.initializer.XavierUniform())

        self.query_w = _xavier([q_dim, n_head, self.key_dim])
        # Keys/values are *not* split per head in the global variant.
        self.key_w = _xavier([kv_dim, self.key_dim])
        self.value_w = _xavier([kv_dim, self.value_dim])

        if self.config.gating:
            # Gate starts near-open: w = 0, b = 1 -> sigmoid(1).
            self.gating_w = paddle.create_parameter(
                [q_dim, n_head, self.value_dim], 'float32',
                default_initializer=nn.initializer.Constant(0.0))
            self.gating_b = paddle.create_parameter(
                [n_head, self.value_dim], 'float32',
                default_initializer=nn.initializer.Constant(1.0))

        out_init = (nn.initializer.Constant(0.0)
                    if self.global_config.zero_init
                    else nn.initializer.XavierUniform())
        self.output_w = paddle.create_parameter(
            [n_head, self.value_dim, output_dim], 'float32',
            default_initializer=out_init)
        self.output_b = paddle.create_parameter(
            [output_dim], 'float32',
            default_initializer=nn.initializer.Constant(0.0))

    def forward(self, q_data, m_data, q_mask):
        """Attend with a single mask-averaged query per row."""
        keys = paddle.einsum('nbka,ac->nbkc', m_data, self.key_w)
        values = paddle.einsum('nbka,ac->nbkc', m_data, self.value_w)

        # NOTE: differ from non-global version using q_avg for attn
        q_avg = mask_mean(q_mask, q_data, axis=2)
        scale = self.key_dim ** (-0.5)
        queries = paddle.einsum('nba,ahc->nbhc', q_avg, self.query_w) * scale

        # Masked-out keys receive a large negative additive bias.
        mask_2d = paddle.unsqueeze(q_mask, axis=2)[..., 0]
        bias = 1e9 * (mask_2d - 1.)

        logits = paddle.einsum('nbhc,nbkc->nbhk', queries, keys) + bias
        weights = nn.functional.softmax(logits)
        weighted_avg = paddle.einsum('nbhk,nbkc->nbhc', weights, values)

        if self.config.gating:
            gate_values = nn.functional.sigmoid(
                paddle.einsum('nbqc,chv->nbqhv', q_data, self.gating_w)
                + self.gating_b)
            # Broadcast the per-row context over the query axis, then gate.
            weighted_avg = paddle.unsqueeze(weighted_avg, axis=2) * gate_values
            output = paddle.einsum('nbqhc,hco->nbqo', weighted_avg,
                                   self.output_w) + self.output_b
        else:
            output = paddle.einsum('nbhc,hco->nbo', weighted_avg,
                                   self.output_w) + self.output_b
            output = paddle.unsqueeze(output, axis=-1)
        return output
class MSARowAttentionWithPairBias(nn.Layer):
    """MSA per-row attention biased by the pair representation.

    Jumper et al. (2021) Suppl. Alg. 7 "MSARowAttentionWithPairBias"
    """

    def __init__(self, channel_num, config, global_config, is_extra_msa):
        super(MSARowAttentionWithPairBias, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa
        assert config.orientation == 'per_row'

        # Channel width depends on which MSA stack this block belongs to.
        msa_dim = (channel_num['extra_msa_channel'] if is_extra_msa
                   else channel_num['msa_channel'])
        self.query_norm = nn.LayerNorm(msa_dim)
        self.feat_2d_norm = nn.LayerNorm(channel_num['pair_channel'])

        # Projects normalized pair activations to one scalar bias per head.
        self.feat_2d_weights = paddle.create_parameter(
            [channel_num['pair_channel'], self.config.num_head], 'float32',
            default_initializer=nn.initializer.Normal(
                std=1. / np.sqrt(channel_num['pair_channel'])))

        self.attention = Attention(
            self.config, self.global_config, msa_dim, msa_dim, msa_dim)

    def forward(self, msa_act, msa_mask, pair_act):
        """Self-attend along MSA rows with a pair-derived shared bias."""
        pair_act = self.feat_2d_norm(pair_act)

        # [B, N_res//dap_size, N_res, cz] x [cz, head]
        #     -> [B, head, N_res//dap_size, N_res]
        pair_bias = paddle.einsum(
            'nqkc,ch->nhqk', pair_act, self.feat_2d_weights)

        # Gather the sharded residue axis back: -> [B, head, N_res, N_res]
        pair_bias = dap.all_gather(pair_bias, axis=2)
        pair_bias = dap.all_gather_opp(pair_bias, axis=2)

        # Shard the sequence axis: [B, N_seq, N_res] -> [B, N_seq//dap_size, N_res]
        msa_mask = dap.scatter(msa_mask, axis=1)

        # Masked positions get a large negative additive bias;
        # [B, N_seq//dap_size, N_res] -> [B, N_seq//dap_size, 1, 1, N_res]
        bias = paddle.unsqueeze(1e9 * (msa_mask - 1.), axis=[2, 3])
        msa_act = self.query_norm(msa_act)

        use_subbatch = (not self.training
                        or (self.is_extra_msa and self.config.use_subbatch))
        if use_subbatch:
            # Low-memory mode: run attention over slices of the batch.
            sb_size = (self.global_config.subbatch_size
                       if not self.training else self.config.subbatch_size)
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               sb_size, 1, same_arg_idx={1: 0})
            return sb_attn(msa_act, msa_act, bias, pair_bias)
        return self.attention(msa_act, msa_act, bias, pair_bias)
class MSAColumnGlobalAttention(nn.Layer):
    """MSA per-column global attention (extra-MSA stack).

    Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention"
    """

    def __init__(self, channel_num, config, global_config):
        super(MSAColumnGlobalAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        assert config.orientation == 'per_column'

        dim = channel_num['extra_msa_channel']
        self.query_norm = nn.LayerNorm(dim)
        self.attention = GlobalAttention(
            self.config, self.global_config, dim, dim, dim)

    def forward(self, msa_act, msa_mask):
        """Globally attend along MSA columns."""
        # scatter if using dap, otherwise do nothing
        # [B, N_seq, N_res] => [B, N_seq, N_res//dap_size]
        msa_mask = dap.scatter(msa_mask, axis=2)

        # Work column-wise: swap the sequence and residue axes.
        msa_act = paddle.transpose(msa_act, [0, 2, 1, 3])
        msa_mask = paddle.transpose(msa_mask, [0, 2, 1])

        bias = paddle.unsqueeze(1e9 * (msa_mask - 1.), axis=[2, 3])
        msa_mask = paddle.unsqueeze(msa_mask, axis=-1)
        msa_act = self.query_norm(msa_act)

        if self.training:
            msa_act = self.attention(msa_act, msa_act, msa_mask)
        else:
            # Low-memory inference path: slice the batch into subbatches.
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               self.global_config.subbatch_size, 1,
                               same_arg_idx={1: 0})
            msa_act = sb_attn(msa_act, msa_act, msa_mask)

        # Restore the original axis order.
        return paddle.transpose(msa_act, [0, 2, 1, 3])
class MSAColumnAttention(nn.Layer):
    """MSA per-column attention.

    Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention"
    """

    def __init__(self, channel_num, config, global_config):
        super(MSAColumnAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        assert config.orientation == 'per_column'

        dim = channel_num['msa_channel']
        self.query_norm = nn.LayerNorm(dim)
        self.attention = Attention(
            self.config, self.global_config, dim, dim, dim)

    def forward(self, msa_act, msa_mask):
        """Self-attend along MSA columns."""
        # scatter if using dap, otherwise do nothing
        # [B, N_seq, N_res] => [B, N_seq, N_res//dap_size]
        msa_mask = dap.scatter(msa_mask, axis=2)

        # Attend column-wise: swap the sequence and residue axes.
        msa_act = paddle.transpose(msa_act, [0, 2, 1, 3])
        msa_mask = paddle.transpose(msa_mask, [0, 2, 1])

        bias = paddle.unsqueeze(1e9 * (msa_mask - 1.), axis=[2, 3])
        msa_act = self.query_norm(msa_act)

        if self.training:
            msa_act = self.attention(msa_act, msa_act, bias)
        else:
            # Low-memory inference path: slice the batch into subbatches.
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               self.global_config.subbatch_size, 1,
                               same_arg_idx={1: 0})
            msa_act = sb_attn(msa_act, msa_act, bias)

        # Restore the original axis order.
        return paddle.transpose(msa_act, [0, 2, 1, 3])
class Transition(nn.Layer):
    """Two-layer MLP transition for MSA or pair activations.

    Jumper et al. (2021) Suppl. Alg. 9 "MSATransition"
    Jumper et al. (2021) Suppl. Alg. 15 "PairTransition"
    """

    def __init__(self, channel_num, config, global_config, is_extra_msa,
                 transition_type):
        super(Transition, self).__init__()
        assert transition_type in ['msa_transition', 'pair_transition']
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa
        self.transition_type = transition_type

        # Input width depends on which representation we transform.
        if transition_type == 'pair_transition':
            in_dim = channel_num['pair_channel']
        elif is_extra_msa:
            in_dim = channel_num['extra_msa_channel']
        else:
            in_dim = channel_num['msa_channel']

        hidden = int(in_dim * self.config.num_intermediate_factor)
        self.input_layer_norm = nn.LayerNorm(in_dim)
        self.transition1 = nn.Linear(
            in_dim, hidden,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.KaimingNormal()))

        last_init = (nn.initializer.Constant(0.0)
                     if self.global_config.zero_init
                     else nn.initializer.TruncatedNormal())
        self.transition2 = nn.Linear(
            hidden, in_dim,
            weight_attr=paddle.ParamAttr(initializer=last_init))

    def forward(self, act, mask):
        """Apply LayerNorm -> Linear -> ReLU -> Linear (mask is unused)."""
        act = self.input_layer_norm(act)

        def _mlp(x):
            return self.transition2(nn.functional.relu(self.transition1(x)))

        if self.training:
            return _mlp(act)
        # Low-memory inference path: process the input in subbatches.
        sb_transition = subbatch(_mlp, [0], [1],
                                 self.global_config.subbatch_size, 1)
        return sb_transition(act)
class MaskedMsaHead(nn.Layer):
    """Head to predict MSA at the masked locations.

    The MaskedMsaHead employs a BERT-style objective to reconstruct a masked
    version of the full MSA, based on a linear projection of
    the MSA representation.

    Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
    """

    def __init__(self, channel_num, config, global_config, name='masked_msa_head'):
        super(MaskedMsaHead, self).__init__()
        self.config = config
        self.global_config = global_config
        self.num_output = config.num_output
        self.logits = nn.Linear(channel_num['msa_channel'], self.num_output,
                                name='logits')

    def forward(self, representations, batch):
        """Project the MSA representation to per-position aatype logits.

        Arguments:
          representations: Dictionary of representations, must contain:
            * 'msa': MSA representation, shape [batch, N_seq, N_res, c_m].
          batch: Batch, unused.

        Returns:
          Dictionary containing:
            * 'logits': logits of shape [batch, N_seq, N_res, N_aatype] with
                (unnormalized) log probabilies of predicted aatype at position.
        """
        del batch
        return {'logits': self.logits(representations['msa'])}

    def loss(self, value, batch):
        """Masked cross-entropy against the true MSA, averaged over masked cells."""
        labels = paddle.nn.functional.one_hot(
            batch['true_msa'], num_classes=self.num_output)
        errors = softmax_cross_entropy(labels=labels, logits=value['logits'])
        mask = paddle.cast(batch['bert_mask'], dtype=errors.dtype)
        loss = (paddle.sum(errors * mask, axis=[-2, -1]) /
                (1e-8 + paddle.sum(batch['bert_mask'], axis=[-2, -1])))
        return {'loss': loss}
class PredictedLDDTHead(nn.Layer):
    """Head to predict the per-residue LDDT to be used as a confidence measure.

    Jumper et al. (2021) Suppl. Sec. 1.9.6 "Model confidence prediction (pLDDT)"
    Jumper et al. (2021) Suppl. Alg. 29 "predictPerResidueLDDT_Ca"
    """

    def __init__(self, channel_num, config, global_config, name='predicted_lddt_head'):
        super(PredictedLDDTHead, self).__init__()
        self.config = config
        self.global_config = global_config

        # Small MLP on top of the structure module's single representation.
        self.input_layer_norm = nn.LayerNorm(channel_num['seq_channel'],
                                             name='input_layer_norm')
        self.act_0 = nn.Linear(channel_num['seq_channel'],
                               self.config.num_channels, name='act_0')
        self.act_1 = nn.Linear(self.config.num_channels,
                               self.config.num_channels, name='act_1')
        self.logits = nn.Linear(self.config.num_channels,
                                self.config.num_bins, name='logits')

    def forward(self, representations, batch):
        """Builds PredictedLDDTHead module.

        Arguments:
          representations: Dictionary of representations, must contain:
            * 'structure_module': Single representation from the structure module,
                 shape [n_batch, N_res, c_s].

        Returns:
          Dictionary containing :
            * 'logits': logits of shape [n_batch, N_res, N_bins] with
                (unnormalized) log probabilies of binned predicted lDDT.
        """
        act = representations['structure_module']
        act = self.input_layer_norm(act)
        act = nn.functional.relu(self.act_0(act))
        act = nn.functional.relu(self.act_1(act))
        logits = self.logits(act)
        return dict(logits=logits)

    def loss(self, value, batch):
        """Cross-entropy between predicted bins and the binned true CA-lDDT."""
        # Shape (n_batch, num_res, 37, 3)
        pred_all_atom_pos = value['structure_module']['final_atom_positions']
        # Shape (n_batch, num_res, 37, 3)
        true_all_atom_pos = paddle.cast(batch['all_atom_positions'], 'float32')
        # Shape (n_batch, num_res, 37)
        all_atom_mask = paddle.cast(batch['all_atom_mask'], 'float32')

        # True per-residue lDDT over CA atoms (atom37 index 1 is CA).
        # Shape (batch_size, num_res)
        lddt_ca = lddt.lddt(
            # Shape (batch_size, num_res, 3)
            predicted_points=pred_all_atom_pos[:, :, 1, :],
            # Shape (batch_size, num_res, 3)
            true_points=true_all_atom_pos[:, :, 1, :],
            # Shape (batch_size, num_res, 1)
            true_points_mask=all_atom_mask[:, :, 1:2],
            cutoff=15.,
            per_residue=True)
        # The target is a fixed label: no gradient flows through true lDDT.
        lddt_ca = lddt_ca.detach()

        # Bin the true lDDT into num_bins equal-width bins over [0, 1].
        # Shape (batch_size, num_res)
        num_bins = self.config.num_bins
        bin_index = paddle.floor(lddt_ca * num_bins)

        # protect against out of range for lddt_ca == 1
        bin_index = paddle.minimum(bin_index, paddle.to_tensor(num_bins - 1, dtype='float32'))
        lddt_ca_one_hot = paddle.nn.functional.one_hot(paddle.cast(bin_index, 'int64'), num_classes=num_bins)

        # Shape (n_batch, num_res, num_channel)
        logits = value['predicted_lddt']['logits']
        errors = softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits)

        # Average only over residues whose CA atom exists.
        # Shape (num_res,)
        mask_ca = all_atom_mask[:, :, residue_constants.atom_order['CA']]
        mask_ca = paddle.to_tensor(mask_ca, dtype='float32')
        loss = paddle.sum(errors * mask_ca, axis=-1) / (paddle.sum(mask_ca, axis=-1) + 1e-8)

        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            resolution = paddle.squeeze(batch['resolution'], axis=-1)
            loss *= paddle.cast((resolution >= self.config.min_resolution)
                                & (resolution <= self.config.max_resolution), 'float32')

        output = {'loss': loss}
        return output
class PredictedAlignedErrorHead(nn.Layer):
    """Head to predict the distance errors in the backbone alignment frames.

    Can be used to compute predicted TM-Score.
    Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
    """

    def __init__(self, channel_num, config, global_config,
                 name='predicted_aligned_error_head'):
        super(PredictedAlignedErrorHead, self).__init__()
        self.config = config
        self.global_config = global_config

        self.logits = nn.Linear(channel_num['pair_channel'],
                                self.config.num_bins, name='logits')

    def forward(self, representations, batch):
        """Builds PredictedAlignedErrorHead module.

        Arguments:
          representations: Dictionary of representations, must contain:
            * 'pair': pair representation, shape [B, N_res, N_res, c_z].
          batch: Batch, unused.

        Returns:
          Dictionary containing:
            * logits: logits for aligned error, shape [B, N_res, N_res, N_bins].
            * breaks: array containing bin breaks, shape [N_bins - 1].
        """
        logits = self.logits(representations['pair'])
        # Bin edges are evenly spaced over [0, max_error_bin].
        breaks = paddle.linspace(0., self.config.max_error_bin,
                                 self.config.num_bins-1)
        return dict(logits=logits, breaks=breaks)

    def loss(self, value, batch):
        """Cross-entropy over binned pairwise aligned-frame distance errors."""
        # Predicted backbone frames as quaternion affines, from the
        # structure module's final (B, num_res, 7) affine tensor.
        predicted_affine = quat_affine.QuatAffine.from_tensor(
            value['structure_module']['final_affines'])
        # Ground-truth rotation / translation of the backbone frames
        # (shapes per quat_affine.QuatAffine — rotation and translation parts).
        true_rot = paddle.to_tensor(batch['backbone_affine_tensor_rot'], dtype='float32')
        true_trans = paddle.to_tensor(batch['backbone_affine_tensor_trans'], dtype='float32')
        true_affine = quat_affine.QuatAffine(
            quaternion=None,
            translation=true_trans,
            rotation=true_rot)
        # Shape (B, num_res)
        mask = batch['backbone_affine_mask']
        # Pairwise validity mask. Shape (B, num_res, num_res)
        square_mask = mask[..., None] * mask[:, None, :]
        num_bins = self.config.num_bins
        # (num_bins - 1)
        breaks = value['predicted_aligned_error']['breaks']
        # (B, num_res, num_res, num_bins)
        logits = value['predicted_aligned_error']['logits']

        # Compute the squared error for each alignment.
        def _local_frame_points(affine):
            # Express every residue's translation in every frame's local
            # coordinates (one extra dim for the frame axis).
            points = [paddle.unsqueeze(x, axis=-2) for x in
                      paddle.unstack(affine.translation, axis=-1)]
            return affine.invert_point(points, extra_dims=1)

        error_dist2_xyz = [
            paddle.square(a - b)
            for a, b in zip(_local_frame_points(predicted_affine),
                            _local_frame_points(true_affine))]
        error_dist2 = sum(error_dist2_xyz)
        # Shape (B, num_res, num_res)
        # First num_res are alignment frames, second num_res are the residues.
        # Targets are fixed labels: no gradient through the error distances.
        error_dist2 = error_dist2.detach()

        # Bin the squared error by comparing against squared bin breaks.
        sq_breaks = paddle.square(breaks)
        true_bins = paddle.sum(paddle.cast((error_dist2[..., None] > sq_breaks), 'int32'), axis=-1)

        errors = softmax_cross_entropy(
            labels=paddle.nn.functional.one_hot(true_bins, num_classes=num_bins), logits=logits)

        loss = (paddle.sum(errors * square_mask, axis=[-2, -1]) /
                (1e-8 + paddle.sum(square_mask, axis=[-2, -1])))

        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            resolution = paddle.squeeze(batch['resolution'], axis=-1)
            loss *= paddle.cast((resolution >= self.config.min_resolution)
                                & (resolution <= self.config.max_resolution), 'float32')

        output = {'loss': loss}
        return output
class ExperimentallyResolvedHead(nn.Layer):
    """Predicts if an atom is experimentally resolved in a high-res structure.

    Only trained on high-resolution X-ray crystals & cryo-EM.
    Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
    """

    def __init__(self, channel_num, config, global_config, name='experimentally_resolved_head'):
        super(ExperimentallyResolvedHead, self).__init__()
        self.config = config
        self.global_config = global_config
        # One logit per atom37 slot.
        self.logits = nn.Linear(channel_num['seq_channel'], 37, name='logits')

    def forward(self, representations, batch):
        """Project the single representation to per-atom resolved logits.

        Arguments:
          representations: Dictionary of representations, must contain:
            * 'single': Single representation, shape [B, N_res, c_s].
          batch: Batch, unused.

        Returns:
          Dictionary containing:
            * 'logits': logits of shape [B, N_res, 37],
                log probability that an atom is resolved in atom37 representation,
                can be converted to probability by applying sigmoid.
        """
        return dict(logits=self.logits(representations['single']))

    def loss(self, value, batch):
        """Sigmoid cross-entropy over atoms that exist in the amino acid."""
        logits = value['logits']
        assert len(logits.shape) == 3

        # Does the atom appear in the amino acid?
        atom_exists = batch['atom37_atom_exists']
        # Is the atom resolved in the experiment? Subset of atom_exists,
        # *except for OXT*
        all_atom_mask = paddle.cast(batch['all_atom_mask'], 'float32')

        xent = sigmoid_cross_entropy(labels=all_atom_mask, logits=logits)
        loss = (paddle.sum(xent * atom_exists, axis=[-2, -1]) /
                (1e-8 + paddle.sum(atom_exists, axis=[-2, -1])))

        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            resolution = paddle.squeeze(batch['resolution'], axis=-1)
            loss *= paddle.cast(
                (resolution >= self.config.min_resolution)
                & (resolution <= self.config.max_resolution), 'float32')

        return {'loss': loss}
class DistogramHead(nn.Layer):
    """Head to predict a distogram.

    Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
    """

    def __init__(self, channel_num, config, global_config=None,
                 name='distogram_head'):
        """Builds the distogram head.

        Args:
            channel_num: dict of channel sizes; reads 'pair_channel'.
            config: head config; reads num_bins, first_break, last_break.
            global_config: global model config, stored for parity with the
                other heads. Bug fix: AlphaFoldIteration constructs every head
                as ``Head_modules[name](channel_num, head_config,
                global_config)``; with the previous ``(channel_num, config,
                name)`` signature the global config object was silently bound
                to ``name``. The default ``None`` keeps two-argument call
                sites backward-compatible.
            name: layer name (kept for interface compatibility; unused).
        """
        super(DistogramHead, self).__init__()
        self.config = config
        self.global_config = global_config

        # Only "half" of the projection is learned; forward() symmetrizes by
        # adding the transpose, since pairwise distance is symmetric.
        self.half_logits = nn.Linear(channel_num['pair_channel'],
                                     self.config.num_bins, name='half_logits')
        init_final_linear(self.half_logits)

    def forward(self, representations, batch):
        """Builds DistogramHead module.

        Arguments:
          representations: Dictionary of representations, must contain:
            * 'pair': pair representation, shape [batch, N_res, N_res, c_z].

        Returns:
          Dictionary containing:
            * logits: logits for distogram, shape [batch, N_res, N_res, N_bins].
            * bin_edges: array containing bin breaks, shape [batch, N_bins - 1].
        """
        half_logits = self.half_logits(representations['pair'])

        # Symmetrize over the two residue axes.
        logits = half_logits + paddle.transpose(half_logits, perm=[0, 2, 1, 3])
        breaks = paddle.linspace(self.config.first_break, self.config.last_break,
                                 self.config.num_bins - 1)
        # Broadcast the shared bin edges over the batch dimension.
        breaks = paddle.tile(breaks[None, :],
                             repeat_times=[logits.shape[0], 1])

        return {
            'logits': logits,
            'bin_edges': breaks}

    def loss(self, value, batch):
        """Distogram cross-entropy loss; see _distogram_log_loss."""
        return _distogram_log_loss(value['logits'], value['bin_edges'],
                                   batch, self.config.num_bins)
def _distogram_log_loss(logits, bin_edges, batch, num_bins):
    """Log loss of a distogram.

    Bins true pairwise pseudo-beta distances against ``bin_edges`` and
    computes a masked softmax cross-entropy against ``logits``.
    """
    positions = batch['pseudo_beta']
    mask = batch['pseudo_beta_mask']
    assert positions.shape[-1] == 3

    # Squared bin edges, broadcastable over the two residue axes.
    sq_breaks = paddle.square(bin_edges).unsqueeze([1, 2])

    # Pairwise squared distances, trailing singleton dim kept for comparison.
    deltas = (paddle.unsqueeze(positions, axis=-2)
              - paddle.unsqueeze(positions, axis=-3))
    dist2 = paddle.sum(paddle.square(deltas), axis=-1, keepdim=True)

    # Bin index = number of edges exceeded.
    true_bins = paddle.sum(dist2 > sq_breaks, axis=-1)

    errors = softmax_cross_entropy(
        labels=paddle.nn.functional.one_hot(true_bins, num_classes=num_bins),
        logits=logits)

    # Only residue pairs where both pseudo-beta atoms exist contribute.
    square_mask = (paddle.unsqueeze(mask, axis=-2)
                   * paddle.unsqueeze(mask, axis=-1))
    avg_error = (paddle.sum(errors * square_mask, axis=[-2, -1]) /
                 (1e-6 + paddle.sum(square_mask, axis=[-2, -1])))

    dist2 = dist2[..., 0]
    return {
        'loss': avg_error,
        'true_dist': paddle.sqrt(1e-6 + dist2)}
def dgram_from_positions(positions, num_bins, min_bin, max_bin):
    """One-hot encode pairwise distances into ``num_bins`` distance bins.

    Bin edges are spaced linearly between ``min_bin`` and ``max_bin``; the
    last bin is open-ended (upper edge 1e8). Comparisons are done on squared
    distances to avoid a sqrt.
    """
    lower_breaks = paddle.square(paddle.linspace(min_bin, max_bin, num_bins))
    # The final bin catches everything beyond max_bin.
    upper_breaks = paddle.concat(
        [lower_breaks[1:], paddle.to_tensor([1e8], dtype='float32')])

    deltas = (paddle.unsqueeze(positions, axis=-2)
              - paddle.unsqueeze(positions, axis=-3))
    dist2 = paddle.sum(paddle.square(deltas), axis=-1, keepdim=True)

    # 1.0 exactly where lower <= dist2 < upper, 0.0 elsewhere.
    dgram = ((dist2 > lower_breaks.astype(dist2.dtype)).astype('float32') *
             (dist2 < upper_breaks.astype(dist2.dtype)).astype('float32'))
    return dgram
class EvoformerIteration(nn.Layer):
    """Single iteration (block) of Evoformer stack.

    Jumper et al. (2021) Suppl. Alg. 6 "EvoformerStack" lines 2-10.

    Supports three execution modes in `forward`:
      * the original sequential Evoformer structure,
      * a dap-sharded sequential path (activations scattered over ranks),
      * a branch-parallel path where rank 0 runs the MSA branch and rank 1
        the pair branch, synchronized with broadcasts.
    """

    def __init__(self, channel_num, config, global_config, is_extra_msa=False):
        # channel_num: dict of channel sizes; config/global_config: model
        # configuration objects; is_extra_msa selects global column attention.
        super(EvoformerIteration, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa

        # Row-wise Gated Self-attention with Pair Bias
        self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias(
            channel_num, self.config.msa_row_attention_with_pair_bias,
            self.global_config, is_extra_msa)
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.msa_row_attention_with_pair_bias)
        # use_dropout_nd switches to the project's n-d Dropout implementation.
        self.msa_row_attn_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        if not self.config.get('mute_msa_column', False):
            if self.is_extra_msa:
                # Extra-MSA stack uses the cheaper global column attention.
                self.msa_column_global_attention = MSAColumnGlobalAttention(
                    channel_num, config.msa_column_attention, global_config)
                dropout_rate, dropout_axis = self._parse_dropout_params(
                    self.msa_column_global_attention)
                self.msa_col_attn_dropout = nn.Dropout(
                    dropout_rate, axis=dropout_axis) \
                    if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
            else:
                self.msa_column_attention = MSAColumnAttention(
                    channel_num, config.msa_column_attention, global_config)
                dropout_rate, dropout_axis = self._parse_dropout_params(
                    self.msa_column_attention)
                self.msa_col_attn_dropout = nn.Dropout(
                    dropout_rate, axis=dropout_axis) \
                    if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        self.msa_transition = Transition(
            channel_num, self.config.msa_transition, self.global_config,
            is_extra_msa, 'msa_transition')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.msa_transition)
        self.msa_transition_dropout = nn.Dropout(
            dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        # OuterProductMean
        self.outer_product_mean = OuterProductMean(channel_num,
            self.config.outer_product_mean, self.global_config,
            self.is_extra_msa, name='outer_product_mean')

        # Dropout
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.outer_product_mean)
        self.outer_product_mean_dropout = nn.Dropout(
            dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        # Triangle Multiplication.
        self.triangle_multiplication_outgoing = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_outgoing, self.global_config,
            name='triangle_multiplication_outgoing')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_outgoing)
        self.triangle_outgoing_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        self.triangle_multiplication_incoming = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_incoming, self.global_config,
            name='triangle_multiplication_incoming')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_incoming)
        self.triangle_incoming_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        # TriangleAttention.
        self.triangle_attention_starting_node = TriangleAttention(channel_num,
            self.config.triangle_attention_starting_node, self.global_config,
            name='triangle_attention_starting_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_starting_node)
        self.triangle_starting_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        self.triangle_attention_ending_node = TriangleAttention(channel_num,
            self.config.triangle_attention_ending_node, self.global_config,
            name='triangle_attention_ending_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_ending_node)
        self.triangle_ending_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

        # Pair transition.
        self.pair_transition = Transition(
            channel_num, self.config.pair_transition, self.global_config,
            is_extra_msa, 'pair_transition')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.pair_transition)
        self.pair_transition_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)

    def _parse_dropout_params(self, module):
        """Return (rate, axis) for a sub-module's shared dropout.

        Rate is forced to 0 in deterministic mode; when shared_dropout is
        set, the dropped axes are chosen so the mask is shared per row or
        per column of the activation tensor.
        """
        dropout_rate = 0.0 if self.global_config.deterministic else \
            module.config.dropout_rate
        dropout_axis = None
        if module.config.shared_dropout:
            dropout_axis = {
                'per_row': [0, 2, 3],
                'per_column': [0, 1, 3],
            }[module.config.orientation]
        return dropout_rate, dropout_axis

    def forward(self, msa_act, pair_act, masks):
        """Apply one Evoformer block.

        Args:
            msa_act: MSA activations (per in-line shape comments,
                [B, N_seq(/dap_size), N_res, c_m] — confirm against callers).
            pair_act: pair activations ([B, N_res(/dap_size), N_res, c_z]).
            masks: dict with 'msa' and 'pair' masks.

        Returns:
            Tuple (msa_act, pair_act) of updated activations.
        """
        msa_mask, pair_mask = masks['msa'], masks['pair']

        if self.global_config.origin_evoformer_structure:
            # Plain sequential Evoformer: each sub-module adds a
            # dropout-regularized residual onto its stream.
            residual = self.msa_row_attention_with_pair_bias(
                msa_act, msa_mask, pair_act)
            residual = self.msa_row_attn_dropout(residual)
            msa_act = msa_act + residual

            if self.is_extra_msa:
                residual = self.msa_column_global_attention(msa_act, msa_mask)
                residual = self.msa_col_attn_dropout(residual)
                msa_act = msa_act + residual

                residual = self.msa_transition(msa_act, msa_mask)
                residual = self.msa_transition_dropout(residual)
                msa_act = msa_act + residual
            else:
                residual = self.msa_column_attention(msa_act, msa_mask)
                residual = self.msa_col_attn_dropout(residual)
                msa_act = msa_act + residual

                residual = self.msa_transition(msa_act, msa_mask)
                residual = self.msa_transition_dropout(residual)
                msa_act = msa_act + residual

            residual = self.outer_product_mean(msa_act, msa_mask)
            residual = self.outer_product_mean_dropout(residual)
            pair_act = pair_act + residual

            residual = self.triangle_multiplication_outgoing(pair_act, pair_mask)
            residual = self.triangle_outgoing_dropout(residual)
            pair_act = pair_act + residual

            residual = self.triangle_multiplication_incoming(pair_act, pair_mask)
            residual = self.triangle_incoming_dropout(residual)
            pair_act = pair_act + residual

            residual = self.triangle_attention_starting_node(pair_act, pair_mask)
            residual = self.triangle_starting_dropout(residual)
            pair_act = pair_act + residual

            residual = self.triangle_attention_ending_node(pair_act, pair_mask)
            residual = self.triangle_ending_dropout(residual)
            pair_act = pair_act + residual

            residual = self.pair_transition(pair_act, pair_mask)
            residual = self.pair_transition_dropout(residual)
            pair_act = pair_act + residual
        else:
            if scg.get_bp_world_size() > 1:
                # Branch-parallel path: rank 0 computes the MSA branch,
                # rank 1 the pair branch; results are exchanged below.
                # Note(GuoxiaWang): add zeros trigger the status of stop_gradient=False within recompute context.
                pair_act = pair_act + paddle.zeros_like(pair_act)

                # Note(GuoxiaWang): reduce the pair_act's gradient from msa branch and pair branch
                if not pair_act.stop_gradient:
                    pair_act._register_grad_hook(bp.all_reduce)

                if scg.get_bp_rank_in_group() == 0:
                    # [B, N_seq//dap_size, N_res, c_m]
                    residual = self.msa_row_attention_with_pair_bias(
                        msa_act, msa_mask, pair_act)
                    residual = self.msa_row_attn_dropout(residual)
                    msa_act = msa_act + residual

                    # [B, N_seq//dap_size, N_res, c_m] => [B, N_seq, N_res//dap_size, c_m]
                    msa_act = dap.row_to_col(msa_act)

                    if not self.config.get('mute_msa_column', False):
                        if self.is_extra_msa:
                            # [B, N_seq, N_res//dap_size, c_m]
                            residual = self.msa_column_global_attention(msa_act, msa_mask)
                            residual = self.msa_col_attn_dropout(residual)
                            msa_act = msa_act + residual
                        else:
                            # [B, N_seq, N_res//dap_size, c_m]
                            residual = self.msa_column_attention(msa_act, msa_mask)
                            residual = self.msa_col_attn_dropout(residual)
                            msa_act = msa_act + residual

                    # [B, N_seq, N_res//dap_size, c_m]
                    residual = self.msa_transition(msa_act, msa_mask)
                    residual = self.msa_transition_dropout(residual)
                    msa_act = msa_act + residual

                    # [B, N_res//dap_size, N_res, c_z]
                    residual = self.outer_product_mean(msa_act, msa_mask)
                    outer_product_mean = self.outer_product_mean_dropout(residual)

                    # Note(GuoxiaWang): stop the gradient from pair transition
                    pair_act = pair_act.clone()
                    pair_act.stop_gradient = True

                    # [B, N_seq, N_res//dap_size, c_m] => [B, N_seq//dap_size, N_res, c_m]
                    msa_act = dap.col_to_row(msa_act)

                if scg.get_bp_rank_in_group() == 1:
                    # Placeholder; the real value is broadcast from rank 0.
                    outer_product_mean = paddle.zeros_like(pair_act)

                    # Note(GuoxiaWang): enable gradient flow in backward
                    msa_act = msa_act.clone()

                    # scatter if using dap, otherwise do nothing
                    pair_mask_row = dap.scatter(pair_mask, axis=1)
                    pair_mask_col = dap.scatter(pair_mask, axis=2)

                    # [B, N_res//dap_size, N_res, c_z]
                    residual = self.triangle_multiplication_outgoing(pair_act, pair_mask_row)
                    residual = self.triangle_outgoing_dropout(residual)
                    pair_act = pair_act + residual

                    # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res//dap_size, c_z]
                    pair_act = dap.row_to_col(pair_act)

                    # [B, N_res, N_res//dap_size, c_z]
                    residual = self.triangle_multiplication_incoming(pair_act, pair_mask_col)
                    residual = self.triangle_incoming_dropout(residual)
                    pair_act = pair_act + residual

                    # [B, N_res, N_res//dap_size, c_z] => [B, N_res//dap_size, N_res, c_z]
                    pair_act = dap.col_to_row(pair_act)

                    # [B, N_res//dap_size, N_res, c_z]
                    residual = self.triangle_attention_starting_node(pair_act, pair_mask_row)
                    residual = self.triangle_starting_dropout(residual)
                    pair_act = pair_act + residual

                    # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res//dap_size, c_z]
                    pair_act = dap.row_to_col(pair_act)

                    # [B, N_res, N_res//dap_size, c_z]
                    residual = self.triangle_attention_ending_node(pair_act, pair_mask_col)
                    residual = self.triangle_ending_dropout(residual)
                    pair_act = pair_act + residual

                    # [B, N_res, N_res//dap_size, c_z] => [B, N_res//dap_size, N_res, c_z]
                    pair_act = dap.col_to_row(pair_act)

                # Exchange branch results, then finish the pair stream on
                # both ranks (outer product from rank 0, pair from rank 1).
                bp.broadcast(msa_act, 0)
                bp.broadcast(pair_act, 1)

                bp.broadcast(outer_product_mean, 0)
                pair_act = pair_act + outer_product_mean

                residual = self.pair_transition(pair_act, pair_mask)
                residual = self.pair_transition_dropout(residual)
                pair_act = pair_act + residual
            else:
                # dap-sharded sequential path: activations are scattered
                # across ranks and re-sharded between row- and column-wise ops.
                # [B, N_seq//dap_size, N_res, c_m]
                residual = self.msa_row_attention_with_pair_bias(
                    msa_act, msa_mask, pair_act)
                residual = self.msa_row_attn_dropout(residual)
                msa_act = msa_act + residual

                # [B, N_seq//dap_size, N_res, c_m] => [B, N_seq, N_res//dap_size, c_m]
                msa_act = dap.row_to_col(msa_act)

                if not self.config.get('mute_msa_column', False):
                    if self.is_extra_msa:
                        # [B, N_seq, N_res//dap_size, c_m]
                        residual = self.msa_column_global_attention(msa_act, msa_mask)
                        residual = self.msa_col_attn_dropout(residual)
                        msa_act = msa_act + residual
                    else:
                        # [B, N_seq, N_res//dap_size, c_m]
                        residual = self.msa_column_attention(msa_act, msa_mask)
                        residual = self.msa_col_attn_dropout(residual)
                        msa_act = msa_act + residual

                # [B, N_seq, N_res//dap_size, c_m]
                residual = self.msa_transition(msa_act, msa_mask)
                residual = self.msa_transition_dropout(residual)
                msa_act = msa_act + residual

                # [B, N_res//dap_size, N_res, c_z]
                residual = self.outer_product_mean(msa_act, msa_mask)
                outer_product_mean = self.outer_product_mean_dropout(residual)

                # [B, N_seq, N_res//dap_size, c_m] => [B, N_seq//dap_size, N_res, c_m]
                msa_act = dap.col_to_row(msa_act)

                # scatter if using dap, otherwise do nothing
                pair_mask_row = dap.scatter(pair_mask, axis=1)
                pair_mask_col = dap.scatter(pair_mask, axis=2)

                # [B, N_res//dap_size, N_res, c_z]
                # TODO(GuoxiaWang): why have diffrence whether remove pair_act = pair_act.clone()
                pair_act = pair_act.clone()
                residual = self.triangle_multiplication_outgoing(pair_act, pair_mask_row)
                residual = self.triangle_outgoing_dropout(residual)
                pair_act = pair_act + residual

                # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res//dap_size, c_z]
                pair_act = dap.row_to_col(pair_act)

                # [B, N_res, N_res//dap_size, c_z]
                residual = self.triangle_multiplication_incoming(pair_act, pair_mask_col)
                residual = self.triangle_incoming_dropout(residual)
                pair_act = pair_act + residual

                # [B, N_res, N_res//dap_size, c_z] => [B, N_res//dap_size, N_res, c_z]
                pair_act = dap.col_to_row(pair_act)

                # [B, N_res//dap_size, N_res, c_z]
                residual = self.triangle_attention_starting_node(pair_act, pair_mask_row)
                residual = self.triangle_starting_dropout(residual)
                pair_act = pair_act + residual

                # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res//dap_size, c_z]
                pair_act = dap.row_to_col(pair_act)

                # [B, N_res, N_res//dap_size, c_z]
                residual = self.triangle_attention_ending_node(pair_act, pair_mask_col)
                residual = self.triangle_ending_dropout(residual)
                pair_act = pair_act + residual

                # [B, N_res, N_res//dap_size, c_z] => [B, N_res//dap_size, N_res, c_z]
                pair_act = dap.col_to_row(pair_act)

                pair_act = pair_act + outer_product_mean

                residual = self.pair_transition(pair_act, pair_mask)
                residual = self.pair_transition_dropout(residual)
                pair_act = pair_act + residual

        return msa_act, pair_act
class EmbeddingsAndEvoformer(nn.Layer):
    """Embeds the input data and runs Evoformer.

    Produces the MSA, single and pair representations.
    Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5-18
    """

    def __init__(self, channel_num, config, global_config):
        super(EmbeddingsAndEvoformer, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        # InputEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
        # Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
        self.preprocess_1d = nn.Linear(channel_num['target_feat'],
                                       self.config.msa_channel, name='preprocess_1d')
        self.preprocess_msa = nn.Linear(channel_num['msa_feat'],
                                        self.config.msa_channel, name='preprocess_msa')
        self.left_single = nn.Linear(channel_num['target_feat'], self.config.pair_channel,
                                     name='left_single')
        self.right_single = nn.Linear(channel_num['target_feat'], self.config.pair_channel,
                                      name='right_single')

        # RecyclingEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
        # Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
        if self.config.recycle_pos:
            self.prev_pos_linear = nn.Linear(self.config.prev_pos.num_bins,
                                             self.config.pair_channel)

        # RelPosEmbedder
        # Jumper et al. (2021) Suppl. Alg. 4 "relpos"
        # Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
        if self.config.max_relative_feature:
            # NOTE: 'pair_activiations' (sic) is kept: renaming would break
            # parameter names in existing checkpoints.
            self.pair_activiations = nn.Linear(
                2 * self.config.max_relative_feature + 1,
                self.config.pair_channel)

        if self.config.recycle_features:
            self.prev_msa_first_row_norm = nn.LayerNorm(
                self.config.msa_channel)
            self.prev_pair_norm = nn.LayerNorm(self.config.pair_channel)

        # Embed templates into the pair activations.
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
        if self.config.template.enabled:
            # Mutates the shared channel_num dict so that sub-modules see
            # the template feature sizes.
            self.channel_num['template_angle'] = 57
            self.channel_num['template_pair'] = 88
            self.template_embedding = TemplateEmbedding(
                self.channel_num, self.config.template, self.global_config)

        if not self.config.get('mute_extra_msa', False):
            # ExtraMSAEmbedder
            # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
            self.extra_msa_activations = nn.Linear(
                25,  # 23 (20aa+unknown+gap+mask) + 1 (has_del) + 1 (del_val)
                self.config.extra_msa_channel)

            # Extra MSA Stack.
            # Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
            self.extra_msa_stack = nn.LayerList()
            for _ in range(self.config.extra_msa_stack_num_block):
                self.extra_msa_stack.append(EvoformerIteration(
                    self.channel_num, self.config.evoformer, self.global_config,
                    is_extra_msa=True))

        # Embed templates torsion angles
        if self.config.template.enabled and self.config.template.embed_torsion_angles:
            c = self.config.msa_channel
            self.template_single_embedding = nn.Linear(
                self.channel_num['template_angle'], c)
            self.template_projection = nn.Linear(c, c)

        # Main trunk of the network
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
        self.evoformer_iteration = nn.LayerList()
        for _ in range(self.config.evoformer_num_block):
            self.evoformer_iteration.append(EvoformerIteration(
                self.channel_num, self.config.evoformer, self.global_config,
                is_extra_msa=False))

        self.single_activations = nn.Linear(
            self.config.msa_channel, self.config.seq_channel)

    def _pseudo_beta_fn(self, aatype, all_atom_positions, all_atom_masks):
        """Create pseudo-beta coordinates (CB, or CA for glycine).

        Args:
            aatype: residue type indices.
            all_atom_positions: atom37 positions (last two axes: atom, xyz).
            all_atom_masks: atom37 mask (last axis: atom), or None.

        Returns:
            pseudo_beta, or (pseudo_beta, pseudo_beta_mask) when a mask is
            given.
        """
        gly_id = paddle.ones_like(aatype) * residue_constants.restype_order['G']
        is_gly = paddle.equal(aatype, gly_id)

        ca_idx = residue_constants.atom_order['CA']
        cb_idx = residue_constants.atom_order['CB']

        # Slice the atom axis (second-to-last of the positions tensor).
        n = len(all_atom_positions.shape)
        pseudo_beta = paddle.where(
            paddle.tile(paddle.unsqueeze(is_gly, axis=-1),
                        [1] * len(is_gly.shape) + [3]),
            paddle.squeeze(
                all_atom_positions.slice([n-2], [ca_idx], [ca_idx+1]),
                axis=-2),
            paddle.squeeze(
                all_atom_positions.slice([n-2], [cb_idx], [cb_idx+1]),
                axis=-2))

        if all_atom_masks is not None:
            # BUGFIX: was `len(all_atom_masks)`, which returns the size of
            # the FIRST dimension (batch), so `slice([m-1], ...)` cut the
            # wrong axis. The atom axis is the LAST axis of the mask, so we
            # need the tensor rank here (mirrors `n` above and the JAX
            # reference, which indexes `all_atom_masks[..., ca_idx]`).
            m = len(all_atom_masks.shape)
            pseudo_beta_mask = paddle.where(
                is_gly,
                paddle.squeeze(
                    all_atom_masks.slice([m-1], [ca_idx], [ca_idx+1]),
                    axis=-1),
                paddle.squeeze(
                    all_atom_masks.slice([m-1], [cb_idx], [cb_idx+1]),
                    axis=-1))
            # No-op when the last axis is already removed; paddle.squeeze
            # leaves non-unit axes unchanged.
            pseudo_beta_mask = paddle.squeeze(pseudo_beta_mask, axis=-1)
            return pseudo_beta, pseudo_beta_mask
        else:
            return pseudo_beta

    def _create_extra_msa_feature(self, batch):
        """Concatenate one-hot extra MSA with deletion features."""
        # 23: 20aa + unknown + gap + bert mask
        msa_1hot = nn.functional.one_hot(batch['extra_msa'], 23)
        msa_feat = [msa_1hot,
                    paddle.unsqueeze(batch['extra_has_deletion'], axis=-1),
                    paddle.unsqueeze(batch['extra_deletion_value'], axis=-1)]
        return paddle.concat(msa_feat, axis=-1)

    def forward(self, batch):
        """Embed inputs, run the extra-MSA and main Evoformer stacks.

        Returns a dict with 'single', 'pair', 'msa' and 'msa_first_row'
        representations.
        """
        # InputEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
        # Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
        if 'tape_single' in batch:
            # Precomputed single/pair embeddings bypass the input embedder.
            msa_activations = paddle.unsqueeze(batch['tape_single'], axis=1)  # add msa dimention
            pair_activations = batch['tape_pair']
        else:
            preprocess_1d = self.preprocess_1d(batch['target_feat'])
            msa_activations = paddle.unsqueeze(preprocess_1d, axis=1) + \
                self.preprocess_msa(batch['msa_feat'])

            right_single = self.right_single(batch['target_feat'])   # 1, n_res, 22 -> 1, n_res, 128
            right_single = paddle.unsqueeze(right_single, axis=1)    # 1, n_res, 128 -> 1, 1, n_res, 128
            left_single = self.left_single(batch['target_feat'])     # 1, n_res, 22 -> 1, n_res, 128
            left_single = paddle.unsqueeze(left_single, axis=2)      # 1, n_res, 128 -> 1, n_res, 1, 128
            pair_activations = left_single + right_single

        mask_2d = paddle.unsqueeze(batch['seq_mask'], axis=1) * paddle.unsqueeze(batch['seq_mask'], axis=2)

        # Inject previous outputs for recycling.
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
        # Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
        if self.config.recycle_pos and 'prev_pos' in batch:
            prev_pseudo_beta = self._pseudo_beta_fn(
                batch['aatype'], batch['prev_pos'], None)
            dgram = dgram_from_positions(
                prev_pseudo_beta, **self.config.prev_pos)
            pair_activations += self.prev_pos_linear(dgram)

        if self.config.recycle_features:
            if 'prev_msa_first_row' in batch:
                prev_msa_first_row = self.prev_msa_first_row_norm(
                    batch['prev_msa_first_row'])

                # A workaround for `jax.ops.index_add`
                # NOTE(review): relies on paddle getitem keeping the sliced
                # axis (so the explicit squeeze/unsqueeze pair is needed) —
                # confirm against the paddle version in use.
                msa_first_row = paddle.squeeze(msa_activations[:, 0, :], axis=1)
                msa_first_row += prev_msa_first_row
                msa_first_row = paddle.unsqueeze(msa_first_row, axis=1)
                msa_activations = paddle.concat([msa_first_row, msa_activations[:, 1:, :]], axis=1)

            if 'prev_pair' in batch:
                pair_activations += self.prev_pair_norm(batch['prev_pair'])

        # RelPosEmbedder
        # Jumper et al. (2021) Suppl. Alg. 4 "relpos"
        # Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
        if self.config.max_relative_feature:
            pos = batch['residue_index']  # [bs, N_res]
            offset = paddle.unsqueeze(pos, axis=[-1]) - \
                paddle.unsqueeze(pos, axis=[-2])
            rel_pos = nn.functional.one_hot(
                paddle.clip(
                    offset + self.config.max_relative_feature,
                    min=0,
                    max=2 * self.config.max_relative_feature),
                2 * self.config.max_relative_feature + 1)
            rel_pos_bias = self.pair_activiations(rel_pos)
            pair_activations += rel_pos_bias

        # TemplateEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
        if self.config.template.enabled:
            template_batch = {k: batch[k] for k in batch
                              if k.startswith('template_')}
            template_pair_repr = self.template_embedding(
                pair_activations, template_batch, mask_2d)
            pair_activations += template_pair_repr

        if not self.config.get('mute_extra_msa', False):
            # ExtraMSAEmbedder
            # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
            extra_msa_feat = self._create_extra_msa_feature(batch)
            extra_msa_activations = self.extra_msa_activations(extra_msa_feat)

            # ==================================================
            #  Extra MSA Stack
            # Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
            # ==================================================
            extra_msa_stack_input = {
                'msa': extra_msa_activations,
                'pair': pair_activations,
            }

            if scg.get_bp_world_size() > 1:
                extra_msa_stack_input['msa'] = bp.broadcast_grad_for_backward(extra_msa_stack_input['msa'], 0)

            # scatter if using dap, otherwise do nothing
            # [B, N_seq, N_res, c_m] => [B, N_seq//dap_size, N_res, c_m]
            extra_msa_stack_input['msa'] = dap.scatter(extra_msa_stack_input['msa'], axis=1)
            # [B, N_res, N_res, c_z] => [B, N_res//dap_size, N_res, c_z]
            extra_msa_stack_input['pair'] = dap.scatter(extra_msa_stack_input['pair'], axis=1)

            for idx, extra_msa_stack_iteration in enumerate(self.extra_msa_stack):
                # Gradient checkpointing only from the configured block on.
                extra_msa_act, extra_pair_act = recompute_wrapper(
                    extra_msa_stack_iteration,
                    extra_msa_stack_input['msa'],
                    extra_msa_stack_input['pair'],
                    {'msa': batch['extra_msa_mask'],
                     'pair': mask_2d},
                    is_recompute=self.training and idx >= self.config.extra_msa_stack_recompute_start_block_index)
                extra_msa_stack_output = {
                    'msa': extra_msa_act,
                    'pair': extra_pair_act}
                extra_msa_stack_input = {
                    'msa': extra_msa_stack_output['msa'],
                    'pair': extra_msa_stack_output['pair']}

            # gather if using dap, otherwise do nothing
            # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res, c_z]
            extra_msa_stack_output['pair'] = dap.gather(extra_msa_stack_output['pair'], axis=1)
        else:
            extra_msa_stack_output = {'pair': pair_activations}

        evoformer_input = {
            'msa': msa_activations,
            'pair': extra_msa_stack_output['pair'],
        }

        evoformer_masks = {
            'msa': batch['msa_mask'],
            'pair': mask_2d,
        }

        # ==================================================
        #  Template angle feat
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 7-8
        # ==================================================
        if self.config.template.enabled and self.config.template.embed_torsion_angles:
            num_templ, num_res = batch['template_aatype'].shape[1:]

            aatype_one_hot = nn.functional.one_hot(batch['template_aatype'], 22)

            # Embed the templates aatype, torsion angles and masks.
            # Shape (templates, residues, msa_channels)
            ret = all_atom.atom37_to_torsion_angles(
                aatype=batch['template_aatype'],
                all_atom_pos=batch['template_all_atom_positions'],
                all_atom_mask=batch['template_all_atom_masks'],
                # Ensure consistent behaviour during testing:
                placeholder_for_undefined=not self.global_config.zero_init)

            template_features = paddle.concat([
                aatype_one_hot,
                paddle.reshape(ret['torsion_angles_sin_cos'],
                               [-1, num_templ, num_res, 14]),
                paddle.reshape(ret['alt_torsion_angles_sin_cos'],
                               [-1, num_templ, num_res, 14]),
                ret['torsion_angles_mask']], axis=-1)

            template_activations = self.template_single_embedding(
                template_features)
            template_activations = nn.functional.relu(template_activations)
            template_activations = self.template_projection(template_activations)

            # Concatenate the templates to the msa.
            evoformer_input['msa'] = paddle.concat(
                [evoformer_input['msa'], template_activations], axis=1)

            # Concatenate templates masks to the msa masks.
            # Use mask from the psi angle, as it only depends on the backbone atoms
            # from a single residue.
            torsion_angle_mask = ret['torsion_angles_mask'][..., 2]
            torsion_angle_mask = torsion_angle_mask.astype(
                evoformer_masks['msa'].dtype)
            evoformer_masks['msa'] = paddle.concat(
                [evoformer_masks['msa'], torsion_angle_mask], axis=1)

        if scg.get_bp_world_size() > 1:
            evoformer_input['msa'] = bp.broadcast_grad_for_backward(evoformer_input['msa'], 0)

        # scatter if using dap, otherwise do nothing
        # [B, N_seq, N_res, c_m] => [B, N_seq//dap_size, N_res, c_m]
        evoformer_input['msa'] = dap.scatter(evoformer_input['msa'], axis=1)
        # [B, N_res, N_res, c_z] => [B, N_res//dap_size, N_res, c_z]
        evoformer_input['pair'] = dap.scatter(evoformer_input['pair'], axis=1)

        # ==================================================
        #  Main MSA Stack
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
        # ==================================================
        for idx, evoformer_block in enumerate(self.evoformer_iteration):
            msa_act, pair_act = recompute_wrapper(
                evoformer_block,
                evoformer_input['msa'],
                evoformer_input['pair'],
                evoformer_masks,
                is_recompute=self.training and idx >= self.config.evoformer_recompute_start_block_index)
            evoformer_output = {
                'msa': msa_act,
                'pair': pair_act}
            evoformer_input = {
                'msa': evoformer_output['msa'],
                'pair': evoformer_output['pair'],
            }

        # gather if using dap, otherwise do nothing
        # [B, N_seq//dap_size, N_res, c_m] => [B, N_seq, N_res, c_m]
        evoformer_output['msa'] = dap.gather(evoformer_output['msa'], axis=1)
        # [B, N_res//dap_size, N_res, c_z] => [B, N_res, N_res, c_z]
        evoformer_output['pair'] = dap.gather(evoformer_output['pair'], axis=1)

        msa_activations = evoformer_output['msa']
        pair_activations = evoformer_output['pair']
        single_activations = self.single_activations(msa_activations[:, 0])

        num_seq = batch['msa_feat'].shape[1]
        output = {
            'single': single_activations,
            'pair': pair_activations,
            # Crop away template rows such that they are not used
            # in MaskedMsaHead.
            'msa': msa_activations[:, :num_seq],
            'msa_first_row': msa_activations[:, 0],
        }

        return output
class OuterProductMean(nn.Layer):
    """Computes mean outer product.

    Jumper et al. (2021) Suppl. Alg. 10 "OuterProductMean"
    """

    def __init__(self, channel_num, config, global_config, is_extra_msa, name='outer_product_mean'):
        super(OuterProductMean, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        # Input channel size depends on whether this instance sits in the
        # extra-MSA stack or the main Evoformer stack.
        if is_extra_msa:
            c_m = channel_num['extra_msa_channel']
        else:
            c_m = channel_num['msa_channel']

        self.layer_norm_input = nn.LayerNorm(c_m, name='layer_norm_input')
        self.left_projection = nn.Linear(
            c_m, self.config.num_outer_channel, name='left_projection')
        self.right_projection = nn.Linear(
            c_m, self.config.num_outer_channel, name='right_projection')

        # Zero-init of the output projection makes the residual start as
        # identity (standard AlphaFold initialization scheme).
        if self.global_config.zero_init:
            init_w = nn.initializer.Constant(value=0.0)
        else:
            init_w = nn.initializer.KaimingNormal()

        self.output_w = paddle.create_parameter(
            [self.config.num_outer_channel, self.config.num_outer_channel, channel_num['pair_channel']],
            'float32', default_initializer=init_w)
        self.output_b = paddle.create_parameter(
            [channel_num['pair_channel']], 'float32',
            default_initializer=nn.initializer.Constant(value=0.0))

    def forward(self, act, mask):
        """Builds OuterProductMean module.

        Arguments:
            act: MSA representation, shape [batch, N_seq, N_res, c_m].
            mask: MSA mask, shape [batch, N_seq, N_res].

        Returns:
            Update to pair representation, shape [batch, N_res, N_res, c_z].
        """
        # [B, N_seq, N_res//dap_size, c_m]
        act = self.layer_norm_input(act)
        # [B, N_seq, N_res//dap_size, c_m] => [B, N_seq, N_res//dap_size, num_outer_channel]
        right_act_before = self.right_projection(act)
        # [B, N_seq, N_res//dap_size, num_outer_channel] => [B, N_seq, N_res, num_outer_channel]
        # (may be asynchronous: completed by all_gather_opp inside compute_chunk)
        right_act = dap.all_gather(right_act_before, axis=2)

        # [B, N_seq, N_res//dap_size, c_m] => [B, N_seq, N_res//dap_size, num_outer_channel]
        left_act = self.left_projection(act)
        # [B, N_seq, N_res] => [B, N_seq, N_res, 1]
        mask = paddle.unsqueeze(mask, axis=-1)
        # [B, N_seq, N_res, 1] => [B, N_seq, N_res//dap_size, 1]
        mask_col = dap.scatter(mask, axis=2)
        left_act = mask_col * left_act

        # [B, N_seq, N_res//dap_size, 1], [B, N_seq, N_res, 1] => [B, N_res//dap_size, N_res, 1]
        # Normalizer: number of valid sequences per residue pair; epsilon
        # guards against division by zero for fully masked pairs.
        epsilon = 1e-3
        norm = paddle.einsum('nabc,nadc->nbdc', mask_col, mask) + epsilon

        def compute_chunk(left_act, right_act):
            # This is equivalent to
            #
            # act = jnp.einsum('abc,ade->dceb', left_act, right_act)
            # act = jnp.einsum('dceb,cef->bdf', act, output_w) + output_b
            #
            # but faster. maybe for subbatch inference?

            # [B, N_seq, N_res//dap_size, num_outer_channel] => [B, N_seq, num_outer_channel, N_res//dap_size]
            left_act = left_act.transpose([0, 1, 3, 2])
            # wait if using async communication and dap, otherwise do nothing
            right_act_after = dap.all_gather_opp(right_act, axis=2)
            # [B, N_seq, num_outer_channel, N_res//dap_size], [B, N_seq, N_res, num_outer_channel]
            # => [B, N_res, num_outer_channel, num_outer_channel, N_res//dap_size]
            act = paddle.einsum('nacb,nade->ndceb', left_act, right_act_after)
            # [B, N_res, num_outer_channel, num_outer_channel, N_res//dap_size], [num_outer_channel, num_outer_channel, c_z]
            # => [B, N_res, N_res//dap_size, c_z]
            act = paddle.einsum('ndceb,cef->ndbf', act, self.output_w) + self.output_b
            # [B, N_res, N_res//dap_size, c_z] => [B, N_res//dap_size, N_res, c_z]
            return act.transpose([0, 2, 1, 3])

        if not self.training:
            # low memory mode using subbatch
            sb_chunk = subbatch(compute_chunk, [0], [2],
                                self.config.chunk_size, 1)
            act = sb_chunk(left_act, right_act)
        else:
            act = compute_chunk(left_act, right_act)

        act = act / norm

        return act
class TriangleAttention(nn.Layer):
    """Triangle Attention.

    Jumper et al. (2021) Suppl. Alg. 13 "TriangleAttentionStartingNode"
    Jumper et al. (2021) Suppl. Alg. 14 "TriangleAttentionEndingNode"
    """

    def __init__(self, channel_num, config, global_config, name='triangle_attention'):
        super(TriangleAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        assert config.orientation in ['per_row', 'per_column']

        c_z = channel_num['pair_channel']
        self.query_norm = nn.LayerNorm(c_z, name='query_norm')
        # Projects the pair representation to one additive attention bias
        # per head; initialized with std 1/sqrt(c_z).
        self.feat_2d_weights = paddle.create_parameter(
            [c_z, self.config.num_head], 'float32',
            default_initializer=nn.initializer.Normal(std=1. / np.sqrt(c_z)))

        self.attention = Attention(
            self.config, self.global_config, c_z, c_z, c_z)

    def forward(self, pair_act, pair_mask):
        """Builds TriangleAttention module.

        Arguments:
            pair_act: [batch, N_res, N_res, c_z] pair activations tensor
            pair_mask: [batch, N_res, N_res] mask of non-padded regions.

        Returns:
            Update to pair_act, shape [batch, N_res, N_res, c_z].
        """
        attend_over_columns = self.config.orientation == 'per_column'
        if attend_over_columns:
            # Ending-node attention works on the transposed pair tensor.
            pair_act = pair_act.transpose([0, 2, 1, 3])
            pair_mask = pair_mask.transpose([0, 2, 1])

        # Large negative bias removes padded positions from the softmax.
        # [B, N_res//dap_size, N_res] => [B, N_res//dap_size, 1, 1, N_res]
        bias = 1e9 * (pair_mask - 1.)
        bias = paddle.unsqueeze(bias, axis=[2, 3])

        pair_act = self.query_norm(pair_act)

        # Per-head pair bias; gather the full residue axis when using dap
        # (all_gather may be async; all_gather_opp completes it).
        nonbatched_bias_before = paddle.einsum(
            'bqkc,ch->bhqk', pair_act, self.feat_2d_weights)
        nonbatched_bias = dap.all_gather(nonbatched_bias_before, axis=2)
        nonbatched_bias = dap.all_gather_opp(nonbatched_bias, axis=2)

        if self.training:
            pair_act = self.attention(pair_act, pair_act, bias, nonbatched_bias)
        else:
            # Inference: chunk the attention to bound peak memory.
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               self.global_config.subbatch_size, 1,
                               same_arg_idx={1: 0})
            pair_act = sb_attn(pair_act, pair_act, bias, nonbatched_bias)

        if attend_over_columns:
            pair_act = pair_act.transpose([0, 2, 1, 3])

        return pair_act
class TriangleMultiplication(nn.Layer):
    """Triangle multiplication layer ("outgoing" or "incoming").

    Jumper et al. (2021) Suppl. Alg. 11 "TriangleMultiplicationOutgoing"
    Jumper et al. (2021) Suppl. Alg. 12 "TriangleMultiplicationIncoming"
    """
    def __init__(self, channel_num, config, global_config, name='triangle_multiplication'):
        super(TriangleMultiplication, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        self.layer_norm_input = nn.LayerNorm(self.channel_num['pair_channel'], name='layer_norm_input')
        self.left_projection = nn.Linear(self.channel_num['pair_channel'],
                                    self.config.num_intermediate_channel, name='left_projection')
        self.right_projection = nn.Linear(self.channel_num['pair_channel'],
                                    self.config.num_intermediate_channel, name='right_projection')
        # Gates are initialized so that sigmoid(gate) starts near 1 (weight=0, bias=1).
        self.left_gate = nn.Linear(self.channel_num['pair_channel'],
                                   self.config.num_intermediate_channel, name='left_gate')
        init_gate_linear(self.left_gate)
        self.right_gate = nn.Linear(self.channel_num['pair_channel'],
                                    self.config.num_intermediate_channel, name='right_gate')
        init_gate_linear(self.right_gate)

        # line 4
        self.center_layer_norm = nn.LayerNorm(self.config.num_intermediate_channel, name='center_layer_norm')
        self.output_projection = nn.Linear(self.config.num_intermediate_channel,
                                           self.channel_num['pair_channel'], name='output_projection')
        init_final_linear(self.output_projection)

        # line 3
        # Fix: this layer was previously created with name='output_projection'
        # (copy-paste from the layer above), colliding with the real output
        # projection's op name. Give it its own name, consistent with the
        # left_gate/right_gate naming convention above.
        self.gating_linear = nn.Linear(self.channel_num['pair_channel'],
                                       self.channel_num['pair_channel'], name='gating_linear')
        init_gate_linear(self.gating_linear)

    def forward(self, act, mask):
        """Builds TriangleMultiplication module.

        Arguments:
            act: Pair activations, shape [batch, N_res, N_res, c_z]
            mask: Pair mask, shape [batch, N_res, N_res].

        Returns:
            Outputs, same shape/type as act.
        """
        # Outgoing [batch, N_res//dap_size, N_res] => [batch, N_res//dap_size, N_res, 1]
        # Incoming [batch, N_res, N_res//dap_size] => [batch, N_res, N_res//dap_size, 1]
        mask = paddle.unsqueeze(mask, axis=-1)  # [batch, N_res, N_res, 1]

        # Outgoing [B, N_res//dap_size, N_res, c_z]
        # Incoming [B, N_res, N_res//dap_size, c_z]
        act = self.layer_norm_input(act)  # line 1

        # Masked left/right projections.
        # Outgoing [B, N_res//dap_size, N_res, c_z] => [B, N_res//dap_size, N_res, num_intermediate_channel]
        # Incoming [B, N_res, N_res//dap_size, c_z] => [B, N_res, N_res//dap_size, num_intermediate_channel]
        left_proj_act = mask * self.left_projection(act)
        right_proj_act = mask * self.right_projection(act)

        # Outgoing [B, N_res//dap_size, N_res, c_z] => [B, N_res//dap_size, N_res, num_intermediate_channel]
        # Incoming [B, N_res, N_res//dap_size, c_z] => [B, N_res, N_res//dap_size, num_intermediate_channel]
        left_gate_values = nn.functional.sigmoid(self.left_gate(act))
        right_gate_values = nn.functional.sigmoid(self.right_gate(act))

        # Outgoing [B, N_res//dap_size, N_res, num_intermediate_channel]
        # Incoming [B, N_res, N_res//dap_size, num_intermediate_channel]
        left_proj_act = left_proj_act * left_gate_values
        right_proj_act_before = right_proj_act * right_gate_values

        # "Outgoing" edges equation: 'ikc,jkc->ijc'
        # "Incoming" edges equation: 'kjc,kic->ijc'
        # Note on the Suppl. Alg. 11 & 12 notation:
        # For the "outgoing" edges, a = left_proj_act and b = right_proj_act
        # For the "incoming" edges, it's swapped:
        #   b = left_proj_act and a = right_proj_act

        # The right operand must be full (un-sharded) along the contracted axis,
        # so gather it across dap shards first.
        if self.config.equation == 'ikc,jkc->ijc':
            # Outgoing
            # [B, N_res//dap_size, N_res, num_intermediate_channel] => [B, N_res, N_res, num_intermediate_channel]
            right_proj_act = dap.all_gather(right_proj_act_before, axis=1)
        elif self.config.equation == 'kjc,kic->ijc':
            # Incoming
            # [B, N_res, N_res//dap_size, num_intermediate_channel] => [B, N_res, N_res, num_intermediate_channel]
            right_proj_act = dap.all_gather(right_proj_act_before, axis=2)
        else:
            raise ValueError('unknown equation.')

        # Outgoing [B, N_res//dap_size, N_res, c_z]
        # Incoming [B, N_res, N_res//dap_size, c_z]
        gate_values = nn.functional.sigmoid(self.gating_linear(act))  # line 3

        if self.config.equation == 'ikc,jkc->ijc':
            # Outgoing
            dim, out_idx = 1, 1
            equation = 'bikc,bjkc->bijc'
            # [B, N_res, N_res, num_intermediate_channel]
            right_proj_act_after = dap.all_gather_opp(right_proj_act, axis=1)
        elif self.config.equation == 'kjc,kic->ijc':
            # Incoming
            dim, out_idx = 2, 2
            equation = 'bkjc,bkic->bijc'
            # [B, N_res, N_res, num_intermediate_channel]
            right_proj_act_after = dap.all_gather_opp(right_proj_act, axis=2)
        else:
            raise ValueError('unknown equation.')

        if not self.training:
            # Low-memory inference: run the einsum in subbatches along `dim`.
            einsum_fn = subbatch(paddle.einsum, [1], [dim],
                                 self.global_config.subbatch_size, out_idx)
            act = einsum_fn(equation, left_proj_act, right_proj_act_after)
        else:
            # Outgoing equation = 'bikc,bjkc->bijc'
            # [B, N_res//dap_size, N_res, num_intermediate_channel], [B, N_res, N_res, num_intermediate_channel]
            # => [B, N_res//dap_size, N_res, num_intermediate_channel]
            # Incoming equation = 'bkjc,bkic->bijc'
            # [B, N_res, N_res//dap_size, num_intermediate_channel], [B, N_res, N_res, num_intermediate_channel]
            # => [B, N_res, N_res//dap_size, num_intermediate_channel]
            act = paddle.einsum(equation, left_proj_act, right_proj_act_after)

        act = self.center_layer_norm(act)
        act = self.output_projection(act)  # line 4

        act = act * gate_values  # apply the output gate (line 3)

        return act
class TemplatePair(nn.Layer):
    """Pair processing for the templates.

    Jumper et al. (2021) Suppl. Alg. 16 "TemplatePairStack" lines 2-6
    """
    def __init__(self, channel_num, config, global_config):
        super(TemplatePair, self).__init__()
        self.config = config
        self.global_config = global_config

        # The template pair stack runs at its own channel width, taken from
        # the ending-node attention config; shadow the incoming channel_num.
        channel_num = {}
        channel_num['pair_channel'] = self.config.triangle_attention_ending_node.value_dim

        self.triangle_attention_starting_node = TriangleAttention(channel_num,
            self.config.triangle_attention_starting_node, self.global_config,
            name='triangle_attention_starting_node')
        self.triangle_starting_dropout = self._build_dropout(
            self.triangle_attention_starting_node)

        self.triangle_attention_ending_node = TriangleAttention(channel_num,
            self.config.triangle_attention_ending_node, self.global_config,
            name='triangle_attention_ending_node')
        self.triangle_ending_dropout = self._build_dropout(
            self.triangle_attention_ending_node)

        self.triangle_multiplication_outgoing = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_outgoing, self.global_config,
            name='triangle_multiplication_outgoing')
        self.triangle_outgoing_dropout = self._build_dropout(
            self.triangle_multiplication_outgoing)

        self.triangle_multiplication_incoming = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_incoming, self.global_config,
            name='triangle_multiplication_incoming')
        self.triangle_incoming_dropout = self._build_dropout(
            self.triangle_multiplication_incoming)

        self.pair_transition = Transition(channel_num, self.config.pair_transition,
            self.global_config, is_extra_msa=False,
            transition_type='pair_transition')
        self.pair_transition_dropout = self._build_dropout(self.pair_transition)

    def _build_dropout(self, module):
        # Construct the dropout layer configured for `module`, using the
        # n-d variant when the global config asks for it.
        rate, axis = self._parse_dropout_params(module)
        if self.global_config.use_dropout_nd:
            return Dropout(rate, axis=axis)
        return nn.Dropout(rate, axis=axis)

    def _parse_dropout_params(self, module):
        # Dropout is disabled entirely in deterministic mode; shared dropout
        # broadcasts the mask over all axes except the orientation axis.
        dropout_rate = 0.0 if self.global_config.deterministic else \
            module.config.dropout_rate
        dropout_axis = None
        if module.config.shared_dropout:
            dropout_axis = {
                'per_row': [0, 2, 3],
                'per_column': [0, 1, 3],
            }[module.config.orientation]

        return dropout_rate, dropout_axis

    def forward(self, pair_act, pair_mask):
        """Builds one block of TemplatePair module.

        Arguments:
            pair_act: Pair activations for single template, shape [batch, N_res, N_res, c_t].
            pair_mask: Pair mask, shape [batch, N_res, N_res].

        Returns:
            Updated pair_act, shape [batch, N_res, N_res, c_t].
        """
        # Row- and column-sharded views of the mask for the dap layout.
        row_mask = dap.scatter(pair_mask, axis=1)
        col_mask = dap.scatter(pair_mask, axis=2)

        update = self.triangle_starting_dropout(
            self.triangle_attention_starting_node(pair_act, row_mask))
        pair_act = pair_act + update

        pair_act = dap.row_to_col(pair_act)
        update = self.triangle_ending_dropout(
            self.triangle_attention_ending_node(pair_act, col_mask))
        pair_act = pair_act + update

        pair_act = dap.col_to_row(pair_act)
        update = self.triangle_outgoing_dropout(
            self.triangle_multiplication_outgoing(pair_act, row_mask))
        pair_act = pair_act + update

        pair_act = dap.row_to_col(pair_act)
        update = self.triangle_incoming_dropout(
            self.triangle_multiplication_incoming(pair_act, col_mask))
        pair_act = pair_act + update

        update = self.pair_transition_dropout(
            self.pair_transition(pair_act, pair_mask))
        pair_act = pair_act + update

        pair_act = dap.col_to_row(pair_act)
        return pair_act
class SingleTemplateEmbedding(nn.Layer):
    """Embeds a single template.

    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9+11
    """
    def __init__(self, channel_num, config, global_config):
        super(SingleTemplateEmbedding, self).__init__()
        self.config = config
        self.channel_num = channel_num
        self.global_config = global_config

        # Projects the concatenated per-template features (distogram, masks,
        # one-hot aatype, unit vectors) to the template pair-stack width.
        self.embedding2d = nn.Linear(channel_num['template_pair'],
            self.config.template_pair_stack.triangle_attention_ending_node.value_dim)

        self.template_pair_stack = nn.LayerList()
        for _ in range(self.config.template_pair_stack.num_block):
            self.template_pair_stack.append(TemplatePair(
                self.channel_num, self.config.template_pair_stack, self.global_config))

        self.output_layer_norm = nn.LayerNorm(self.config.attention.key_dim)

    def forward(self, query_embedding, batch, mask_2d):
        """Build the single template embedding.

        Arguments:
            query_embedding: Query pair representation, shape [batch, N_res, N_res, c_z].
            batch: A batch of template features (note the template dimension has been
                stripped out as this module only runs over a single template).
            mask_2d: Padding mask (Note: this doesn't care if a template exists,
                unlike the template_pseudo_beta_mask).

        Returns:
            A template embedding [N_res, N_res, c_z].
        """
        assert mask_2d.dtype == query_embedding.dtype
        dtype = query_embedding.dtype
        num_res = batch['template_aatype'].shape[1]

        # Pairwise mask from the per-residue pseudo-beta mask.
        template_mask = batch['template_pseudo_beta_mask']
        template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
        template_mask_2d = template_mask_2d.astype(dtype)

        # Binned pairwise-distance histogram of pseudo-beta positions.
        template_dgram = dgram_from_positions(
            batch['template_pseudo_beta'],
            **self.config.dgram_features)
        template_dgram = template_dgram.astype(dtype)

        # One-hot amino-acid type (22 classes: 20 aa + unknown + gap,
        # presumably — confirm against residue_constants).
        aatype = nn.functional.one_hot(batch['template_aatype'], 22)
        aatype = aatype.astype(dtype)

        # Tile the per-residue one-hot to both residue axes of the pair grid.
        to_concat = [template_dgram, template_mask_2d[..., None]]
        to_concat.append(paddle.tile(aatype[..., None, :, :],
                                     [1, num_res, 1, 1]))
        to_concat.append(paddle.tile(aatype[..., None, :],
                                     [1, 1, num_res, 1]))

        # Backbone frames from N/CA/C atom positions.
        n, ca, c = [residue_constants.atom_order[a]
                    for a in ('N', 'CA', 'C')]
        rot, trans = quat_affine.make_transform_from_reference(
            n_xyz=batch['template_all_atom_positions'][..., n, :],
            ca_xyz=batch['template_all_atom_positions'][..., ca, :],
            c_xyz=batch['template_all_atom_positions'][..., c, :])
        affines = quat_affine.QuatAffine(
            quaternion=quat_affine.rot_to_quat(rot),
            translation=trans,
            rotation=rot)

        # Express every residue's CA position in every other residue's local
        # frame, then normalize to unit vectors (eps keeps rsqrt finite).
        points = [paddle.unsqueeze(x, axis=-2) for x in
                  paddle.unstack(affines.translation, axis=-1)]
        affine_vec = affines.invert_point(points, extra_dims=1)
        inv_distance_scalar = paddle.rsqrt(
            1e-6 + sum([paddle.square(x) for x in affine_vec]))

        # Backbone affine mask: whether the residue has C, CA, N
        # (the template mask defined above only considers pseudo CB).
        template_mask = (
            batch['template_all_atom_masks'][..., n] *
            batch['template_all_atom_masks'][..., ca] *
            batch['template_all_atom_masks'][..., c])
        template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

        inv_distance_scalar *= template_mask_2d.astype(inv_distance_scalar.dtype)
        unit_vector = [(x * inv_distance_scalar)[..., None] for x in affine_vec]
        unit_vector = [x.astype(dtype) for x in unit_vector]
        if not self.config.use_template_unit_vector:
            # Config switch: zero out the orientation features while keeping
            # the input width constant.
            unit_vector = [paddle.zeros_like(x) for x in unit_vector]
        to_concat.extend(unit_vector)

        template_mask_2d = template_mask_2d.astype(dtype)
        to_concat.append(template_mask_2d[..., None])

        act = paddle.concat(to_concat, axis=-1)

        # Mask out non-template regions so we don't get arbitrary values in the
        # distogram for these regions.
        act *= template_mask_2d[..., None]

        act = self.embedding2d(act)

        # Shard over residues for the dap-parallel pair stack; blocks past
        # recompute_start_block_index use gradient recomputation in training.
        act = dap.scatter(act, axis=1)
        for idx, pair_encoder in enumerate(self.template_pair_stack):
            act = recompute_wrapper(pair_encoder, act, mask_2d,
                is_recompute=self.training and idx >= self.config.template_pair_stack.recompute_start_block_index)
        act = dap.gather(act, axis=1)

        act = self.output_layer_norm(act)
        return act
class TemplateEmbedding(nn.Layer):
    """Embeds a set of templates.

    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
    Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
    """
    def __init__(self, channel_num, config, global_config):
        super(TemplateEmbedding, self).__init__()
        self.config = config
        self.global_config = global_config

        # One shared embedder is applied to each template in turn.
        self.single_template_embedding = SingleTemplateEmbedding(
            channel_num, config, global_config)
        self.attention = Attention(
            config.attention, global_config,
            channel_num['pair_channel'],
            config.attention.key_dim,
            channel_num['pair_channel'])

    def forward(self, query_embedding, template_batch, mask_2d):
        """Build TemplateEmbedding module.

        Arguments:
            query_embedding: Query pair representation, shape [n_batch, N_res, N_res, c_z].
            template_batch: A batch of template features.
            mask_2d: Padding mask (Note: this doesn't care if a template exists,
                unlike the template_pseudo_beta_mask).

        Returns:
            A template embedding [n_batch, N_res, N_res, c_z].
        """
        dtype = query_embedding.dtype
        num_res = query_embedding.shape[1]
        query_channels = query_embedding.shape[-1]
        num_templates = template_batch['template_mask'].shape[1]
        num_channels = (self.config.template_pair_stack
                        .triangle_attention_ending_node.value_dim)

        template_mask = template_batch['template_mask'].astype(dtype)

        # Embed each template independently (by default, num_templates = 4).
        per_template = []
        for idx in range(num_templates):
            single_batch = {k: paddle.squeeze(v.slice([1], [idx], [idx + 1]), axis=1)
                            for k, v in template_batch.items()}
            per_template.append(self.single_template_embedding(
                query_embedding, single_batch, mask_2d))
        template_pair_repr = paddle.stack(per_template, axis=1)

        # Pointwise attention: each query pair position attends over the
        # template dimension.
        flat_query = paddle.reshape(
            query_embedding, [-1, num_res * num_res, 1, query_channels])
        flat_templates = paddle.reshape(
            paddle.transpose(template_pair_repr, [0, 2, 3, 1, 4]),
            [-1, num_res * num_res, num_templates, num_channels])

        # Missing templates get a large negative attention bias.
        bias = 1e9 * (template_mask[:, None, None, None, :] - 1.)

        if self.training:
            emb = self.attention(flat_query, flat_templates, bias)
        else:
            # Low-memory inference path: subbatch over flat pair positions.
            sb_attn = subbatch(self.attention, [0, 1], [1, 1],
                               self.config.subbatch_size, 1)
            emb = sb_attn(flat_query, flat_templates, bias)

        emb = paddle.reshape(
            emb, [-1, num_res, num_res, query_channels])

        # No gradients if no templates.
        emb *= (paddle.sum(template_mask) > 0.).astype(emb.dtype)
        return emb
| 107,160 | 42.543681 | 136 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold-single/alphafold_paddle/model/utils.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils."""
import os
import numbers
import functools
import collections
import paddle
import numpy as np
from typing import Any, Mapping
from alphafold_paddle.common import protein
from alphafold_paddle.common import confidence
def jax_params_to_paddle(params):
    """Convert a dict of JAX AlphaFold parameters to Paddle state-dict keys.

    Rule 1: alphafold/alphafold_iteration/evoformer/template_embedding/single_template_embedding/template_pair_stack/* ==>
                '...template_pair_stack.0.*'
                '...template_pair_stack.1.*'
                ...

    Rule 2: alphafold/alphafold_iteration/evoformer/extra_msa_stack/* ==>
                'alphafold_iteration.evoformer.extra_msa_stack.0.*',
                'alphafold_iteration.evoformer.extra_msa_stack.1.*',
                ...

    Rule 3: alphafold/alphafold_iteration/evoformer/evoformer_iteration/* ==>
                'alphafold.alphafold_iteration.evoformer.evoformer_iteration.0.*',
                'alphafold.alphafold_iteration.evoformer.evoformer_iteration.1.*',
                ...

    Rule 4: */__layer_stack_no_state/* ==> '*.*'

    Rule 5: *//weights ==> '*.weight'

    Rule 6: *//bias ==> '*.bias'

    Rule 7: *//scale ==> '*.weight'

    Rule 8: *//offset ==> '*.bias'
    """
    # Prefixes whose arrays carry a leading stacked-layer dimension (rules 1-3).
    stacked_prefixes = (
        'alphafold/alphafold_iteration/evoformer/template_embedding/single_template_embedding/template_pair_stack/',
        'alphafold/alphafold_iteration/evoformer/extra_msa_stack/',
        'alphafold/alphafold_iteration/evoformer/evoformer_iteration/',
    )
    layer_stack_marker = '__layer_stack_no_state/'

    converted = dict()

    def _rename(key_part):
        # Rules 5-8 first (they must win over the plain '//' -> '.' rewrite),
        # then collapse the remaining separators.
        key_part = key_part.replace('//weights', '.weight')
        key_part = key_part.replace('//bias', '.bias')
        key_part = key_part.replace('//scale', '.weight')
        key_part = key_part.replace('//offset', '.bias')
        key_part = key_part.replace('//', '.')
        return key_part.replace('/', '.')

    def _expand_stacked(prefix, key):
        # Split a stacked array into one entry per layer index (rules 1-3),
        # stripping the layer-stack marker (rule 4) when present.
        depth = params[key].shape[0]
        suffix = key[len(prefix):]
        if suffix.startswith(layer_stack_marker):
            suffix = suffix[len(layer_stack_marker):]
        suffix = _rename(suffix)
        dotted_prefix = prefix.replace('/', '.')
        for layer_idx in range(depth):
            converted[f'{dotted_prefix}{layer_idx}.{suffix}'] = \
                np.copy(params[key][layer_idx])

    for key in params.keys():
        matched = next((p for p in stacked_prefixes if key.startswith(p)), None)
        if matched is not None:
            _expand_stacked(matched, key)
        else:
            converted[_rename(key)] = np.copy(params[key])

    return converted
def slice_batch(batch, i):
    """Select the i-th example from every entry of a batched feature dict."""
    return {key: value[i] for key, value in batch.items()}
def add_batch_dim(batch):
    """Prepend a singleton batch axis to every entry of a feature dict."""
    return {key: value[None] for key, value in batch.items()}
def map_to_tensor(batch, add_batch=False):
    """Convert every entry of a feature dict to a paddle tensor.

    When add_batch is True, a singleton batch axis is prepended first.
    """
    source = add_batch_dim(batch) if add_batch else batch
    return {key: paddle.to_tensor(value) for key, value in source.items()}
def mask_mean(mask, value, axis=None, drop_mask_channel=False, eps=1e-10):
    """Mean of `value` over `axis`, weighted by `mask`.

    The mask may be broadcast (size 1) along any reduced axis; the divisor is
    scaled accordingly. `eps` guards against division by an all-zero mask.
    """
    if drop_mask_channel:
        mask = mask[:, 0]

    assert len(mask.shape) == len(value.shape)

    if isinstance(axis, numbers.Integral):
        axis = [axis]
    elif axis is None:
        axis = list(range(len(mask.shape)))

    assert isinstance(axis, collections.abc.Iterable), \
        'axis needs to be either an iterable, integer or "None"'

    # Account for mask axes of size 1 that broadcast against `value`.
    broadcast_factor = 1.
    for reduce_axis in axis:
        value_size = value.shape[reduce_axis]
        mask_size = mask.shape[reduce_axis]
        if mask_size == 1:
            broadcast_factor *= value_size
        else:
            assert mask_size == value_size

    masked_sum = paddle.sum(mask * value, axis=axis)
    divisor = paddle.sum(mask, axis=axis) * broadcast_factor + eps
    return masked_sum / divisor
def batched_gather(params, indices, axis=0, batch_dims=0):
    """Gather with batching semantics, like tf.gather(..., batch_dims=...).

    Args:
        params: source tensor.
        indices: integer index tensor; its first `batch_dims` axes must match
            those of `params`.
        axis: axis of `params` (counted after the batch dims) to gather along;
            may be negative.
        batch_dims: number of shared leading batch dimensions.

    Returns:
        Gathered tensor, with `indices`' index shape spliced in at `axis`.
    """
    # Implement gather with batching, like tensorflow:
    # https://www.tensorflow.org/api_docs/python/tf/gather#batching
    # print(params.shape, indices.shape, axis)
    p, i = params, indices
    rank = len(p.shape)
    # Normalize a possibly-negative axis.
    axis = (rank + axis) % rank
    # The stride of axis
    stride = p.shape[batch_dims + axis]

    if batch_dims == 0 and len(i.shape) == 1:
        # Simple case: plain 1-D gather.
        return paddle.gather(p, i, axis=axis)
    elif batch_dims == 0:
        # No batch dims but multi-dimensional indices: gather on the flattened
        # indices, then re-expand the index shape in place of `axis`.
        flat_i = i.reshape([-1])
        gathered = paddle.gather(p, flat_i, axis=axis)
        shape = p.shape[:axis] + i.shape
        if axis < rank - 1:
            shape += params.shape[axis + 1:]
        return gathered.reshape(shape)

    b = batch_dims
    a = axis
    assert p.shape[:b] == i.shape[:b]
    # Total number of batch elements.
    bn = np.prod(p.shape[:b])

    # Shift batch dimensions right to bundle with axis
    if a > 0:
        perm = list(range(rank))
        perm = perm[b:(b + a)] + perm[:b] + perm[(b + a):]
        p = p.transpose(perm)

    # Merge params' batch+axis
    p = p.reshape(p.shape[:a] + [-1] + p.shape[(b + a + 1):])

    # indices = [Batch..., Index...]
    # Expand the index values across batch elements
    # (each batch element's indices are offset by its position * stride so a
    # single flat gather covers all batches).
    strides = paddle.arange(bn).unsqueeze(-1) * stride
    i = i.reshape([bn, -1])
    flat_i = paddle.flatten(i + strides)

    # Do gather
    gathered = paddle.gather(p, flat_i, axis=axis)

    # Unbundle batch and index dimensions
    unbundled_shape = p.shape[:a] + indices.shape + p.shape[a + 1:]
    gathered = gathered.reshape(unbundled_shape)

    # Shift batch dimensions back to the left
    if a > 0:
        perm = list(range(len(unbundled_shape)))
        perm = perm[a:(a + b)] + perm[:a] + perm[(a + b):]
        gathered = gathered.transpose(perm)

    return gathered
def subbatch(f, arg_idx, dim, bs, out_idx, same_arg_idx={}):
    """ Converts a function to one that applies to subbatch of an input
    dimension.

    Args:
        f(Callable): original function.
        arg_idx([int]): indices of the inputs to be subbatched.
        dim([int]): index of the dimension to be subbatched.
        bs(int): subbatch size.
        out_idx(int): index of the output dimension that needs stacking
        same_arg_idx(dict), optional: index of same arg mapping. e.g {1: 0} means arg[1] == arg[0],
            we assign _args[1] = _args[0] avoiding slice repeatly.

    Returns:
        converted function.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        assert len(arg_idx) == len(dim), f'Number of batching args and number of batching dims should match.'

        inps = [args[i] for i in arg_idx]
        dim_width = [inp.shape[d] for inp, d in zip(inps, dim)]
        assert len(set(dim_width)) == 1, f'Batch sizes should be kept equal.'

        # NOTE(review): tensors are used as dict keys here (hashed by object
        # identity) to map each subbatched input to its subbatch dimension.
        inp_dim = {inp: d for inp, d in zip(inps, dim)}

        dim_width = dim_width[0]
        if dim_width < bs:
            # Input already smaller than one subbatch: call through directly.
            return f(*args, **kwargs)

        outs = []
        for slice_at in np.arange(0, dim_width, bs):
            _args = []
            for i, inp in enumerate(args):
                if i in same_arg_idx:
                    # Reuse the already-sliced duplicate argument instead of
                    # slicing the same tensor twice.
                    assert i > same_arg_idx[i], f"expect i > same_arg_idx[i], but got i: {i} and same_arg_idx[i]: {same_arg_idx[i]}"
                    _args.append(_args[same_arg_idx[i]])
                elif i in arg_idx:
                    # NOTE(review): the last subbatch may be partial; this
                    # appears to rely on Tensor.slice clamping the end index
                    # to the dimension size — confirm against paddle docs.
                    inp = inp.slice([inp_dim[inp]], [slice_at], [slice_at + bs])
                    _args.append(inp)
                else:
                    _args.append(inp)
            outs.append(f(*_args, **kwargs))

        # Re-assemble the per-subbatch outputs along the output dimension.
        return paddle.concat(outs, out_idx)

    return wrapper
def get_confidence_metrics(
        prediction_result: Mapping[str, Any]) -> Mapping[str, Any]:
    """Post processes prediction_result to get confidence metrics."""
    metrics = {
        'plddt': confidence.compute_plddt(
            prediction_result['predicted_lddt']['logits']),
    }

    # PAE head is optional; pTM is only defined when it is present.
    if 'predicted_aligned_error' in prediction_result:
        pae = prediction_result['predicted_aligned_error']
        metrics.update(confidence.compute_predicted_aligned_error(
            pae['logits'], pae['breaks']))
        metrics['ptm'] = confidence.predicted_tm_score(
            pae['logits'], pae['breaks'])

    return metrics
def generate_unrelaxed_pdb(aatype, residue_index, model_output, pdb_path,
                           b_factors=None):
    """Write the structure-module output as an unrelaxed PDB file.

    Returns the constructed protein.Protein (residue indices are shifted to
    be 1-based for PDB). Missing b_factors default to zeros.
    """
    fold_output = model_output['structure_module']
    if b_factors is None:
        b_factors = np.zeros_like(fold_output['final_atom_mask'])

    # NOTE: for single protein, chain_index is always 'A' (idx:0)
    unrelaxed_protein = protein.Protein(
        aatype=aatype,
        atom_positions=fold_output['final_atom_positions'],
        atom_mask=fold_output['final_atom_mask'],
        residue_index=residue_index + 1,
        chain_index=np.zeros(aatype.shape),
        b_factors=b_factors)

    with open(pdb_path, 'w') as f:
        f.write(protein.to_pdb(unrelaxed_protein))

    return unrelaxed_protein
def set_tensor_constant(tensor, constant):
    """Fill `tensor` with a constant value, in place."""
    tensor.set_value(paddle.full_like(tensor, constant))


def init_gate_linear(linear):
    """Initialize a gating Linear with weight=0, bias=1 (gate starts open)."""
    set_tensor_constant(linear.weight, 0)
    set_tensor_constant(linear.bias, 1)


def init_final_linear(linear):
    """Zero-initialize the weight of a final output Linear (bias untouched)."""
    set_tensor_constant(linear.weight, 0)
| 10,196 | 31.578275 | 132 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold-single/alphafold_paddle/model/model.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model."""
import os
import io
import time
import pickle
import logging
import pathlib
import numpy as np
import ml_collections
from copy import deepcopy
from typing import Dict, Optional
import paddle
from alphafold_paddle.model import utils
from alphafold_paddle.relax import relax
from alphafold_paddle.model import modules
from alphafold_paddle.common import protein
from alphafold_paddle.common import residue_constants
try:
import tensorflow.compat.v1 as tf
from alphafold_paddle.data.tf_input import input_pipeline
from alphafold_paddle.data.tf_input import proteins_dataset
USE_TF = True
except Exception:
from alphafold_paddle.data.input import input_pipeline
USE_TF = False
logger = logging.getLogger(__name__)
TARGET_FEAT_DIM = 22
MSA_FEAT_DIM = 49
def print_shape(d, level=0):
    """Recursively print shape/dtype of array-like leaves in a nested dict.

    Sub-dicts are printed as headers; each nesting level adds one tab of
    indentation.
    """
    indent = '\t' * level
    for key, value in d.items():
        if type(value) is dict:
            print(indent + key)
            print_shape(value, level=level + 1)
        else:
            print(f'{indent}{key}: {value.shape} {value.dtype}')
def tensor_to_numpy(pred_dict):
    """Recursively convert paddle tensors in a nested dict to numpy, in place."""
    for key, value in pred_dict.items():
        if isinstance(value, paddle.Tensor):
            pred_dict[key] = value.numpy()
        elif type(value) is dict:
            tensor_to_numpy(value)
def slice_pred_dict(pred_dict, slice_idx, ignores=('breaks', 'traj', 'sidechains')):
    """Slice every entry of a (possibly nested) prediction dict along axis 0.

    The dict is mutated in place and also returned for convenience. Entries
    whose key is in `ignores` are left untouched at every nesting level.

    Args:
        pred_dict: nested dict of indexable values (e.g. numpy arrays).
        slice_idx: index selected along the leading axis of each value.
        ignores: keys to skip. Changed from a mutable list default to a tuple
            (best practice for default arguments); callers may still pass any
            container supporting `in`, so this is backward compatible.

    Returns:
        The same (mutated) dict.
    """
    for k in pred_dict.keys():
        if k in ignores:
            continue

        if type(pred_dict[k]) is dict:
            pred_dict[k] = slice_pred_dict(pred_dict[k], slice_idx,
                                           ignores=ignores)
        else:
            pred_dict[k] = pred_dict[k][slice_idx]

    return pred_dict
class RunModel(object):
"""Wrapper for paddle model."""
def __init__(self,
name: str,
config: ml_collections.ConfigDict,
params_path: str,
dynamic_subbatch_size: bool = True):
self.name = name
self.config = config
self.dynamic_subbatch_size = dynamic_subbatch_size
channel_num = {
'target_feat': TARGET_FEAT_DIM,
'msa_feat': MSA_FEAT_DIM,
}
self.alphafold = modules.AlphaFold(channel_num, config.model)
self.init_params(str(params_path))
self.alphafold.eval()
def init_params(self, params_path: str):
if params_path.endswith('.npz'):
logger.info('Load as AlphaFold pre-trained model')
with open(params_path, 'rb') as f:
params = np.load(io.BytesIO(f.read()), allow_pickle=False)
params = dict(params)
pd_params = utils.jax_params_to_paddle(params)
pd_params = {k[len('alphafold.'):]: v for k, v in pd_params.items()}
elif params_path.endswith('.pd'):
logger.info('Load as Paddle model')
pd_params = paddle.load(params_path)
else:
raise ValueError('Unsupported params file type')
self.alphafold.set_state_dict(pd_params)
def preprocess(self,
raw_features: Dict[str, np.ndarray],
random_seed: int,
pkl: pathlib.Path = None) -> Dict[str, paddle.Tensor]:
"""Convert raw input features to model input features"""
if pkl is not None and pkl.exists():
logger.info(f'Use cached {pkl}')
with open(pkl, 'rb') as f:
features = pickle.load(f)
print('########## feature shape ##########')
print_shape(features)
return utils.map_to_tensor(features, add_batch=True)
print('Processing input features')
data_config = deepcopy(self.config.data)
feature_names = data_config.common.unsupervised_features
if data_config.common.use_templates:
feature_names += data_config.common.template_features
num_residues = int(raw_features['seq_length'][0])
data_config.eval.crop_size = num_residues
if 'deletion_matrix_int' in raw_features:
raw_features['deletion_matrix'] = (raw_features.pop(
'deletion_matrix_int').astype(np.float32))
if USE_TF:
data_config.eval.delete_msa_block = False
tf_graph = tf.Graph()
with tf_graph.as_default(), tf.device('/device:CPU:0'):
tf.compat.v1.set_random_seed(random_seed)
tensor_dict = proteins_dataset.np_to_tensor_dict(
np_example=raw_features, features=feature_names)
processed_batch = input_pipeline.process_tensors_from_config(
tensor_dict, data_config)
tf_graph.finalize()
with tf.Session(graph=tf_graph) as sess:
features = sess.run(processed_batch)
else:
array_dict = input_pipeline.np_to_array_dict(
np_example=raw_features, features=feature_names,
use_templates=data_config.common.use_templates)
features = input_pipeline.process_arrays_from_config(
array_dict, data_config)
features = {k: v for k, v in features.items() if v.dtype != 'O'}
extra_msa_length = data_config.common.max_extra_msa
for k in ['extra_msa', 'extra_has_deletion', 'extra_deletion_value',
'extra_msa_mask']:
features[k] = features[k][:, :extra_msa_length]
for k in features.keys():
if features[k].dtype == np.int64:
features[k] = features[k].astype(np.int32)
elif features[k].dtype == np.float64:
features[k] = features[k].astype(np.float32)
if pkl is not None:
with open(pkl, 'wb') as f:
pickle.dump(features, f, protocol=4)
print('Preprocessesing finished')
print('########## feature shape ##########')
print_shape(features)
return utils.map_to_tensor(features, add_batch=True)
def predict(self,
feat: Dict[str, paddle.Tensor],
ensemble_representations: bool = True,
return_representations: bool = True):
"""Predict protein structure and encoding representation"""
if self.dynamic_subbatch_size:
seq_len = feat['aatype'].shape[-1]
extra_msa_num = feat['extra_msa'].shape[-2]
self.update_subbatch_size(seq_len, extra_msa_num)
with paddle.no_grad():
ret = self.alphafold(
feat, {},
ensemble_representations=ensemble_representations,
return_representations=return_representations,
compute_loss=False)
print('Prediction finished')
tensor_to_numpy(ret)
return ret
def postprocess(self,
aatype: np.ndarray,
residue_index: np.ndarray,
relaxer: relax.AmberRelaxation,
prediction: Dict[str, np.ndarray],
output_dir: pathlib.Path,
slice_idx: int = 0,
timings: Optional[Dict[str, float]] = None):
"""Compute pLDDT, save unrelaxed pdb and execute relaxation"""
single_pred = slice_pred_dict(prediction, slice_idx)
prediction.update(utils.get_confidence_metrics(single_pred))
plddt = prediction['plddt']
logger.info(f'{self.name} average pLDDT: {np.mean(plddt)}')
if 'max_predicted_aligned_error' in prediction:
err = prediction['max_predicted_aligned_error']
logger.info(f'{self.name} max predicted aligned error: {err}')
with open(output_dir.joinpath(f'result_{self.name}.pkl'), 'wb') as f:
pickle.dump(prediction, f, protocol=4)
plddt_b_factors = np.repeat(
plddt[:, None], residue_constants.atom_type_num, axis=-1)
prot = utils.generate_unrelaxed_pdb(
aatype, residue_index, single_pred,
output_dir.joinpath(f'unrelaxed_{self.name}.pdb'),
b_factors=plddt_b_factors)
if relaxer is not None:
t0 = time.time()
relaxed_pdb_str = relaxer.process(prot=prot)[0]
if timings is not None:
timings[f'relax_{self.name}'] = time.time() - t0
pdb = f'relaxed_{self.name}.pdb'
with open(output_dir.joinpath(pdb), 'w') as f:
f.write(relaxed_pdb_str)
else:
relaxed_pdb_str = protein.to_pdb(prot)
print('Postprocessing finished')
return relaxed_pdb_str
def update_subbatch_size(self, seq_len, extra_msa_num):
    """Adjust the model's subbatch size for the given input dimensions.

    For sequences shorter than a per-depth threshold, subbatching is
    effectively disabled by raising the subbatch size to the full
    extra-MSA depth; longer sequences keep the current setting.

    Raises:
        ValueError: if `extra_msa_num` has no known strategy.
    """
    # extra-MSA depth -> sequence-length threshold below which we
    # disable subbatching.
    disable_below = {5120: 200, 1024: 600}
    if extra_msa_num not in disable_below:
        raise ValueError('Unknown subbatch strategy')
    if seq_len < disable_below[extra_msa_num]:
        # disable subbatch
        self.alphafold.global_config.subbatch_size = extra_msa_num
| 9,721 | 34.097473 | 84 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold/train.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training scripts."""
import os, io
from os.path import exists, join, dirname
import time
import sys
import argparse
import numpy as np
import random
import json
import ml_collections
import logging
import paddle
from paddle import distributed as dist
from tensorboardX import SummaryWriter
from utils.utils import get_model_parameter_size, add_to_data_writer, upload_to_hadoop, csv_print
from utils.utils import get_custom_amp_list
from utils.metric import ResultsCollect
from utils.model import RunModel
from utils.exponential_moving_average import ExponentialMovingAverage, EMA
from utils.dataset import LoopedBatchSampler, AF2Dataset, AF2TestDataset, AF2DistillDataset
from utils.param_fuse import get_fused_param_groups
from utils.clip_grad import clip_grad_norm_
from utils.init_env import init_seed, init_distributed_env
from utils.misc import TrainLogger, set_logging_level
from alphafold_paddle.model import config, utils
from alphafold_paddle.data.utils import align_feat, align_label
from ppfleetx.distributed.protein_folding import dap, bp, dp
from ppfleetx.distributed.protein_folding.scg import scg
# Maximum residue count evaluated during testing; longer proteins are
# skipped by eval() (via check_batch) to bound memory use. Overridable
# through the MAX_EVAL_SIZE environment variable.
MAX_EVAL_SIZE = int(os.environ.get('MAX_EVAL_SIZE', 1400))
print(f'[ENV] MAX_EVAL_SIZE:{MAX_EVAL_SIZE}')
def time_me():
    """Return the current wall-clock time in seconds.

    Kept as a wrapper so a device synchronization call can be re-enabled
    here when precise GPU timing is needed.
    """
    # paddle.device.cuda.synchronize()
    now = time.time()
    return now
def get_optimizer(args, opt_config, model):
    """Build the Adam optimizer and its warmup LR scheduler.

    Args:
        args: parsed command-line arguments (parallel degrees, precision).
        opt_config: optimizer section of the train config.
        model: the model whose parameters will be optimized.

    Returns:
        Tuple `(optimizer, lr_scheduler)`; the scheduler wraps an optional
        step decay inside a linear warmup.
    """
    # A grad_clip of 0 in the config disables gradient clipping entirely.
    clip = None
    if opt_config.grad_clip != 0:
        clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=float(opt_config.grad_clip))

    # After warmup, either decay the LR stepwise or hold it constant.
    if 'decay' in opt_config:
        post_warmup = paddle.optimizer.lr.StepDecay(
            learning_rate=opt_config.lr,
            step_size=opt_config.decay.step_size,
            gamma=opt_config.decay.gamma)
    else:
        post_warmup = opt_config.lr

    scheduler = paddle.optimizer.lr.LinearWarmup(
        learning_rate=post_warmup,
        warmup_steps=opt_config.warmup_steps,
        start_lr=opt_config.lr * 0.01,
        end_lr=opt_config.lr,
        verbose=False)

    # Fuse parameter groups when any model-parallel degree is active.
    param_groups = get_fused_param_groups(
        model, args.dap_degree > 1 or args.bp_degree > 1)
    use_multi_precision = args.precision == "bf16" and args.amp_level == "O2"
    adam = paddle.optimizer.Adam(
        learning_rate=scheduler,
        epsilon=1e-06,
        grad_clip=clip,
        parameters=param_groups,
        multi_precision=use_multi_precision,
    )
    return adam, scheduler
def add_dyna_features(train_config, model_config, batch, step):
    """add `num_iter_recycling` and `use_clamped_fape`"""
    # Fixed base seed combined with `step`: every worker draws the same
    # "random" values at a given step, keeping augmentation reproducible.
    random_key = 32
    shape = batch['feat']['aatype'].shape[:2]
    # Sample the number of recycling iterations for this step from
    # [0, num_recycle] inclusive (Generator.integers upper bound is exclusive).
    num_iter_recycling = np.random.default_rng(random_key + step).integers(
        model_config.model.num_recycle + 1)
    batch['feat']['num_iter_recycling'] = paddle.full(shape, num_iter_recycling)
    logging.debug(f'\tAdd dyna feature num_iter_recycling: {num_iter_recycling}')
    if train_config.unclamped_fape:
        # With probability 0.1, mark this sample to train with unclamped FAPE.
        # NOTE(review): a fresh RNG with the same seed is constructed here,
        # so this draw is not statistically independent of the one above.
        if np.random.default_rng(random_key + step).uniform() < 0.1:
            batch['label']['use_clamped_fape'] = paddle.full(shape, 0.0)
            logging.debug(f'\tAdd dyna label use_clamped_fape: 0.0')
def check_batch(batch, max_length=None):
    """Log the shapes of key batch tensors and enforce a length limit.

    Args:
        batch: dict with 'name', and 'feat'/'label' sub-dicts of tensors.
        max_length: if given, reject batches whose residue count exceeds it.

    Returns:
        True if the batch is acceptable, False if it should be skipped.
    """
    def _print(k, d):
        # Only log keys that are actually present in this batch.
        if k in d:
            logging.debug(f'\t{k}: {d[k].shape}')
    logging.debug(f'Get protein_name: {batch["name"]}')
    for k in ['aatype', 'msa_feat', 'extra_msa', 'masked_msa_only']:
        _print(k, batch["feat"])
    for k in ['all_atom_positions']:
        _print(k, batch["label"])
    # Axis 2 of aatype is the residue count (layout appears to be
    # (batch, ensemble, num_res) — matches the model's forward comments).
    L = batch["feat"]['aatype'].shape[2]
    # Fixed: `not max_length is None` -> `is not None`, and the log typo
    # "two long length" -> "too long length".
    if max_length is not None and L > max_length:
        logging.debug(f'\tskip {batch["name"]} due to too long length')
        return False
    return True
@paddle.no_grad()
def eval(args, model, eval_dataset, compute_loss, cache_dir=None):
    """evaluate a given dataset

    Runs the model over `eval_dataset` one sample at a time (no gradients),
    collecting metrics through ResultsCollect.

    NOTE(review): the name shadows the builtin `eval`; kept for
    backward compatibility with existing callers.
    """
    model.eval()
    data_loader = paddle.io.DataLoader(
        dataset=eval_dataset,
        batch_size=1,
        drop_last=False,
        num_workers=0)
    res_collect = ResultsCollect(
        eval_tm_score=True,
        tm_score_bin=args.tm_score_bin,
        lddt_score_bin=args.lddt_score_bin,
        cache_dir=cache_dir, distributed=args.distributed)
    s0 = time_me()
    for i, batch in enumerate(data_loader):
        # Skip proteins longer than MAX_EVAL_SIZE to bound memory use.
        if not check_batch(batch, max_length=MAX_EVAL_SIZE):
            continue
        s1 = time_me()
        if args.dap_degree > 1:
            # Pad features/labels so their sizes are compatible with the
            # DAP parallel degree.
            batch['feat'] = align_feat(batch['feat'], args.dap_degree)
            batch['label'] = align_label(batch['label'], args.dap_degree)
        # inference
        def _forward_with_precision(batch):
            # Run under bf16 autocast when requested, plain fp32 otherwise.
            if args.precision == "bf16":
                black_list, white_list = get_custom_amp_list()
                with paddle.amp.auto_cast(enable=True,
                                          custom_white_list=white_list,
                                          custom_black_list=black_list,
                                          level=args.amp_level,
                                          dtype='bfloat16'):
                    return model(batch, compute_loss=compute_loss)
            elif args.precision == "fp32":
                return model(batch, compute_loss=compute_loss)
            else:
                raise ValueError("Please choose precision from bf16 and fp32! ")
        # res = model(batch, compute_loss=compute_loss)
        res = _forward_with_precision(batch)
        if compute_loss:
            results, loss = res
            # bf16 tensors must be cast to fp32 before .item() extraction.
            if loss.dtype == paddle.bfloat16:
                loss = loss.cast("float32").item()
            else:
                loss = loss.item()
        else:
            # No loss requested: report a zero placeholder.
            results, loss = res, np.zeros([1])
        s2 = time_me()
        extra_dict = {'loss': np.array(loss)[0], 'data_time': s1 - s0, 'train_time': s2 - s1}
        res_collect.add(batch, results, extra_dict)
        print(f'Test_step: {i} loss: {extra_dict}')
        s0 = time_me()
    res = res_collect.get_result()
    return res
def full_eval(args, cur_step, model, valid_dataset, test_dataset_dict, data_writer, ema):
    """Evaluate valid and all test sets, with both raw and EMA weights.

    Results are written to `data_writer` (TensorBoard) and printed as CSV;
    predicted PDBs are cached under args.log_dir per step.
    """
    # eval valid set
    if not valid_dataset is None:
        logging.info(f'[Main] Train_step: {cur_step} evaluate valid set ==========')
        valid_results = eval(
            args,
            model,
            valid_dataset,
            compute_loss=False,
            cache_dir=f'{args.log_dir}/valid_pdbs/{cur_step}')
        add_to_data_writer(data_writer, cur_step, valid_results, prefix='valid')
        csv_print({**valid_results, '0-VALID': '0-VALID'})
        logging.info(f'[Main] Train_step: {cur_step} evaluate valid finish ==========')
    # eval test set
    logging.info(f'[Main] Train_step: {cur_step} evaluate test set ==========')
    for name, test_dataset in test_dataset_dict.items():
        test_result = eval(
            args,
            model,
            test_dataset,
            compute_loss=False,
            cache_dir=f'{args.log_dir}/test_pdbs-{name}/{cur_step}')
        add_to_data_writer(data_writer, cur_step, test_result, prefix='test-' + name)
        csv_print({**test_result, f'0-TEST-{name}': f'0-TEST-{name}'})
    logging.info(f'[Main] Train_step: {cur_step} evaluate test finish ==========')
    # eval test set ema: temporarily swap in the shadow (EMA) weights,
    # evaluate, then restore the live weights.
    ema.apply_shadow()
    logging.info(f'[Main] Train_step: {cur_step} evaluate test-ema set ==========')
    for name, test_dataset in test_dataset_dict.items():
        test_ema_result = eval(
            args,
            model,
            test_dataset,
            compute_loss=False,
            cache_dir=f'{args.log_dir}/test_pdbs-ema-{name}/{cur_step}')
        add_to_data_writer(data_writer, cur_step, test_ema_result, prefix='test-ema-' + name)
        csv_print({**test_ema_result, f'0-TEST-ema-{name}': f'0-TEST-ema-{name}'})
    logging.info(f'[Main] Train_step: {cur_step} evaluate test-ema finish ==========')
    ema.restore()
def train(args, cur_step, model, train_data_gen, distill_data_gen, train_config, model_config, lr_scheduler, optimizer, res_collect, train_logger, ema):
    """Run a single training step (fetch batch, forward, backward, update).

    With gradient merging, the optimizer/EMA update and the distributed
    gradient sync only happen every `gradient_merge_k_steps` steps.
    """
    model.train()
    # fetch data
    logging.debug(f'[Main] Train_step: {cur_step} fetch_data')
    s0 = time_me()
    batch = None
    if distill_data_gen:
        # Mix distillation data in: ~75% distill batches, ~25% real batches.
        rand_distill = np.random.random()
        batch = next(distill_data_gen) if rand_distill > 0.25 else next(train_data_gen)
    else:
        batch = next(train_data_gen)
    if not check_batch(batch):
        return
    # Inject per-step dynamic features (recycling count, FAPE clamping).
    add_dyna_features(train_config, model_config, batch, cur_step)
    # train
    def _forward_with_precision(batch):
        # Run under bf16 autocast when requested, plain fp32 otherwise.
        if args.precision == "bf16":
            black_list, white_list = get_custom_amp_list()
            with paddle.amp.auto_cast(enable=True,
                                      custom_white_list=white_list,
                                      custom_black_list=black_list,
                                      level=args.amp_level,
                                      dtype='bfloat16'):
                return model(batch)
        elif args.precision == "fp32":
            return model(batch)
        else:
            raise ValueError("Please choose precision from bf16 and fp32! ")
    s1 = time_me()
    logging.debug(f'[Main] Train_step: {cur_step} train')
    results, loss = _forward_with_precision(batch)
    s2 = time_me()
    loss.backward()
    s3 = time_me()
    if args.distributed and cur_step % args.gradient_merge_k_steps == 0:
        # sync the gradient for branch parallel firstly
        bp.grad_sync(optimizer._param_groups)
        # then sync the gradient for dap
        dap.grad_sync(optimizer._param_groups)
        # finally sync the gradient for ddp
        dp.grad_sync(optimizer._param_groups)
    s4 = time_me()
    if cur_step % args.gradient_merge_k_steps == 0:
        optimizer.step()
        lr_scheduler.step()
        ema.update()
        optimizer.clear_grad()
    # bf16 losses must be cast before .item() extraction below.
    loss = loss.cast("float32") if loss.dtype == paddle.bfloat16 else loss
    s5 = time_me()
    batch_cost = s5 - s0
    # Per-phase timing breakdown for the step logger.
    train_logger.update("loss", loss.item())
    train_logger.update("reader_cost", s1 - s0)
    train_logger.update("forward_cost", s2 - s1)
    train_logger.update("backward_cost", s3 - s2)
    train_logger.update("gradsync_cost", s4 - s3)
    train_logger.update("update_cost", s5 - s4)
    train_logger.update("batch_cost", batch_cost)
    train_logger.update("protein", args.global_batch_size)
    train_logger.update("train_cost", batch_cost)
    if cur_step % args.gradient_merge_k_steps == 0:
        # Emit aggregated metrics only on real update steps.
        train_logger.update("avg_loss", train_logger.mean("loss"))
        log_msg = f"[Main] Train_step: {cur_step}, " + train_logger.msg()
        extra_dict = train_logger.state_dict()
        train_logger.reset("loss")
        res_collect.add(batch, results, extra_dict)
        logging.info(log_msg)
def main(args):
    set_logging_level(args.logging_level)
    # NOTE(review): the string below is dead code (it is not in docstring
    # position); consider moving it above set_logging_level.
    """main function"""
    new_einsum = os.getenv("FLAGS_new_einsum", True)
    print(f'>>> PaddlePaddle commit: {paddle.version.commit}')
    print(f'>>> FLAGS_new_einsum: {new_einsum}')
    print(f'>>> args:\n{args}')
    data_config = ml_collections.ConfigDict(json.load(open(args.data_config, 'r')))
    print(f'>>> data_config:\n{data_config}')
    train_config = ml_collections.ConfigDict(json.load(open(args.train_config, 'r')))
    print(f'>>> train_config:\n{train_config}')

    ### check paddle version
    if args.distributed:
        assert paddle.fluid.core.is_compiled_with_dist(), "Please using the paddle version compiled with distribute."

    # Only treat the run as distributed when more than one rank exists.
    args.distributed = args.distributed and dist.get_world_size() > 1
    dp_rank, dp_nranks = init_distributed_env(args)
    print(f'>>> dp_rank: {dp_rank}, dp_nranks: {dp_nranks}')
    args.global_batch_size = dp_nranks * args.batch_size

    ### set seed for reproduce experiment results
    if args.seed is not None:
        # Offset by rank so each data-parallel worker gets a distinct stream.
        args.seed += dp_rank
        init_seed(args.seed)

    def worker_init_fn(worker_id):
        """ set seed in subproces for dataloader when num_workers > 0"""
        np.random.seed(args.seed + worker_id)
        random.seed(args.seed + worker_id)

    ### create model
    model_config = config.model_config(args.model_name)
    if args.bp_degree > 1 or args.dap_degree > 1:
        # Model-parallel execution uses the distributed evoformer variant.
        model_config.model.global_config.dist_model = True
    if args.bp_degree > 1:
        model_config.model.global_config.outer_product_mean_position = 'end'
    print(f'>>> model_config:\n{model_config}')

    model = RunModel(train_config, model_config)

    if args.distributed:
        # broadcast param to other ranks when using distributed data parallel
        dp.param_sync(model, src_rank=0)

    if dist.get_rank() == 0:
        # print("model:", model)
        print("model size:", get_model_parameter_size(model))

    if (not args.init_model is None) and (not args.init_model == ""):
        print(f"Load pretrain model from {args.init_model}")
        if args.init_model.endswith('.npz'):
            # JAX checkpoint: convert params and strip the 'alphafold.' prefix.
            with open(args.init_model, 'rb') as f:
                params = np.load(io.BytesIO(f.read()), allow_pickle=False)
                params = dict(params)
            pd_params = utils.jax_params_to_paddle(params)
            pd_params = {k[len('alphafold.'):]: v for k, v in pd_params.items()}

            from collections import defaultdict
            qkv_dicts = defaultdict(dict)

            if model_config.model.global_config.fuse_attention:
                # Fused attention stores one qkv_w tensor; collect the separate
                # query/key/value weights per attention module and merge them.
                for key in pd_params:
                    if 'msa_column_global_attention' not in key and 'attention' in key and ('query_w' in key or 'key_w' in key or 'value_w' in key):
                        prefix = key[:key.rfind('.')]
                        if 'extra_msa_stack' in key:
                            qkv_dicts[prefix][key] = pd_params[key]
                            #print(key)
                        elif 'evoformer_iteration' in key:
                            qkv_dicts[prefix][key] = pd_params[key]
                            #print(key)
                        elif 'template_pair_stack' in key:
                            qkv_dicts[prefix][key] = pd_params[key]
                            #print(key)

                for prefix in qkv_dicts:
                    query_w = qkv_dicts[prefix][prefix + '.query_w']
                    key_w = qkv_dicts[prefix][prefix + '.key_w']
                    value_w = qkv_dicts[prefix][prefix + '.value_w']
                    if query_w.shape[0] == key_w.shape[0] and key_w.shape[0] == value_w.shape[0]:
                        # 1. merge to [3, num_head, key_dim, q_dim]
                        qkv_w = np.stack([query_w, key_w, value_w], axis=0).transpose((0, 2, 3, 1))

                        # 2. remove seperated param
                        del pd_params[prefix + '.query_w']
                        del pd_params[prefix + '.key_w']
                        del pd_params[prefix + '.value_w']

                        # 3. add merged param to pd_params
                        pd_params[prefix + '.qkv_w'] = qkv_w
        elif args.init_model.endswith('.pdparams'):
            pd_params = paddle.load(args.init_model)
        else:
            raise ValueError('Unsupported params file type')

        model.alphafold.set_state_dict(pd_params)

    if args.precision == "bf16" and args.amp_level == "O2":
        # Decorate for pure-bf16 training; prediction heads stay in fp32.
        print(f"args.amp_level : {args.amp_level}")
        model = paddle.amp.decorate(
            models=model,
            level=args.amp_level,
            dtype='bfloat16',
            excluded_layers=model.alphafold.alphafold_iteration.heads
        )

    optimizer, lr_scheduler = get_optimizer(args, train_config.optimizer, model)
    args.grad_clip = train_config.optimizer.grad_clip
    # ema = ExponentialMovingAverage(model, 0.999)
    ema = EMA(optimizer._param_groups, 0.999)
    ema.register()

    ### load dataset
    if not args.only_test:
        train_dataset = AF2Dataset(
            model_config=model_config,
            data_config=data_config.train,
            trainer_id=dp_rank,
            trainer_num=dp_nranks,
            crop_size=train_config.crop_size,
            is_pad_if_crop=True,
            delete_msa_block=True,
            is_shuffle=True)

    if 'valid' in data_config:
        valid_dataset = AF2Dataset(
            model_config=model_config,
            data_config=data_config.valid,
            trainer_id=dp_rank,
            trainer_num=dp_nranks,
            is_shuffle=False)
    else:
        valid_dataset = None

    test_dataset_dict = {}
    if 'test' in data_config:
        for test_name in data_config.test:
            test_dataset_dict[test_name] = AF2TestDataset(
                model_config=model_config,
                data_config=data_config.test[test_name],
                trainer_id=dp_rank,
                trainer_num=dp_nranks)

    distill_dataset = None
    if 'distill' in data_config:
        distill_dataset = AF2DistillDataset(
            model_config=model_config,
            data_config=data_config.distill,
            trainer_id=dp_rank,
            trainer_num=dp_nranks,
            crop_size=train_config.crop_size,
            is_pad_if_crop=True,
            delete_msa_block=True,
            is_shuffle=True)

    ### if only_test
    if args.only_test:
        full_eval(args, args.start_step, model, valid_dataset, test_dataset_dict, None, ema)
        exit(0)

    ### create data loader
    train_loader = paddle.io.DataLoader(
        dataset=train_dataset,
        batch_sampler=LoopedBatchSampler(
            dataset=train_dataset,
            shuffle=True,
            batch_size=args.batch_size,
            drop_last=False),
        num_workers=args.num_workers,
        worker_init_fn=worker_init_fn if args.seed is not None else None)
    train_data_gen = iter(train_loader)

    distill_data_gen = None
    if distill_dataset:
        distill_loader = paddle.io.DataLoader(
            dataset=distill_dataset,
            batch_sampler=LoopedBatchSampler(
                dataset=distill_dataset,
                shuffle=True,
                batch_size=args.batch_size,
                drop_last=False),
            num_workers=args.num_workers,
            worker_init_fn=worker_init_fn if args.seed is not None else None)
        distill_data_gen = iter(distill_loader)

    ### start training
    if dist.get_rank() == 0:
        try:    # permission denied error if without root
            data_writer = SummaryWriter(f'{args.log_dir}/tensorboard_log_dir', max_queue=0)
        except Exception as ex:
            print(f'Create data_writer failed: {ex}')
            data_writer = None
    else:
        data_writer = None

    train_logger = TrainLogger()
    res_collect = ResultsCollect()
    cur_step = args.start_step
    # Fast-forward the LR scheduler to the resume step.
    for _ in range(cur_step):
        lr_scheduler.step()
    logging.info('[Main] Start training.')
    while True:
        # reset train log info
        if cur_step == 5:
            train_logger.reset()
        if cur_step >= args.train_step:
            break

        # train
        train(args, cur_step, model, train_data_gen, distill_data_gen, train_config, model_config, \
                lr_scheduler, optimizer, res_collect, train_logger, ema)
        if cur_step % args.log_step == 0:
            train_results = res_collect.get_result()
            train_results['lr'] = lr_scheduler.get_lr()
            train_results['batch_size'] = args.global_batch_size
            add_to_data_writer(data_writer, cur_step, train_results, prefix='train')
            res_collect = ResultsCollect()

        # evaluate
        if cur_step % args.eval_step == 0:
            full_eval(args, cur_step, model, valid_dataset, test_dataset_dict, data_writer, ema)

        # save params
        if cur_step % args.save_step == 0 and dist.get_rank() == 0:
            paddle.save(model.alphafold.state_dict(), f'{args.model_dir}/step_{cur_step}.pdparams')
            if args.paddlecloud:
                upload_to_hadoop(args, cur_step)

        cur_step += 1
        sys.stdout.flush()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Run-mode flags.
    parser.add_argument("--distributed", action='store_true', default=False)
    parser.add_argument("--paddlecloud", action='store_true', default=False)
    parser.add_argument("--only_test", action='store_true', default=False)
    parser.add_argument("--seed", type=int, default=None, help="set seed for reproduce experiment results, None is do not set seed")
    parser.add_argument("--logging_level", type=str, default="DEBUG", help="NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL")
    # External scoring binaries and configuration files.
    parser.add_argument("--tm_score_bin", type=str, help="path to tm_score bin")
    parser.add_argument("--lddt_score_bin", type=str, help="path to lddt bin")
    parser.add_argument("--data_config", type=str, help="path to data config")
    parser.add_argument("--train_config", type=str, help='path to train config')
    parser.add_argument("--model_name", type=str, help='used to choose model config')
    parser.add_argument("--init_model", type=str, default='')
    # Numeric precision settings.
    parser.add_argument("--precision", type=str, choices=['fp32', 'bf16'], default='fp32')
    parser.add_argument("--amp_level", type=str, default='O1')
    # Training schedule and batching.
    parser.add_argument("--start_step", type=int, default=0)
    parser.add_argument("--train_step", type=int, default=1000)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--gradient_merge_k_steps", type=int, default=1)
    # Output locations and logging/eval/save cadence.
    parser.add_argument("--model_dir", type=str, default='./models')
    parser.add_argument("--log_dir", type=str, default='./log')
    parser.add_argument("--log_step", type=int, default=20)
    parser.add_argument("--eval_step", type=int, default=200)
    parser.add_argument("--save_step", type=int, default=200)
    # Model-parallel degrees (DAP and branch parallel).
    parser.add_argument("--dap_degree", type=int, default=1)
    parser.add_argument("--dap_comm_sync", action='store_true', default=True)
    parser.add_argument("--bp_degree", type=int, default=1)
    args = parser.parse_args()

    main(args)
| 23,213 | 38.75 | 152 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold/alphafold_paddle/model/modules.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules."""
import gc
import numpy as np
import paddle
import paddle.nn as nn
from paddle.fluid.framework import _dygraph_tracer
from paddle.distributed.fleet.utils import recompute
try:
from paddle import _legacy_C_ops as _C_ops
except:
from paddle import _C_ops
from alphafold_paddle.common import residue_constants
from alphafold_paddle.model.utils import mask_mean, subbatch
from alphafold_paddle.model import folding, lddt, quat_affine, all_atom
from alphafold_paddle.model.utils import init_gate_linear, init_final_linear
from utils.utils import get_structure_module_bf16_op_list
# Map head name in config to head name in model params.
# Values are the attribute names under which the head modules are
# registered on AlphaFoldIteration (looked up via Head_names.get(...)).
Head_names = {
    'masked_msa': 'masked_msa_head',
    'distogram': 'distogram_head',
    'predicted_lddt': 'predicted_lddt_head',
    'predicted_aligned_error': 'predicted_aligned_error_head',
    'experimentally_resolved': 'experimentally_resolved_head', # finetune loss
}
def recompute_wrapper(func, *args, is_recompute=True):
    """Invoke `func(*args)`, optionally under activation recomputation.

    When `is_recompute` is true the call is routed through Paddle's
    `recompute`, which re-materializes activations in the backward pass;
    otherwise `func` is called directly.
    """
    if not is_recompute:
        return func(*args)
    return recompute(func, *args)
def softmax_cross_entropy(logits, labels):
    """Computes softmax cross entropy given logits and one-hot class labels."""
    log_probs = paddle.nn.functional.log_softmax(logits)
    return -paddle.sum(labels * log_probs, axis=-1)
def sigmoid_cross_entropy(logits, labels):
    """Computes sigmoid cross entropy given logits and multiple class labels."""
    log_sig = paddle.nn.functional.log_sigmoid
    # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter is more numerically stable
    positive_term = labels * log_sig(logits)
    negative_term = (1. - labels) * log_sig(-logits)
    return -(positive_term + negative_term)
class Dropout(nn.Layer):
    """Dropout layer with optional axis-restricted masks.

    Mirrors `paddle.nn.Dropout`, with a fast path: when `axis` is given,
    the fused `dropout_nd` op is invoked with the requested axes instead
    of `paddle.nn.functional.dropout`.

    Fixed: the original validated `mode` AFTER converting
    'downscale_in_infer' to the legacy name 'downgrade_in_infer', so the
    valid mode 'downscale_in_infer' was always rejected with ValueError.
    Validation now happens first, and the axis-None path converts the
    legacy name back before calling `nn.functional.dropout` (which only
    accepts the public names).
    """

    def __init__(self, p=0.5, axis=None, mode="upscale_in_train", name=None):
        super(Dropout, self).__init__()

        if not isinstance(p, (float, int)):
            raise TypeError("p argument should be a number")
        if p < 0 or p > 1:
            raise ValueError("p argument should between 0 and 1")
        # Validate BEFORE the legacy-name conversion (bug fix, see class doc).
        if mode not in ('downscale_in_infer', 'upscale_in_train'):
            raise ValueError(
                "mode argument should be 'downscale_in_infer' or 'upscale_in_train'"
            )
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode #semantic transfer
        if axis and not isinstance(axis, (int, list, tuple)):
            raise TypeError("datatype of axis argument should be int or list")

        self.p = p              # drop probability
        self.axis = axis        # axes along which to generate the mask, or None
        self.mode = mode        # legacy-op mode name
        self.name = name

    def forward(self, input):
        # fast return for p == 0
        if self.p == 0:
            return input

        if self.axis is None:
            # nn.functional.dropout only accepts the public mode names, so
            # translate the stored legacy name back (part of the bug fix).
            public_mode = ('downscale_in_infer'
                           if self.mode == 'downgrade_in_infer' else self.mode)
            out = nn.functional.dropout(input,
                                        p=self.p,
                                        axis=self.axis,
                                        training=self.training,
                                        mode=public_mode,
                                        name=self.name)
        else:
            seed = None
            drop_axes = [self.axis] if isinstance(self.axis, int) else list(self.axis)
            # Honor a fixed program-level random seed when one is set.
            if paddle.static.default_main_program().random_seed != 0:
                seed = paddle.static.default_main_program().random_seed
            out, mask = _C_ops.dropout_nd(input, 'dropout_prob', self.p, 'is_test',
                                          not self.training, 'fix_seed', seed
                                          is not None, 'seed',
                                          seed if seed is not None else 0,
                                          'dropout_implementation', self.mode, 'axis',
                                          drop_axes)

        return out

    def extra_repr(self):
        name_str = ', name={}'.format(self.name) if self.name else ''
        return 'p={}, axis={}, mode={}{}'.format(self.p, self.axis, self.mode,
                                                 name_str)
class AlphaFold(nn.Layer):
    """AlphaFold model with recycling.

    Jumper et al. (2021) Suppl. Alg. 2 "Inference"
    """
    def __init__(self, channel_num, config):
        super(AlphaFold, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = config.global_config

        self.alphafold_iteration = AlphaFoldIteration(
            self.channel_num, self.config, self.global_config)

    def forward(self,
                batch,
                label,
                ensemble_representations=False,
                return_representations=False,
                compute_loss=True):
        """Run the AlphaFold model.

        Arguments:
            batch: Dictionary with inputs to the AlphaFold model.
            ensemble_representations: Whether to use ensembling of representations.
            return_representations: Whether to also return the intermediate
                representations.

        Returns:
            The output of AlphaFoldIteration is a nested dictionary containing
            predictions from the various heads.
        """
        # aatype is laid out (B, inner_batch, N); inner_batch folds the
        # ensemble (and, when resampling, recycling) dimension.
        inner_batch, num_residues = batch['aatype'].shape[1:]

        def _get_prev(ret):
            # Extract the recycled inputs for the next iteration and detach
            # them so gradients do not flow across recycling steps.
            new_prev = {
                'prev_pos': ret['structure_module']['final_atom_positions'],
                'prev_msa_first_row': ret['representations']['msa_first_row'],
                'prev_pair': ret['representations']['pair'],
            }
            for k in new_prev.keys():
                new_prev[k].stop_gradient = True
            return new_prev

        def _run_single_recycling(prev, recycle_idx, compute_loss):
            if not self.training:
                print(f'########## recycle id: {recycle_idx} ##########')

            if self.config.resample_msa_in_recycling:
                # (B, (R+1)*E, N, ...)
                # B: batch size, R: recycling number,
                # E: ensemble number, N: residue number
                num_ensemble = inner_batch // (self.config.num_recycle + 1)
                ensembled_batch = dict()
                for k in batch.keys():
                    # Slice out this recycling iteration's ensemble members.
                    start = recycle_idx * num_ensemble
                    end = start + num_ensemble
                    ensembled_batch[k] = batch[k][:, start:end]
            else:
                # (B, E, N, ...)
                num_ensemble = inner_batch
                ensembled_batch = batch

            non_ensembled_batch = prev
            return self.alphafold_iteration(
                ensembled_batch, label, non_ensembled_batch,
                compute_loss=compute_loss,
                ensemble_representations=ensemble_representations)

        if self.config.num_recycle:
            # aatype: (B, E, N), zeros_bn: (B, N)
            zeros_bn_shape = batch['aatype'].shape[0:1] + batch['aatype'].shape[2:]
            emb_config = self.config.embeddings_and_evoformer
            # if not self.training: # for inference
            if not self.training and self.global_config.low_memory is True:
                # Low-memory inference keeps the (large) pair tensor in bf16.
                prev = {
                    'prev_pos': paddle.zeros(
                        zeros_bn_shape + [residue_constants.atom_type_num, 3], dtype="float32"),
                    'prev_msa_first_row': paddle.zeros(
                        zeros_bn_shape + [emb_config.msa_channel], dtype="float32"),
                    'prev_pair': paddle.zeros(
                        zeros_bn_shape + [num_residues, emb_config.pair_channel], dtype=paddle.bfloat16),
                }
            else:
                prev = {
                    'prev_pos': paddle.zeros(
                        zeros_bn_shape + [residue_constants.atom_type_num, 3], dtype="float32"),
                    'prev_msa_first_row': paddle.zeros(
                        zeros_bn_shape + [emb_config.msa_channel], dtype="float32"),
                    'prev_pair': paddle.zeros(
                        zeros_bn_shape + [num_residues, emb_config.pair_channel], dtype="float32"),
                }

            if 'num_iter_recycling' in batch:
                # Training trick: dynamic recycling number
                num_iter = batch['num_iter_recycling'].numpy()[0, 0]
                num_iter = min(int(num_iter), self.config.num_recycle)
            else:
                num_iter = self.config.num_recycle

            # All but the final iteration run without loss computation; only
            # the last call (below) honors `compute_loss`.
            for recycle_idx in range(num_iter):
                ret = _run_single_recycling(prev, recycle_idx, compute_loss=False)
                prev = _get_prev(ret)
                # if not self.training:
                if not self.training and self.global_config.low_memory is True:
                    # Free the intermediate outputs eagerly to cap peak memory.
                    del ret
                    gc.collect()

        else:
            prev = {}
            num_iter = 0

        return _run_single_recycling(prev, num_iter, compute_loss=compute_loss)
class AlphaFoldIteration(nn.Layer):
    """A single recycling iteration of AlphaFold architecture.

    Computes ensembled (averaged) representations from the provided features.
    These representations are then passed to the various heads
    that have been requested by the configuration file. Each head also returns a
    loss which is combined as a weighted sum to produce the total loss.

    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 3-22
    """

    def __init__(self, channel_num, config, global_config):
        super(AlphaFoldIteration, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config

        # copy these config for later usage
        self.channel_num['extra_msa_channel'] = config.embeddings_and_evoformer.extra_msa_channel
        self.channel_num['msa_channel'] = config.embeddings_and_evoformer.msa_channel
        self.channel_num['pair_channel'] = config.embeddings_and_evoformer.pair_channel
        self.channel_num['seq_channel'] = config.embeddings_and_evoformer.seq_channel

        # Use the distributed evoformer when running with model parallelism.
        if self.global_config.get('dist_model', False):
            from ppfleetx.models.protein_folding.evoformer import DistEmbeddingsAndEvoformer
            self.evoformer = DistEmbeddingsAndEvoformer(
                self.channel_num, self.config.embeddings_and_evoformer,
                self.global_config)
        else:
            self.evoformer = EmbeddingsAndEvoformer(
                self.channel_num, self.config.embeddings_and_evoformer,
                self.global_config)

        Head_modules = {
            'masked_msa': MaskedMsaHead,
            'distogram': DistogramHead,
            'structure_module': folding.StructureModule,
            'predicted_lddt': PredictedLDDTHead,
            'predicted_aligned_error': PredictedAlignedErrorHead,
            'experimentally_resolved': ExperimentallyResolvedHead,   # finetune loss
        }

        # Instantiate every configured head and register it under the
        # parameter name given by Head_names.
        self.used_heads = []
        self.heads = []
        for head_name, head_config in sorted(self.config.heads.items()):
            if head_name not in Head_modules:
                continue

            self.used_heads.append(head_name)
            module = Head_modules[head_name](
                self.channel_num, head_config, self.global_config)

            head_name_ = Head_names.get(head_name, head_name)
            setattr(self, head_name_, module)
            self.heads.append(module)

    def forward(self,
                ensembled_batch,
                label,
                non_ensembled_batch,
                compute_loss=False,
                ensemble_representations=False):
        """Run the evoformer and all heads for one recycling iteration.

        Args:
            ensembled_batch: features with an ensemble axis at dim 1.
            label: target dict; extended in place with MSA-mask targets.
            non_ensembled_batch: recycled inputs ('prev_*') merged into
                every ensemble slice.
            compute_loss: also return the weighted total head loss.
            ensemble_representations: average representations over ensembles.
        """
        num_ensemble = ensembled_batch['seq_length'].shape[1]
        if not ensemble_representations:
            assert num_ensemble == 1

        def _slice_batch(i):
            # Take ensemble member i and merge in the recycled inputs.
            b = {k: v[:, i] for k, v in ensembled_batch.items()}
            b.update(non_ensembled_batch)
            return b

        batch0 = _slice_batch(0)
        representations = self.evoformer(batch0)

        # MSA representations are not ensembled
        msa_representation = representations['msa']
        del representations['msa']

        # MaskedMSAHead is apply on batch0
        label['bert_mask'] = batch0['bert_mask']
        label['true_msa'] = batch0['true_msa']
        label['residue_index'] = batch0['residue_index']

        # Average the remaining representations over the ensemble members.
        if ensemble_representations:
            for i in range(1, num_ensemble):
                batch = _slice_batch(i)
                representations_update = self.evoformer(batch)
                for k in representations.keys():
                    representations[k] += representations_update[k]

            for k in representations.keys():
                representations[k] /= num_ensemble + 0.0

        representations['msa'] = msa_representation
        ret = {'representations': representations}

        def loss(head_name_, head_config, ret, head_name, filter_ret=True):
            # Compute one head's loss; `filter_ret` selects whether the head
            # sees only its own outputs or the full result dict.
            if filter_ret:
                value = ret[head_name]
            else:
                value = ret
            loss_output = getattr(self, head_name_).loss(value, label)
            ret[head_name].update(loss_output)
            loss = head_config.weight * ret[head_name]['loss']
            return loss

        def _forward_heads(representations, ret, batch0):
            total_loss = 0.
            for head_name, head_config in self._get_heads():
                head_name_ = Head_names.get(head_name, head_name)
                # Skip PredictedLDDTHead and PredictedAlignedErrorHead until
                # StructureModule is executed.
                if head_name in ('predicted_lddt', 'predicted_aligned_error'):
                    continue
                else:
                    ret[head_name] = getattr(self, head_name_)(representations, batch0)
                    if 'representations' in ret[head_name]:
                        # Extra representations from the head. Used by the
                        # structure module to provide activations for the PredictedLDDTHead.
                        representations.update(ret[head_name].pop('representations'))
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name)

            if self.config.heads.get('predicted_lddt.weight', 0.0):
                # Add PredictedLDDTHead after StructureModule executes.
                head_name = 'predicted_lddt'
                # Feed all previous results to give access to structure_module result.
                head_name_ = Head_names.get(head_name, head_name)
                head_config = self.config.heads[head_name]
                ret[head_name] = getattr(self, head_name_)(representations, batch0)
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name, filter_ret=False)

            if ('predicted_aligned_error' in self.config.heads
                    and self.config.heads.get('predicted_aligned_error.weight', 0.0)):
                # Add PredictedAlignedErrorHead after StructureModule executes.
                head_name = 'predicted_aligned_error'
                # Feed all previous results to give access to structure_module result.
                head_config = self.config.heads[head_name]
                head_name_ = Head_names.get(head_name, head_name)
                ret[head_name] = getattr(self, head_name_)(representations, batch0)
                if compute_loss:
                    total_loss += loss(head_name_, head_config, ret, head_name, filter_ret=False)

            return ret, total_loss

        # if not self.training:
        if not self.training and self.global_config.low_memory is True:
            # Low-memory inference: run heads under O1 bf16 autocast with a
            # structure-module-specific black/white op list.
            black_list, white_list = get_structure_module_bf16_op_list()
            with paddle.amp.auto_cast(level='O1', custom_white_list=white_list, custom_black_list=black_list, dtype='bfloat16'):
                ret, total_loss = _forward_heads(representations, ret, batch0)
        else:
            tracer = _dygraph_tracer()
            if tracer._amp_dtype == "bfloat16":
                # Heads run in fp32: disable autocast and upcast any bf16
                # inputs, preserving their stop_gradient flags.
                with paddle.amp.auto_cast(enable=False):
                    for key, value in representations.items():
                        if value.dtype in [paddle.fluid.core.VarDesc.VarType.BF16]:
                            temp_value = value.cast('float32')
                            temp_value.stop_gradient = value.stop_gradient
                            representations[key] = temp_value
                    for key, value in batch0.items():
                        if value.dtype in [paddle.fluid.core.VarDesc.VarType.BF16]:
                            temp_value = value.cast('float32')
                            temp_value.stop_gradient = value.stop_gradient
                            batch0[key] = temp_value
                    ret, total_loss = _forward_heads(representations, ret, batch0)
            else:
                ret, total_loss = _forward_heads(representations, ret, batch0)

        if compute_loss:
            return ret, total_loss
        else:
            return ret

    def _get_heads(self):
        # Yield (name, config) pairs for every instantiated head; the
        # structure module is mandatory.
        assert 'structure_module' in self.used_heads
        head_names = [h for h in self.used_heads]
        for k in head_names:
            yield k, self.config.heads[k]
class Attention(nn.Layer):
    """Multihead attention.

    Two execution paths are provided:
      * a fused kernel path (``paddle.incubate.nn.functional.fused_gate_attention``)
        selected by ``global_config.fuse_attention``;
      * an einsum-based reference path.
    Both implement gated multi-head attention; gating is enabled via
    ``config.gating``.
    """
    def __init__(self, config, global_config, q_dim, kv_dim, output_dim):
        # Args:
        #   config: per-module config (num_head, gating, optional key_dim/value_dim).
        #   global_config: model-wide flags (fuse_attention, use_flash_attn, zero_init).
        #   q_dim: channel dim of the query input.
        #   kv_dim: channel dim of the key/value input.
        #   output_dim: channel dim of the output projection.
        super(Attention, self).__init__()
        self.config = config
        self.global_config = global_config
        num_head = self.config.num_head
        # key_dim / value_dim default to the input dims when not configured.
        key_dim = self.config.get('key_dim', q_dim)
        value_dim = self.config.get('value_dim', kv_dim)
        # TODO(GuoxiaWang): delete non fuse_attention related code on dcu
        self.fuse_attention = self.global_config.fuse_attention
        self.use_flash_attn = self.global_config.use_flash_attn
        # A single packed QKV weight is only possible for self-attention
        # where query and key/value channel dims agree.
        self.merge_qkv = (q_dim == kv_dim)
        assert key_dim % num_head == 0
        assert value_dim % num_head == 0
        # From here on, key_dim / value_dim are PER-HEAD dims.
        key_dim = key_dim // num_head
        value_dim = value_dim // num_head
        self.key_dim = key_dim
        self.value_dim = value_dim
        self.qkv_w = None
        self.query_w = None
        self.key_w = None
        self.value_w = None
        if self.merge_qkv and self.fuse_attention:
            # Packed [3, H, key_dim, q_dim] weight consumed by the fused kernel.
            self.qkv_w = paddle.create_parameter(
                [3, num_head, key_dim, q_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
        else:
            # Separate projection weights, one per head via the middle dim.
            self.query_w = paddle.create_parameter(
                [q_dim, num_head, key_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
            self.key_w = paddle.create_parameter(
                [kv_dim, num_head, key_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
            self.value_w = paddle.create_parameter(
                [kv_dim, num_head, value_dim], 'float32',
                default_initializer=nn.initializer.XavierUniform())
        self.gating_w = None
        self.gating_b = None
        if self.config.gating:
            # Zero weight + unit bias => gates start at sigmoid(1) ~ 0.73,
            # i.e. the gate is initially (nearly) open.
            self.gating_w = paddle.create_parameter(
                [q_dim, num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(0.0))
            self.gating_b = paddle.create_parameter(
                [num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(1.0))
        if self.global_config.zero_init:
            # Zero-init the output projection so the residual branch starts
            # as an identity (standard AlphaFold-style initialization).
            init = nn.initializer.Constant(0.0)
        else:
            init = nn.initializer.XavierUniform()
        self.output_w = paddle.create_parameter(
            [num_head, value_dim, output_dim], 'float32',
            default_initializer=init)
        self.output_b = paddle.create_parameter(
            [output_dim], 'float32',
            default_initializer=nn.initializer.Constant(0.0))
    def forward(self, q_data, m_data, bias, nonbatched_bias=None):
        """Builds Attention module.
        Arguments:
            q_data: A tensor of queries, shape [batch, row_size, N_queries, q_channels].
            m_data: A tensor of memories from which the keys and values are
                projected, shape [batch, row_size, N_keys, m_channels].
            bias: A bias for the attention, shape [batch, row_size, num_head, N_queries, N_keys].
            nonbatched_bias: Shared bias, shape [N_queries, N_keys].
        Returns:
            A float32 tensor of shape [batch_size, row_size, N_queries, output_dim].
        """
        if self.fuse_attention:
            if nonbatched_bias is not None:
                # Insert a broadcast row axis expected by the fused kernel.
                nonbatched_bias = paddle.unsqueeze(nonbatched_bias, axis=1)
            import paddle.incubate.nn.functional as F
            output = F.fused_gate_attention(
                query=q_data,
                key=m_data,
                query_weight=self.query_w,
                key_weight=self.key_w,
                value_weight=self.value_w,
                qkv_weight=self.qkv_w,
                gate_linear_weight=self.gating_w,
                gate_linear_bias=self.gating_b,
                out_linear_weight=self.output_w,
                out_linear_bias=self.output_b,
                nonbatched_bias=nonbatched_bias,
                attn_mask=bias,
                has_gating=self.config.gating,
                merge_qkv=self.merge_qkv,
                use_flash_attn=self.use_flash_attn,
            )
        else:
            # Reference path: scaled dot-product attention via einsum.
            c = self.key_dim ** (-0.5)  # 1/sqrt(d_k) scaling, folded into q.
            q = paddle.einsum('nbqa,ahc->nbqhc', q_data, self.query_w) * c
            k = paddle.einsum('nbka,ahc->nbkhc', m_data, self.key_w)
            v = paddle.einsum('nbka,ahc->nbkhc', m_data, self.value_w)
            logits = paddle.einsum('nbqhc,nbkhc->nbhqk', q, k) + bias
            if nonbatched_bias is not None:
                logits += paddle.unsqueeze(nonbatched_bias, axis=1)
            weights = nn.functional.softmax(logits)
            weighted_avg = paddle.einsum('nbhqk,nbkhc->nbqhc', weights, v)
            if self.config.gating:
                # Per-query, per-head sigmoid gate computed from q_data.
                gate_values = paddle.einsum('nbqc,chv->nbqhv', q_data,
                                            self.gating_w) + self.gating_b
                gate_values = nn.functional.sigmoid(gate_values)
                weighted_avg *= gate_values
            output = paddle.einsum('nbqhc,hco->nbqo', weighted_avg,
                                   self.output_w) + self.output_b
        return output
class GlobalAttention(nn.Layer):
    """Global attention.
    Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention" lines 2-7

    Unlike regular attention, the query is the mask-weighted MEAN over the
    query positions, and keys/values are projected with single (head-shared)
    weights — note key_w/value_w below have no head axis.
    """
    def __init__(self, config, global_config, q_dim, kv_dim, output_dim):
        super(GlobalAttention, self).__init__()
        self.config = config
        self.global_config = global_config
        num_head = self.config.num_head
        key_dim = self.config.get('key_dim', q_dim)
        value_dim = self.config.get('value_dim', kv_dim)
        assert key_dim % num_head == 0
        assert value_dim % num_head == 0
        # Per-head dims from here on.
        key_dim = key_dim // num_head
        value_dim = value_dim // num_head
        self.key_dim = key_dim
        self.value_dim = value_dim
        # Query projection has a head axis; key/value projections do not
        # (keys and values are shared across heads).
        self.query_w = paddle.create_parameter(
            [q_dim, num_head, key_dim], 'float32',
            default_initializer=nn.initializer.XavierUniform())
        self.key_w = paddle.create_parameter(
            [kv_dim, key_dim], 'float32',
            default_initializer=nn.initializer.XavierUniform())
        self.value_w = paddle.create_parameter(
            [kv_dim, value_dim], 'float32',
            default_initializer=nn.initializer.XavierUniform())
        if self.config.gating:
            # Zero weight + unit bias: gates start (nearly) open.
            self.gating_w = paddle.create_parameter(
                [q_dim, num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(0.0))
            self.gating_b = paddle.create_parameter(
                [num_head, value_dim], 'float32',
                default_initializer=nn.initializer.Constant(1.0))
        if self.global_config.zero_init:
            # Zero-init output so the residual branch starts as identity.
            init = nn.initializer.Constant(0.0)
        else:
            init = nn.initializer.XavierUniform()
        self.output_w = paddle.create_parameter(
            [num_head, value_dim, output_dim], 'float32',
            default_initializer=init)
        self.output_b = paddle.create_parameter(
            [output_dim], 'float32',
            default_initializer=nn.initializer.Constant(0.0))
    def forward(self, q_data, m_data, q_mask):
        # q_data/m_data: [n, b, k, c]; q_mask masks positions along axis 2.
        k = paddle.einsum('nbka,ac->nbkc', m_data, self.key_w)
        v = paddle.einsum('nbka,ac->nbkc', m_data, self.value_w)
        # NOTE: differ from non-global version using q_avg for attn
        q_avg = mask_mean(q_mask, q_data, axis=2)
        c = self.key_dim ** (-0.5)  # 1/sqrt(d_k), folded into q.
        q = paddle.einsum('nba,ahc->nbhc', q_avg, self.query_w) * c
        # Additive attention mask: 0 where valid, -1e9 where masked.
        q_mask_ = paddle.unsqueeze(q_mask, axis=2)[..., 0]
        bias = 1e9 * (q_mask_ - 1.)
        logits = paddle.einsum('nbhc,nbkc->nbhk', q, k) + bias
        weights = nn.functional.softmax(logits)
        weighted_avg = paddle.einsum('nbhk,nbkc->nbhc', weights, v)
        if self.config.gating:
            # Gate is per original query position, so the pooled attention
            # result is broadcast back over the query axis before gating.
            gate_values = paddle.einsum('nbqc,chv->nbqhv', q_data,
                                        self.gating_w) + self.gating_b
            gate_values = nn.functional.sigmoid(gate_values)
            weighted_avg = paddle.unsqueeze(weighted_avg, axis=2)
            weighted_avg *= gate_values
            output = paddle.einsum('nbqhc,hco->nbqo', weighted_avg,
                                   self.output_w) + self.output_b
        else:
            # Ungated path keeps one pooled vector per row; a trailing axis
            # is appended (presumably for downstream broadcasting — confirm
            # against callers).
            output = paddle.einsum('nbhc,hco->nbo', weighted_avg,
                                   self.output_w) + self.output_b
            output = paddle.unsqueeze(output, axis=-1)
        return output
class MSARowAttentionWithPairBias(nn.Layer):
    """MSA per-row attention biased by the pair representation.
    Jumper et al. (2021) Suppl. Alg. 7 "MSARowAttentionWithPairBias"
    """
    def __init__(self, channel_num, config, global_config, is_extra_msa):
        super(MSARowAttentionWithPairBias, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa
        assert config.orientation == 'per_row'
        # Extra-MSA and main-MSA stacks differ only in channel width.
        act_dim = channel_num['extra_msa_channel' if is_extra_msa
                              else 'msa_channel']
        self.query_norm = nn.LayerNorm(act_dim)
        self.feat_2d_norm = nn.LayerNorm(channel_num['pair_channel'])
        # Projects the normalized pair representation to one bias per head.
        self.feat_2d_weights = paddle.create_parameter(
            [channel_num['pair_channel'], self.config.num_head], 'float32',
            default_initializer=nn.initializer.Normal(
                std=1. / np.sqrt(channel_num['pair_channel'])))
        self.attention = Attention(
            self.config, self.global_config, act_dim, act_dim, act_dim)
    def forward(self, msa_act, msa_mask, pair_act):
        pair_act = self.feat_2d_norm(pair_act)
        # Shared (non-batched) per-head bias derived from the pair rep.
        pair_bias = paddle.einsum(
            'nqkc,ch->nhqk', pair_act, self.feat_2d_weights)
        # Additive mask: 0 where valid, -1e9 where masked.
        mask_bias = paddle.unsqueeze(1e9 * (msa_mask - 1.), axis=[2, 3])
        msa_act = self.query_norm(msa_act)
        run_subbatched = (not self.training
                          or (self.is_extra_msa and self.config.use_subbatch))
        if run_subbatched:
            # Low-memory mode: slice inputs and run attention piecewise.
            sb_size = (self.global_config.subbatch_size
                       if not self.training else self.config.subbatch_size)
            sliced_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                                   sb_size, 1, same_arg_idx={1: 0})
            msa_act = sliced_attn(msa_act, msa_act, mask_bias, pair_bias)
        else:
            msa_act = self.attention(msa_act, msa_act, mask_bias, pair_bias)
        return msa_act
class MSAColumnGlobalAttention(nn.Layer):
    """MSA per-column global attention.
    Jumper et al. (2021) Suppl. Alg. 19 "MSAColumnGlobalAttention"

    Used for the extra-MSA stack, where the (very deep) sequence axis makes
    full per-column attention too expensive; GlobalAttention pools queries
    into a single mask-weighted mean instead.
    """
    def __init__(self, channel_num, config, global_config):
        super(MSAColumnGlobalAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        assert config.orientation == 'per_column'
        extra_msa_channel = channel_num['extra_msa_channel']
        self.query_norm = nn.LayerNorm(extra_msa_channel)
        self.attention = GlobalAttention(
            self.config, self.global_config,
            extra_msa_channel, extra_msa_channel, extra_msa_channel)
    def forward(self, msa_act, msa_mask):
        # Swap sequence/residue axes so attention runs along columns.
        msa_act = paddle.transpose(msa_act, [0, 2, 1, 3])
        msa_mask = paddle.transpose(msa_mask, [0, 2, 1])
        # NOTE: the original code also built an additive `bias` tensor here,
        # but GlobalAttention consumes the raw mask (q_mask), not a bias;
        # the dead computation has been removed.
        msa_mask = paddle.unsqueeze(msa_mask, axis=-1)
        msa_act = self.query_norm(msa_act)
        if not self.training:
            # Low-memory inference: run attention over sub-batches.
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               self.global_config.subbatch_size, 1,
                               same_arg_idx={1: 0})
            msa_act = sb_attn(msa_act, msa_act, msa_mask)
        else:
            msa_act = self.attention(msa_act, msa_act, msa_mask)
        # Restore the original axis order.
        msa_act = paddle.transpose(msa_act, [0, 2, 1, 3])
        return msa_act
class MSAColumnAttention(nn.Layer):
    """MSA per-column attention.
    Jumper et al. (2021) Suppl. Alg. 8 "MSAColumnAttention"
    """
    def __init__(self, channel_num, config, global_config):
        super(MSAColumnAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        assert config.orientation == 'per_column'
        c_m = channel_num['msa_channel']
        self.query_norm = nn.LayerNorm(c_m)
        self.attention = Attention(
            self.config, self.global_config, c_m, c_m, c_m)
    def forward(self, msa_act, msa_mask):
        # Swap sequence/residue axes so attention runs along columns.
        msa_act = paddle.transpose(msa_act, [0, 2, 1, 3])
        msa_mask = paddle.transpose(msa_mask, [0, 2, 1])
        # Additive mask: 0 where valid, -1e9 where masked.
        bias = paddle.unsqueeze(1e9 * (msa_mask - 1.), axis=[2, 3])
        msa_act = self.query_norm(msa_act)
        if self.training:
            msa_act = self.attention(msa_act, msa_act, bias)
        else:
            # Low-memory inference: run attention over sub-batches.
            sliced_attn = subbatch(
                self.attention, [0, 1, 2], [1, 1, 1],
                self.global_config.subbatch_size, 1, same_arg_idx={1: 0})
            msa_act = sliced_attn(msa_act, msa_act, bias)
        # Restore the original axis order.
        return paddle.transpose(msa_act, [0, 2, 1, 3])
class Transition(nn.Layer):
    """Two-layer feed-forward transition (MSA or pair track).
    Jumper et al. (2021) Suppl. Alg. 9 "MSATransition"
    Jumper et al. (2021) Suppl. Alg. 15 "PairTransition"
    """
    def __init__(self, channel_num, config, global_config, is_extra_msa,
                 transition_type):
        super(Transition, self).__init__()
        assert transition_type in ['msa_transition', 'pair_transition']
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa
        self.transition_type = transition_type
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Pick the input width from the track this transition operates on.
        if transition_type == 'pair_transition':
            in_dim = channel_num['pair_channel']
        else:  # 'msa_transition'
            in_dim = channel_num['extra_msa_channel' if is_extra_msa
                                 else 'msa_channel']
        hidden_dim = int(in_dim * self.config.num_intermediate_factor)
        self.input_layer_norm = nn.LayerNorm(in_dim)
        self.transition1 = Linear(
            in_dim,
            hidden_dim,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.KaimingNormal()))
        if self.global_config.zero_init:
            # Zero-init the last layer so the residual branch starts as identity.
            last_init = nn.initializer.Constant(0.0)
        else:
            last_init = nn.initializer.TruncatedNormal()
        self.transition2 = Linear(
            hidden_dim,
            in_dim,
            weight_attr=paddle.ParamAttr(initializer=last_init))
    def forward(self, act, mask):
        # `mask` is accepted for API symmetry with sibling modules but is
        # not used by the transition itself.
        act = self.input_layer_norm(act)
        def _ffn(x):
            # Linear -> ReLU -> Linear.
            return self.transition2(nn.functional.relu(self.transition1(x)))
        if self.training:
            return _ffn(act)
        # Low-memory inference: apply the FFN over sub-batches.
        sliced_ffn = subbatch(_ffn, [0], [1],
                              self.global_config.subbatch_size, 1)
        return sliced_ffn(act)
class MaskedMsaHead(nn.Layer):
    """Head to predict MSA at the masked locations.

    The MaskedMsaHead employs a BERT-style objective to reconstruct a masked
    version of the full MSA, based on a linear projection of
    the MSA representation.
    Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
    """
    def __init__(self, channel_num, config, global_config, name='masked_msa_head'):
        super(MaskedMsaHead, self).__init__()
        self.config = config
        self.global_config = global_config
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        self.num_output = config.num_output
        self.logits = Linear(channel_num['msa_channel'], self.num_output, name='logits')
    def forward(self, representations, batch):
        """Project the MSA representation to per-position aatype logits.

        Arguments:
            representations: must contain 'msa' of shape
                [batch, N_seq, N_res, c_m].
            batch: unused.
        Returns:
            dict with 'logits' of shape [batch, N_seq, N_res, N_aatype]
            holding (unnormalized) log probabilities of the predicted aatype.
        """
        del batch
        return {'logits': self.logits(representations['msa'])}
    def loss(self, value, batch):
        """Cross-entropy against the true MSA, averaged over masked positions."""
        labels = paddle.nn.functional.one_hot(
            batch['true_msa'], num_classes=self.num_output)
        errors = softmax_cross_entropy(labels=labels, logits=value['logits'])
        bert_mask = paddle.cast(batch['bert_mask'], dtype=errors.dtype)
        denom = 1e-8 + paddle.sum(batch['bert_mask'], axis=[-2, -1])
        return {'loss': paddle.sum(errors * bert_mask, axis=[-2, -1]) / denom}
class PredictedLDDTHead(nn.Layer):
    """Head to predict the per-residue LDDT to be used as a confidence measure.
    Jumper et al. (2021) Suppl. Sec. 1.9.6 "Model confidence prediction (pLDDT)"
    Jumper et al. (2021) Suppl. Alg. 29 "predictPerResidueLDDT_Ca"
    """
    def __init__(self, channel_num, config, global_config, name='predicted_lddt_head'):
        super(PredictedLDDTHead, self).__init__()
        self.config = config
        self.global_config = global_config
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Small MLP: LayerNorm -> Linear/ReLU x2 -> per-bin logits.
        self.input_layer_norm = nn.LayerNorm(channel_num['seq_channel'],
                                             name='input_layer_norm')
        self.act_0 = Linear(
            channel_num['seq_channel'], self.config.num_channels, name='act_0')
        self.act_1 = Linear(
            self.config.num_channels, self.config.num_channels, name='act_1')
        self.logits = Linear(
            self.config.num_channels, self.config.num_bins, name='logits')
    def forward(self, representations, batch):
        """Builds PredictedLDDTHead module.
        Arguments:
            representations: Dictionary of representations, must contain:
                * 'structure_module': Single representation from the structure module,
                    shape [n_batch, N_res, c_s].
        Returns:
            Dictionary containing :
                * 'logits': logits of shape [n_batch, N_res, N_bins] with
                    (unnormalized) log probabilies of binned predicted lDDT.
        """
        act = representations['structure_module']
        act = self.input_layer_norm(act)
        act = nn.functional.relu(self.act_0(act))
        act = nn.functional.relu(self.act_1(act))
        logits = self.logits(act)
        return dict(logits=logits)
    def loss(self, value, batch):
        """Cross-entropy between predicted pLDDT bins and the binned true
        per-residue lDDT of the CA atoms (target is detached; no gradient
        flows into the structure through the label)."""
        # Shape (n_batch, num_res, 37, 3)
        pred_all_atom_pos = value['structure_module']['final_atom_positions']
        # Shape (n_batch, num_res, 37, 3)
        true_all_atom_pos = paddle.cast(batch['all_atom_positions'], 'float32')
        # Shape (n_batch, num_res, 37)
        all_atom_mask = paddle.cast(batch['all_atom_mask'], 'float32')
        # Shape (batch_size, num_res)
        # Index 1 in atom37 ordering selects the CA atom here.
        lddt_ca = lddt.lddt(
            # Shape (batch_size, num_res, 3)
            predicted_points=pred_all_atom_pos[:, :, 1, :],
            # Shape (batch_size, num_res, 3)
            true_points=true_all_atom_pos[:, :, 1, :],
            # Shape (batch_size, num_res, 1)
            true_points_mask=all_atom_mask[:, :, 1:2],
            cutoff=15.,
            per_residue=True)
        # Targets must not propagate gradients.
        lddt_ca = lddt_ca.detach()
        # Shape (batch_size, num_res)
        num_bins = self.config.num_bins
        bin_index = paddle.floor(lddt_ca * num_bins)
        # protect against out of range for lddt_ca == 1
        bin_index = paddle.minimum(bin_index, paddle.full(shape=[1], fill_value=num_bins - 1, dtype='float32'))
        lddt_ca_one_hot = paddle.nn.functional.one_hot(paddle.cast(bin_index, 'int64'), num_classes=num_bins)
        # Shape (n_batch, num_res, num_channel)
        logits = value['predicted_lddt']['logits']
        errors = softmax_cross_entropy(labels=lddt_ca_one_hot, logits=logits)
        # Shape (num_res,)
        # Average the loss only over residues whose CA is present.
        mask_ca = all_atom_mask[:, :, residue_constants.atom_order['CA']]
        mask_ca = paddle.to_tensor(mask_ca, dtype='float32')
        loss = paddle.sum(errors * mask_ca, axis=-1) / (paddle.sum(mask_ca, axis=-1) + 1e-8)
        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            loss *= paddle.cast((resolution >= self.config.min_resolution)
                                & (resolution <= self.config.max_resolution), 'float32')
        output = {'loss': loss}
        return output
class PredictedAlignedErrorHead(nn.Layer):
    """Head to predict the distance errors in the backbone alignment frames.

    Can be used to compute predicted TM-Score.
    Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
    """
    def __init__(self, channel_num, config, global_config,
                 name='predicted_aligned_error_head'):
        super(PredictedAlignedErrorHead, self).__init__()
        self.config = config
        self.global_config = global_config
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Projects the pair representation to per-bin aligned-error logits.
        self.logits = Linear(
            channel_num['pair_channel'], self.config.num_bins, name='logits')
    def forward(self, representations, batch):
        """Builds PredictedAlignedErrorHead module.

        Arguments:
            representations: Dictionary of representations, must contain:
                * 'pair': pair representation, shape [B, N_res, N_res, c_z].
            batch: Batch, unused.
        Returns:
            Dictionary containing:
                * logits: logits for aligned error, shape [B, N_res, N_res, N_bins].
                * breaks: array containing bin breaks, shape [N_bins - 1].
        """
        logits = self.logits(representations['pair'])
        breaks = paddle.linspace(0., self.config.max_error_bin,
                                 self.config.num_bins - 1)
        return dict(logits=logits, breaks=breaks)
    def loss(self, value, batch):
        """Cross-entropy between binned true and predicted aligned error.

        The true error is the squared distance between each residue's
        position expressed in every other residue's local frame, computed
        for both the predicted and the true backbone affines.
        """
        # Shape (B, num_res, 7)
        predicted_affine = quat_affine.QuatAffine.from_tensor(
            value['structure_module']['final_affines'])
        # Ground-truth backbone frames (rotation + translation).
        true_rot = paddle.to_tensor(batch['backbone_affine_tensor_rot'], dtype='float32')
        true_trans = paddle.to_tensor(batch['backbone_affine_tensor_trans'], dtype='float32')
        true_affine = quat_affine.QuatAffine(
            quaternion=None,
            translation=true_trans,
            rotation=true_rot)
        # Shape (B, num_res)
        mask = batch['backbone_affine_mask']
        # Pairwise validity mask: mask[..., None] * mask[:, None, :].
        # BUG FIX: the original used mask.squeeze(axis=-2) here, which does
        # not form the intended (B, num_res, num_res) outer product; both
        # factors must be unsqueezed.
        square_mask = mask.unsqueeze(axis=-1) * mask.unsqueeze(axis=-2)
        num_bins = self.config.num_bins
        # (num_bins - 1)
        breaks = value['predicted_aligned_error']['breaks']
        # (B, num_res, num_res, num_bins)
        logits = value['predicted_aligned_error']['logits']
        # Compute the squared error for each alignment.
        def _local_frame_points(affine):
            # Express every translation in every residue's local frame.
            points = [paddle.unsqueeze(x, axis=-2) for x in
                      paddle.unstack(affine.translation, axis=-1)]
            return affine.invert_point(points, extra_dims=1)
        error_dist2_xyz = [
            paddle.square(a - b)
            for a, b in zip(_local_frame_points(predicted_affine),
                            _local_frame_points(true_affine))]
        # Shape (B, num_res, num_res).
        # First num_res are alignment frames, second num_res are the residues.
        # Detach: no gradient flows through the binning target.
        error_dist2 = sum(error_dist2_xyz).detach()
        sq_breaks = paddle.square(breaks)
        true_bins = paddle.sum(
            paddle.cast(error_dist2.unsqueeze(axis=-1) > sq_breaks, 'int32'),
            axis=-1)
        errors = softmax_cross_entropy(
            labels=paddle.nn.functional.one_hot(true_bins, num_classes=num_bins),
            logits=logits)
        loss = (paddle.sum(errors * square_mask, axis=[-2, -1]) /
                (1e-8 + paddle.sum(square_mask, axis=[-2, -1])))
        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            resolution = paddle.squeeze(batch['resolution'], axis=-1)
            loss *= paddle.cast((resolution >= self.config.min_resolution)
                                & (resolution <= self.config.max_resolution), 'float32')
        return {'loss': loss}
class ExperimentallyResolvedHead(nn.Layer):
    """Predicts if an atom is experimentally resolved in a high-res structure.

    Only trained on high-resolution X-ray crystals & cryo-EM.
    Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
    """
    def __init__(self, channel_num, config, global_config, name='experimentally_resolved_head'):
        super(ExperimentallyResolvedHead, self).__init__()
        self.config = config
        self.global_config = global_config
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # One logit per atom37 slot.
        self.logits = Linear(channel_num['seq_channel'], 37, name='logits')
    def forward(self, representations, batch):
        """Predict per-atom 'resolved' logits from the single representation.

        Arguments:
            representations: must contain 'single', shape [B, N_res, c_s].
            batch: unused.
        Returns:
            dict with 'logits' of shape [B, N_res, 37]; apply sigmoid to get
            the probability that each atom is resolved in atom37 layout.
        """
        return dict(logits=self.logits(representations['single']))
    def loss(self, value, batch):
        """Sigmoid cross-entropy against the true atom mask, averaged over
        atoms that exist for each amino acid."""
        logits = value['logits']
        assert len(logits.shape) == 3
        # Does the atom appear in the amino acid?
        atom_exists = batch['atom37_atom_exists']
        # Is the atom resolved in the experiment? Subset of atom_exists,
        # *except for OXT*.
        resolved_mask = paddle.cast(batch['all_atom_mask'], 'float32')
        xent = sigmoid_cross_entropy(labels=resolved_mask, logits=logits)
        denom = 1e-8 + paddle.sum(atom_exists, axis=[-2, -1])
        loss = paddle.sum(xent * atom_exists, axis=[-2, -1]) / denom
        if self.config.filter_by_resolution:
            # NMR & distillation have resolution = 0
            resolution = paddle.squeeze(batch['resolution'], axis=-1)
            in_range = ((resolution >= self.config.min_resolution)
                        & (resolution <= self.config.max_resolution))
            loss *= paddle.cast(in_range, 'float32')
        return {'loss': loss}
class DistogramHead(nn.Layer):
    """Head to predict a distogram.
    Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
    """
    def __init__(self, channel_num, config, global_config, name='distogram_head'):
        super(DistogramHead, self).__init__()
        self.config = config
        self.global_config = global_config
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Half the logits: symmetrized with its transpose in forward().
        self.half_logits = Linear(
            channel_num['pair_channel'], self.config.num_bins, name='half_logits')
        init_final_linear(self.half_logits)
    def forward(self, representations, batch):
        """Builds DistogramHead module.
        Arguments:
            representations: Dictionary of representations, must contain:
                * 'pair': pair representation, shape [batch, N_res, N_res, c_z].
        Returns:
            Dictionary containing:
                * logits: logits for distogram, shape [batch, N_res, N_res, N_bins].
                * bin_edges: array containing bin breaks, shape [batch, N_bins - 1].
        """
        half_logits = self.half_logits(representations['pair'])
        # Symmetrize over (i, j): distance is order-independent.
        logits = half_logits + paddle.transpose(half_logits, perm=[0, 2, 1, 3])
        breaks = paddle.linspace(self.config.first_break, self.config.last_break,
                                 self.config.num_bins - 1)
        breaks = paddle.broadcast_to(breaks, [logits.shape[0]] + breaks.shape)
        # NOTE: the original duplicated this condition inside the return
        # expression and `del`-ed the GPU tensor; a single rebind is
        # behaviorally identical and removes the undefined-name hazard.
        if not self.training and self.global_config.low_memory is True:
            # Low-memory inference: park the large logits tensor on CPU.
            logits = logits.cpu()
        return {'logits': logits, 'bin_edges': breaks}
    def loss(self, value, batch):
        return _distogram_log_loss(value['logits'], value['bin_edges'],
                                   batch, self.config.num_bins)
def _distogram_log_loss(logits, bin_edges, batch, num_bins):
    """Log loss of a distogram."""
    positions = batch['pseudo_beta']
    mask = batch['pseudo_beta_mask']
    assert positions.shape[-1] == 3
    # Squared bin edges, broadcastable against the pairwise distance matrix.
    sq_breaks = paddle.square(bin_edges).unsqueeze([1, 2])
    # Pairwise squared distances, kept with a trailing singleton axis.
    deltas = (paddle.unsqueeze(positions, axis=-2)
              - paddle.unsqueeze(positions, axis=-3))
    dist2 = paddle.sum(paddle.square(deltas), axis=-1, keepdim=True)
    # Count how many edges each squared distance exceeds => bin index.
    true_bins = paddle.sum(dist2 > sq_breaks, axis=-1)
    errors = softmax_cross_entropy(
        labels=paddle.nn.functional.one_hot(true_bins, num_classes=num_bins),
        logits=logits)
    # Valid (i, j) pairs: outer product of the per-residue mask.
    square_mask = (paddle.unsqueeze(mask, axis=-2)
                   * paddle.unsqueeze(mask, axis=-1))
    denom = 1e-6 + paddle.sum(square_mask, axis=[-2, -1])
    avg_error = paddle.sum(errors * square_mask, axis=[-2, -1]) / denom
    return {
        'loss': avg_error,
        'true_dist': paddle.sqrt(1e-6 + dist2[..., 0])}
def dgram_from_positions(positions, num_bins, min_bin, max_bin):
    """One-hot distogram of pairwise distances, binned by squared cutoffs."""
    # Bin edges in squared-distance space; the last bin is open-ended.
    lower_breaks = paddle.square(paddle.linspace(min_bin, max_bin, num_bins))
    upper_breaks = paddle.concat(
        [lower_breaks[1:],
         paddle.full(shape=[1], fill_value=1e8, dtype='float32')])
    # Pairwise squared distances with a trailing singleton bin axis.
    diff = (paddle.unsqueeze(positions, axis=-2)
            - paddle.unsqueeze(positions, axis=-3))
    dist2 = paddle.sum(paddle.square(diff), axis=-1, keepdim=True)
    # 1.0 exactly where lower < dist2 < upper, i.e. a one-hot bin encoding.
    above_lower = (dist2 > lower_breaks.astype(dist2.dtype)).astype('float32')
    below_upper = (dist2 < upper_breaks.astype(dist2.dtype)).astype('float32')
    return above_lower * below_upper
class EvoformerIteration(nn.Layer):
"""Single iteration (block) of Evoformer stack.
Jumper et al. (2021) Suppl. Alg. 6 "EvoformerStack" lines 2-10
"""
    def __init__(self, channel_num, config, global_config, is_extra_msa=False):
        # Builds one Evoformer block: MSA row/column attention, MSA and pair
        # transitions, outer product mean, triangle multiplications and
        # triangle attentions — each followed by its (possibly shared-axis)
        # dropout. `is_extra_msa` selects the extra-MSA variant, which uses
        # global column attention and the extra-MSA channel width.
        super(EvoformerIteration, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        self.is_extra_msa = is_extra_msa
        # Row-wise Gated Self-attention with Pair Bias
        self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias(
            channel_num, self.config.msa_row_attention_with_pair_bias,
            self.global_config, is_extra_msa)
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.msa_row_attention_with_pair_bias)
        # Each dropout picks the n-d Dropout variant when configured.
        self.msa_row_attn_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        if self.is_extra_msa:
            # Extra-MSA stack: global (query-pooled) column attention.
            self.msa_column_global_attention = MSAColumnGlobalAttention(
                channel_num, config.msa_column_attention, global_config)
            dropout_rate, dropout_axis = self._parse_dropout_params(
                self.msa_column_global_attention)
            self.msa_col_attn_dropout = nn.Dropout(
                dropout_rate, axis=dropout_axis) \
                if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        else:
            # Main stack: full per-column attention.
            self.msa_column_attention = MSAColumnAttention(
                channel_num, config.msa_column_attention, global_config)
            dropout_rate, dropout_axis = self._parse_dropout_params(
                self.msa_column_attention)
            self.msa_col_attn_dropout = nn.Dropout(
                dropout_rate, axis=dropout_axis) \
                if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.msa_transition = Transition(
            channel_num, self.config.msa_transition, self.global_config,
            is_extra_msa, 'msa_transition')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.msa_transition)
        self.msa_transition_dropout = nn.Dropout(
            dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        # OuterProductMean
        self.outer_product_mean = OuterProductMean(channel_num,
            self.config.outer_product_mean, self.global_config,
            self.is_extra_msa, name='outer_product_mean')
        # Dropout
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.outer_product_mean)
        self.outer_product_mean_dropout = nn.Dropout(
            dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        # Triangle Multiplication.
        self.triangle_multiplication_outgoing = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_outgoing, self.global_config,
            name='triangle_multiplication_outgoing')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_outgoing)
        self.triangle_outgoing_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.triangle_multiplication_incoming = TriangleMultiplication(channel_num,
            self.config.triangle_multiplication_incoming, self.global_config,
            name='triangle_multiplication_incoming')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_incoming)
        self.triangle_incoming_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        # TriangleAttention.
        self.triangle_attention_starting_node = TriangleAttention(channel_num,
            self.config.triangle_attention_starting_node, self.global_config,
            name='triangle_attention_starting_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_starting_node)
        self.triangle_starting_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.triangle_attention_ending_node = TriangleAttention(channel_num,
            self.config.triangle_attention_ending_node, self.global_config,
            name='triangle_attention_ending_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_ending_node)
        self.triangle_ending_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        # Pair transition.
        self.pair_transition = Transition(
            channel_num, self.config.pair_transition, self.global_config,
            is_extra_msa, 'pair_transition')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.pair_transition)
        self.pair_transition_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
def _parse_dropout_params(self, module):
dropout_rate = 0.0 if self.global_config.deterministic else \
module.config.dropout_rate
dropout_axis = None
if module.config.shared_dropout:
dropout_axis = {
'per_row': [0, 2, 3],
'per_column': [0, 1, 3],
}[module.config.orientation]
return dropout_rate, dropout_axis
    def outer_product_mean_origin(self, msa_act, pair_act, masks):
        # Evoformer block with the ORIGINAL ordering: the outer product mean
        # runs AFTER all MSA-track updates. Every submodule is applied as a
        # dropout-wrapped residual update.
        msa_mask, pair_mask = masks['msa'], masks['pair']
        # --- MSA track: row attention (biased by pair rep) ---
        residual = self.msa_row_attention_with_pair_bias(
            msa_act, msa_mask, pair_act)
        residual = self.msa_row_attn_dropout(residual)
        msa_act = msa_act + residual
        # --- MSA track: column attention + transition ---
        # (Both branches are structurally identical; only the column
        # attention flavor differs between extra-MSA and main stacks.)
        if self.is_extra_msa:
            residual = self.msa_column_global_attention(msa_act, msa_mask)
            residual = self.msa_col_attn_dropout(residual)
            msa_act = msa_act + residual
            residual = self.msa_transition(msa_act, msa_mask)
            residual = self.msa_transition_dropout(residual)
            msa_act = msa_act + residual
        else:
            residual = self.msa_column_attention(msa_act, msa_mask)
            residual = self.msa_col_attn_dropout(residual)
            msa_act = msa_act + residual
            residual = self.msa_transition(msa_act, msa_mask)
            residual = self.msa_transition_dropout(residual)
            msa_act = msa_act + residual
        # --- MSA -> pair communication: outer product mean ---
        residual = self.outer_product_mean(msa_act, msa_mask)
        residual = self.outer_product_mean_dropout(residual)
        pair_act = pair_act + residual
        # --- Pair track: triangle multiplications ---
        residual = self.triangle_multiplication_outgoing(pair_act, pair_mask)
        residual = self.triangle_outgoing_dropout(residual)
        pair_act = pair_act + residual
        residual = self.triangle_multiplication_incoming(pair_act, pair_mask)
        residual = self.triangle_incoming_dropout(residual)
        pair_act = pair_act + residual
        # --- Pair track: triangle attentions ---
        residual = self.triangle_attention_starting_node(pair_act, pair_mask)
        residual = self.triangle_starting_dropout(residual)
        pair_act = pair_act + residual
        residual = self.triangle_attention_ending_node(pair_act, pair_mask)
        residual = self.triangle_ending_dropout(residual)
        pair_act = pair_act + residual
        # --- Pair track: transition ---
        residual = self.pair_transition(pair_act, pair_mask)
        residual = self.pair_transition_dropout(residual)
        pair_act = pair_act + residual
        return msa_act, pair_act
def outer_product_mean_first(self, msa_act, pair_act, masks):
msa_mask, pair_mask = masks['msa'], masks['pair']
residual = self.outer_product_mean(msa_act, msa_mask)
outer_product_mean = self.outer_product_mean_dropout(residual)
pair_act = pair_act + outer_product_mean
residual = self.msa_row_attention_with_pair_bias(
msa_act, msa_mask, pair_act)
residual = self.msa_row_attn_dropout(residual)
msa_act = msa_act + residual
if self.is_extra_msa:
residual = self.msa_column_global_attention(msa_act, msa_mask)
residual = self.msa_col_attn_dropout(residual)
msa_act = msa_act + residual
residual = self.msa_transition(msa_act, msa_mask)
residual = self.msa_transition_dropout(residual)
msa_act = msa_act + residual
else:
residual = self.msa_column_attention(msa_act, msa_mask)
residual = self.msa_col_attn_dropout(residual)
msa_act = msa_act + residual
residual = self.msa_transition(msa_act, msa_mask)
residual = self.msa_transition_dropout(residual)
msa_act = msa_act + residual
residual = self.triangle_multiplication_outgoing(pair_act, pair_mask)
residual = self.triangle_outgoing_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_multiplication_incoming(pair_act, pair_mask)
residual = self.triangle_incoming_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_attention_starting_node(pair_act, pair_mask)
residual = self.triangle_starting_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_attention_ending_node(pair_act, pair_mask)
residual = self.triangle_ending_dropout(residual)
pair_act = pair_act + residual
residual = self.pair_transition(pair_act, pair_mask)
residual = self.pair_transition_dropout(residual)
pair_act = pair_act + residual
return msa_act, pair_act
def outer_product_mean_end(self, msa_act, pair_act, masks):
msa_mask, pair_mask = masks['msa'], masks['pair']
residual = self.msa_row_attention_with_pair_bias(
msa_act, msa_mask, pair_act)
residual = self.msa_row_attn_dropout(residual)
msa_act = msa_act + residual
if self.is_extra_msa:
residual = self.msa_column_global_attention(msa_act, msa_mask)
residual = self.msa_col_attn_dropout(residual)
msa_act = msa_act + residual
residual = self.msa_transition(msa_act, msa_mask)
residual = self.msa_transition_dropout(residual)
msa_act = msa_act + residual
else:
residual = self.msa_column_attention(msa_act, msa_mask)
residual = self.msa_col_attn_dropout(residual)
msa_act = msa_act + residual
residual = self.msa_transition(msa_act, msa_mask)
residual = self.msa_transition_dropout(residual)
msa_act = msa_act + residual
residual = self.outer_product_mean(msa_act, msa_mask)
outer_product_mean = self.outer_product_mean_dropout(residual)
residual = self.triangle_multiplication_outgoing(pair_act, pair_mask)
residual = self.triangle_outgoing_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_multiplication_incoming(pair_act, pair_mask)
residual = self.triangle_incoming_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_attention_starting_node(pair_act, pair_mask)
residual = self.triangle_starting_dropout(residual)
pair_act = pair_act + residual
residual = self.triangle_attention_ending_node(pair_act, pair_mask)
residual = self.triangle_ending_dropout(residual)
pair_act = pair_act + residual
residual = self.pair_transition(pair_act, pair_mask)
residual = self.pair_transition_dropout(residual)
pair_act = pair_act + residual
pair_act = pair_act + outer_product_mean
return msa_act, pair_act
def forward(self, msa_act, pair_act, masks):
if self.global_config.outer_product_mean_position in ['origin', 'middle']:
msa_act, pair_act = self.outer_product_mean_origin(msa_act, pair_act, masks)
elif self.global_config.outer_product_mean_position == 'first':
msa_act, pair_act = self.outer_product_mean_first(msa_act, pair_act, masks)
elif self.global_config.outer_product_mean_position == 'end':
msa_act, pair_act = self.outer_product_mean_end(msa_act, pair_act, masks)
else:
raise Error("Only support outer_product_mean_position in ['origin', 'middle', ''first', 'end'] now!")
return msa_act, pair_act
class EmbeddingsAndEvoformer(nn.Layer):
    """Embeds the input data and runs Evoformer.
    Produces the MSA, single and pair representations.
    Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5-18
    """
    def __init__(self, channel_num, config, global_config):
        super(EmbeddingsAndEvoformer, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        # FusedLinear is a faster drop-in replacement, enabled globally.
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # InputEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
        # Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
        self.preprocess_1d = Linear(
            channel_num['target_feat'], self.config.msa_channel, name='preprocess_1d')
        self.preprocess_msa = Linear(
            channel_num['msa_feat'], self.config.msa_channel, name='preprocess_msa')
        self.left_single = Linear(
            channel_num['target_feat'], self.config.pair_channel, name='left_single')
        self.right_single = Linear(
            channel_num['target_feat'], self.config.pair_channel, name='right_single')
        # RecyclingEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
        # Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
        if self.config.recycle_pos:
            self.prev_pos_linear = Linear(
                self.config.prev_pos.num_bins, self.config.pair_channel)
        # RelPosEmbedder
        # Jumper et al. (2021) Suppl. Alg. 4 "relpos"
        # Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
        if self.config.max_relative_feature:
            # NOTE: 'pair_activiations' (sic) — name kept as-is; renaming
            # would break parameter-name-based checkpoint loading.
            self.pair_activiations = Linear(
                2 * self.config.max_relative_feature + 1,
                self.config.pair_channel)
        if self.config.recycle_features:
            self.prev_msa_first_row_norm = nn.LayerNorm(
                self.config.msa_channel)
            self.prev_pair_norm = nn.LayerNorm(self.config.pair_channel)
        # Embed templates into the pair activations.
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
        if self.config.template.enabled:
            # Fixed template feature widths (angle/pair feature dimensions).
            self.channel_num['template_angle'] = 57
            self.channel_num['template_pair'] = 88
            self.template_embedding = TemplateEmbedding(
                self.channel_num, self.config.template, self.global_config)
        # ExtraMSAEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
        self.extra_msa_activations = Linear(
            25, # 23 (20aa+unknown+gap+mask) + 1 (has_del) + 1 (del_val)
            self.config.extra_msa_channel)
        # Extra MSA Stack.
        # Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
        self.extra_msa_stack = nn.LayerList()
        for _ in range(self.config.extra_msa_stack_num_block):
            self.extra_msa_stack.append(EvoformerIteration(
                self.channel_num, self.config.evoformer, self.global_config,
                is_extra_msa=True))
        # Embed templates torsion angles
        if self.config.template.enabled and self.config.template.embed_torsion_angles:
            c = self.config.msa_channel
            self.template_single_embedding = Linear(
                self.channel_num['template_angle'], c)
            self.template_projection = Linear(c, c)
        # Main trunk of the network
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
        self.evoformer_iteration = nn.LayerList()
        for _ in range(self.config.evoformer_num_block):
            self.evoformer_iteration.append(EvoformerIteration(
                self.channel_num, self.config.evoformer, self.global_config,
                is_extra_msa=False))
        # Projects the first MSA row into the single representation.
        self.single_activations = Linear(
            self.config.msa_channel, self.config.seq_channel)
    def _pseudo_beta_fn(self, aatype, all_atom_positions, all_atom_masks):
        """Select the pseudo-beta atom position per residue: CA for glycine
        (which has no CB), CB otherwise.

        Returns `pseudo_beta` only when `all_atom_masks` is None, otherwise
        the tuple `(pseudo_beta, pseudo_beta_mask)`.
        """
        gly_id = paddle.ones_like(aatype) * residue_constants.restype_order['G']
        is_gly = paddle.equal(aatype, gly_id)
        ca_idx = residue_constants.atom_order['CA']
        cb_idx = residue_constants.atom_order['CB']
        # n is the tensor rank; the atom axis is the second-to-last one.
        n = len(all_atom_positions.shape)
        pseudo_beta = paddle.where(
            paddle.tile(paddle.unsqueeze(is_gly, axis=-1),
                        [1] * len(is_gly.shape) + [3]),
            paddle.squeeze(
                all_atom_positions.slice([n-2], [ca_idx], [ca_idx+1]),
                axis=-2),
            paddle.squeeze(
                all_atom_positions.slice([n-2], [cb_idx], [cb_idx+1]),
                axis=-2))
        if all_atom_masks is not None:
            # NOTE(review): `len(tensor)` is the leading dimension, while `n`
            # above is the rank — `len(all_atom_masks.shape)` looks intended
            # so that `m-1` addresses the atom (last) axis. This branch is
            # never exercised in this file (forward passes None); confirm
            # before relying on it.
            m = len(all_atom_masks)
            pseudo_beta_mask = paddle.where(
                is_gly,
                paddle.squeeze(
                    all_atom_masks.slice([m-1], [ca_idx], [ca_idx+1]),
                    axis=-1),
                paddle.squeeze(
                    all_atom_masks.slice([m-1], [cb_idx], [cb_idx+1]),
                    axis=-1))
            pseudo_beta_mask = paddle.squeeze(pseudo_beta_mask, axis=-1)
            return pseudo_beta, pseudo_beta_mask
        else:
            return pseudo_beta
    def _create_extra_msa_feature(self, batch):
        """Build the 25-channel extra-MSA feature: one-hot residue identity
        plus has-deletion and deletion-value scalars."""
        # 23: 20aa + unknown + gap + bert mask
        msa_1hot = nn.functional.one_hot(batch['extra_msa'], 23)
        msa_feat = [msa_1hot,
                    paddle.unsqueeze(batch['extra_has_deletion'], axis=-1),
                    paddle.unsqueeze(batch['extra_deletion_value'], axis=-1)]
        return paddle.concat(msa_feat, axis=-1)
    def forward(self, batch):
        """Embed the input features, apply recycling/template/extra-MSA
        embedders, then run the main Evoformer trunk.

        Args:
            batch: dict of input feature tensors (data-pipeline output).
        Returns:
            dict with 'single', 'pair', 'msa' (template rows cropped away)
            and 'msa_first_row' activations.
        """
        # InputEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 5
        # Jumper et al. (2021) Suppl. Alg. 3 "InputEmbedder"
        preprocess_1d = self.preprocess_1d(batch['target_feat'])
        # Broadcast the per-residue embedding over every MSA row.
        msa_activations = paddle.unsqueeze(preprocess_1d, axis=1) + \
                    self.preprocess_msa(batch['msa_feat'])
        right_single = self.right_single(batch['target_feat'])  # 1, n_res, 22 -> 1, n_res, 128
        right_single = paddle.unsqueeze(right_single, axis=1)   # 1, n_res, 128 -> 1, 1, n_res, 128
        left_single = self.left_single(batch['target_feat'])    # 1, n_res, 22 -> 1, n_res, 128
        left_single = paddle.unsqueeze(left_single, axis=2)     # 1, n_res, 128 -> 1, n_res, 1, 128
        # Broadcast sum yields the initial [1, n_res, n_res, 128] pair tensor.
        pair_activations = left_single + right_single
        mask_2d = paddle.unsqueeze(batch['seq_mask'], axis=1) * paddle.unsqueeze(batch['seq_mask'], axis=2)
        # Inject previous outputs for recycling.
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" line 6
        # Jumper et al. (2021) Suppl. Alg. 32 "RecyclingEmbedder"
        if self.config.recycle_pos and 'prev_pos' in batch:
            prev_pseudo_beta = self._pseudo_beta_fn(
                batch['aatype'], batch['prev_pos'], None)
            dgram = dgram_from_positions(
                prev_pseudo_beta, **self.config.prev_pos)
            pair_activations += self.prev_pos_linear(dgram)
        if self.config.recycle_features:
            if 'prev_msa_first_row' in batch:
                prev_msa_first_row = self.prev_msa_first_row_norm(
                    batch['prev_msa_first_row'])
                # A workaround for `jax.ops.index_add`: update only row 0 of
                # the MSA by splitting it off and re-concatenating.
                msa_first_row = paddle.squeeze(msa_activations[:, 0, :], axis=1)
                msa_first_row += prev_msa_first_row
                msa_first_row = paddle.unsqueeze(msa_first_row, axis=1)
                msa_activations = paddle.concat([msa_first_row, msa_activations[:, 1:, :]], axis=1)
            if 'prev_pair' in batch:
                pair_activations += self.prev_pair_norm(batch['prev_pair'])
        # RelPosEmbedder
        # Jumper et al. (2021) Suppl. Alg. 4 "relpos"
        # Jumper et al. (2021) Suppl. Alg. 5 "one_hot"
        if self.config.max_relative_feature:
            pos = batch['residue_index']  # [bs, N_res]
            # Pairwise sequence offsets, clipped to the max relative range.
            offset = paddle.unsqueeze(pos, axis=[-1]) - \
                paddle.unsqueeze(pos, axis=[-2])
            rel_pos = nn.functional.one_hot(
                paddle.clip(
                    offset + self.config.max_relative_feature,
                    min=0,
                    max=2 * self.config.max_relative_feature),
                2 * self.config.max_relative_feature + 1)
            rel_pos_bias = self.pair_activiations(rel_pos)
            pair_activations += rel_pos_bias
        # TemplateEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-13
        if self.config.template.enabled:
            template_batch = {k: batch[k] for k in batch
                              if k.startswith('template_')}
            template_pair_repr = self.template_embedding(
                pair_activations, template_batch, mask_2d)
            pair_activations += template_pair_repr
        # ExtraMSAEmbedder
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 14-16
        extra_msa_feat = self._create_extra_msa_feature(batch)
        extra_msa_activations = self.extra_msa_activations(extra_msa_feat)
        # ==================================================
        #  Extra MSA Stack
        # Jumper et al. (2021) Suppl. Alg. 18 "ExtraMsaStack"
        # ==================================================
        extra_msa_stack_input = {
            'msa': extra_msa_activations,
            'pair': pair_activations,
        }
        # Gradient checkpointing (recompute) is enabled per-block from a
        # configured start index to trade compute for memory in training.
        for idx, extra_msa_stack_iteration in enumerate(self.extra_msa_stack):
            extra_msa_act, extra_pair_act = recompute_wrapper(extra_msa_stack_iteration,
                extra_msa_stack_input['msa'],
                extra_msa_stack_input['pair'],
                {'msa': batch['extra_msa_mask'],
                    'pair': mask_2d},
                is_recompute=self.training and idx >= self.config.extra_msa_stack_recompute_start_block_index)
            extra_msa_stack_output = {
                'msa': extra_msa_act,
                'pair': extra_pair_act}
            extra_msa_stack_input = {
                'msa': extra_msa_stack_output['msa'],
                'pair': extra_msa_stack_output['pair']}
        # The extra-MSA stack only contributes through the pair channel.
        evoformer_input = {
            'msa': msa_activations,
            'pair': extra_msa_stack_output['pair'],
        }
        evoformer_masks = {
            'msa': batch['msa_mask'],
            'pair': mask_2d,
        }
        # ==================================================
        #  Template angle feat
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 7-8
        # ==================================================
        if self.config.template.enabled and self.config.template.embed_torsion_angles:
            num_templ, num_res = batch['template_aatype'].shape[1:]
            aatype_one_hot = nn.functional.one_hot(batch['template_aatype'], 22)
            # Embed the templates aatype, torsion angles and masks.
            # Shape (templates, residues, msa_channels)
            ret = all_atom.atom37_to_torsion_angles(
                aatype=batch['template_aatype'],
                all_atom_pos=batch['template_all_atom_positions'],
                all_atom_mask=batch['template_all_atom_masks'],
                # Ensure consistent behaviour during testing:
                placeholder_for_undefined=not self.global_config.zero_init)
            template_features = paddle.concat([
                aatype_one_hot,
                paddle.reshape(ret['torsion_angles_sin_cos'],
                               [-1, num_templ, num_res, 14]),
                paddle.reshape(ret['alt_torsion_angles_sin_cos'],
                               [-1, num_templ, num_res, 14]),
                ret['torsion_angles_mask']], axis=-1)
            template_activations = self.template_single_embedding(
                template_features)
            template_activations = nn.functional.relu(template_activations)
            template_activations = self.template_projection(template_activations)
            # Concatenate the templates to the msa.
            evoformer_input['msa'] = paddle.concat(
                [evoformer_input['msa'], template_activations], axis=1)
            # Concatenate templates masks to the msa masks.
            # Use mask from the psi angle, as it only depends on the backbone atoms
            # from a single residue.
            torsion_angle_mask = ret['torsion_angles_mask'][..., 2]
            torsion_angle_mask = torsion_angle_mask.astype(
                evoformer_masks['msa'].dtype)
            evoformer_masks['msa'] = paddle.concat(
                [evoformer_masks['msa'], torsion_angle_mask], axis=1)
        # ==================================================
        #  Main MSA Stack
        # Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 17-18
        # ==================================================
        for idx, evoformer_block in enumerate(self.evoformer_iteration):
            msa_act, pair_act = recompute_wrapper(evoformer_block,
                evoformer_input['msa'],
                evoformer_input['pair'],
                evoformer_masks,
                is_recompute=self.training and idx >= self.config.evoformer_recompute_start_block_index)
            evoformer_output = {
                'msa': msa_act,
                'pair': pair_act}
            evoformer_input = {
                'msa': evoformer_output['msa'],
                'pair': evoformer_output['pair'],
            }
        msa_activations = evoformer_output['msa']
        pair_activations = evoformer_output['pair']
        # Single representation comes from the first (query) MSA row.
        single_activations = self.single_activations(msa_activations[:, 0])
        num_seq = batch['msa_feat'].shape[1]
        output = {
            'single': single_activations,
            'pair': pair_activations,
            # Crop away template rows such that they are not used
            # in MaskedMsaHead.
            'msa': msa_activations[:, :num_seq],
            'msa_first_row': msa_activations[:, 0],
        }
        return output
class OuterProductMean(nn.Layer):
    """Computes mean outer product.
    Jumper et al. (2021) Suppl. Alg. 10 "OuterProductMean"
    """
    def __init__(self, channel_num, config, global_config, is_extra_msa, name='outer_product_mean'):
        super(OuterProductMean, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        # FusedLinear is a faster drop-in replacement, enabled globally.
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Input channel width depends on which MSA stack this layer sits in.
        if is_extra_msa:
            c_m = channel_num['extra_msa_channel']
        else:
            c_m = channel_num['msa_channel']
        self.layer_norm_input = nn.LayerNorm(c_m, name='layer_norm_input')
        self.left_projection = Linear(
            c_m, self.config.num_outer_channel, name='left_projection')
        self.right_projection = Linear(
            c_m, self.config.num_outer_channel, name='right_projection')
        # Output projection starts at zero when zero_init is set, so the
        # residual branch is initially the identity.
        if self.global_config.zero_init:
            init_w = nn.initializer.Constant(value=0.0)
        else:
            init_w = nn.initializer.KaimingNormal()
        self.output_w = paddle.create_parameter(
            [self.config.num_outer_channel, self.config.num_outer_channel, channel_num['pair_channel']],
            'float32', default_initializer=init_w)
        self.output_b = paddle.create_parameter(
            [channel_num['pair_channel']], 'float32',
            default_initializer=nn.initializer.Constant(value=0.0))
    def forward(self, act, mask):
        """Builds OuterProductMean module.
        Arguments:
            act: MSA representation, shape [batch, N_seq, N_res, c_m].
            mask: MSA mask, shape [batch, N_seq, N_res].
        Returns:
            Update to pair representation, shape [batch, N_res, N_res, c_z].
        """
        act = self.layer_norm_input(act)
        right_act = self.right_projection(act)
        left_act = self.left_projection(act)
        mask = paddle.unsqueeze(mask, axis=-1)
        left_act = mask * left_act
        epsilon = 1e-3
        # Per-(i, j) count of sequences valid at both residues; epsilon keeps
        # the later division finite where no sequence is valid.
        norm = paddle.einsum('nabc,nadc->nbdc', mask, mask) + epsilon
        def fast_einsum(equation, left_act, right_act):
            # Matmul-based implementation of the einsum below: flatten the
            # trailing two axes, contract over the sequence axis, then
            # restore and permute the output axes.
            assert equation == "nacb,nade->ndceb"
            tmp = paddle.matmul(
                    x=paddle.reshape(right_act, [right_act.shape[0], right_act.shape[1], -1]), # na(de)
                    y=paddle.reshape(left_act, [left_act.shape[0], left_act.shape[1], -1]), # na(cb)
                    transpose_x=True,
                    transpose_y=False) # n(de)(cb)
            tmp = paddle.reshape(tmp, [left_act.shape[0], right_act.shape[2], right_act.shape[3], left_act.shape[2], left_act.shape[3]])
            out = paddle.transpose(tmp, perm=[0, 1, 3, 2, 4])
            return out
        def compute_chunk(left_act, right_act):
            # This is equivalent to
            #
            # act = jnp.einsum('abc,ade->dceb', left_act, right_act)
            # act = jnp.einsum('dceb,cef->bdf', act, output_w) + output_b
            #
            # but faster. maybe for subbatch inference?
            left_act = left_act.transpose([0, 1, 3, 2])
            act = fast_einsum('nacb,nade->ndceb', left_act, right_act)
            act = paddle.einsum('ndceb,cef->ndbf', act, self.output_w) + self.output_b
            return act.transpose([0, 2, 1, 3])
        if not self.training:
            # low memory mode using subbatch
            sb_chunk = subbatch(compute_chunk, [0], [2],
                                self.config.chunk_size, 1)
            act = sb_chunk(left_act, right_act)
        else:
            act = compute_chunk(left_act, right_act)
        # Turn the masked outer-product sum into a mean over valid sequences.
        act = act / norm
        return act
class TriangleAttention(nn.Layer):
    """Triangle Attention.
    Jumper et al. (2021) Suppl. Alg. 13 "TriangleAttentionStartingNode"
    Jumper et al. (2021) Suppl. Alg. 14 "TriangleAttentionEndingNode"
    """
    def __init__(self, channel_num, config, global_config, name='triangle_attention'):
        super(TriangleAttention, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        # Orientation decides whether attention runs over rows or columns of
        # the pair tensor (ending-node attention transposes first).
        assert config.orientation in ['per_row', 'per_column']
        self.query_norm = nn.LayerNorm(channel_num['pair_channel'],
                                       name='query_norm')
        # Projects pair activations into a per-head additive attention bias.
        self.feat_2d_weights = paddle.create_parameter(
            [channel_num['pair_channel'], self.config.num_head], 'float32',
            default_initializer=nn.initializer.Normal(
                std=1. / np.sqrt(channel_num['pair_channel'])))
        self.attention = Attention(self.config, self.global_config,
                        channel_num['pair_channel'], channel_num['pair_channel'],
                        channel_num['pair_channel'])
    def forward(self, pair_act, pair_mask):
        """Builds TriangleAttention module.
        Arguments:
            pair_act: [batch, N_res, N_res, c_z] pair activations tensor
            pair_mask: [batch, N_res, N_res] mask of non-padded regions in the tensor.
        Returns:
            Update to pair_act, shape [batch, N_res, N_res, c_z].
        """
        if self.config.orientation == 'per_column':
            pair_act = pair_act.transpose([0, 2, 1, 3])
            pair_mask = pair_mask.transpose([0, 2, 1])
        # Masked positions get a large negative bias (softmax weight ~0).
        bias = 1e9 * (pair_mask - 1.)
        bias = paddle.unsqueeze(bias, axis=[2, 3])
        pair_act = self.query_norm(pair_act)
        # Bias shared across the "batch of rows" dimension, hence nonbatched.
        nonbatched_bias = paddle.einsum('bqkc,ch->bhqk', pair_act, self.feat_2d_weights)
        if not self.training:
            # low memory mode using subbatch
            sb_attn = subbatch(self.attention, [0, 1, 2], [1, 1, 1],
                               self.global_config.subbatch_size, 1, same_arg_idx={1: 0})
            pair_act = sb_attn(pair_act, pair_act, bias, nonbatched_bias)
        else:
            pair_act = self.attention(pair_act, pair_act, bias, nonbatched_bias)
        # Undo the transpose so the caller always sees row-major orientation.
        if self.config.orientation == 'per_column':
            pair_act = pair_act.transpose([0, 2, 1, 3])
        return pair_act
class TriangleMultiplication(nn.Layer):
    """Triangle multiplication layer ("outgoing" or "incoming").
    Jumper et al. (2021) Suppl. Alg. 11 "TriangleMultiplicationOutgoing"
    Jumper et al. (2021) Suppl. Alg. 12 "TriangleMultiplicationIncoming"
    """
    def __init__(self, channel_num, config, global_config, name='triangle_multiplication'):
        super(TriangleMultiplication, self).__init__()
        self.channel_num = channel_num
        self.config = config
        self.global_config = global_config
        # FusedLinear is a faster drop-in replacement, enabled globally.
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        self.layer_norm_input = nn.LayerNorm(self.channel_num['pair_channel'], name='layer_norm_input')
        self.left_projection = Linear(self.channel_num['pair_channel'],
                                      self.config.num_intermediate_channel, name='left_projection')
        self.right_projection = Linear(self.channel_num['pair_channel'],
                                       self.config.num_intermediate_channel, name='right_projection')
        # Gates are initialized so they start (nearly) open.
        self.left_gate = Linear(self.channel_num['pair_channel'],
                                self.config.num_intermediate_channel, name='left_gate')
        init_gate_linear(self.left_gate)
        self.right_gate = Linear(self.channel_num['pair_channel'],
                                 self.config.num_intermediate_channel, name='right_gate')
        init_gate_linear(self.right_gate)
        # line 4
        self.center_layer_norm = nn.LayerNorm(self.config.num_intermediate_channel, name='center_layer_norm')
        self.output_projection = Linear(self.config.num_intermediate_channel,
                                        self.channel_num['pair_channel'], name='output_projection')
        init_final_linear(self.output_projection)
        # line 3
        # NOTE(review): `name='output_projection'` duplicates the layer above;
        # 'gating_linear' was presumably intended. Left unchanged because
        # parameter names may be baked into released checkpoints — confirm
        # against the checkpoint key names before fixing.
        self.gating_linear = Linear(self.channel_num['pair_channel'],
                                    self.channel_num['pair_channel'], name='output_projection')
        init_gate_linear(self.gating_linear)
    def forward(self, act, mask):
        """Builds TriangleMultiplication module.
        Arguments:
            act: Pair activations, shape [batch, N_res, N_res, c_z]
            mask: Pair mask, shape [batch, N_res, N_res].
        Returns:
            Outputs, same shape/type as act.
        """
        mask = paddle.unsqueeze(mask, axis=-1) # [batch, N_res, N_res, 1]
        act = self.layer_norm_input(act) # line 1
        left_proj_act = mask * self.left_projection(act)
        right_proj_act = mask * self.right_projection(act)
        left_gate_values = nn.functional.sigmoid(self.left_gate(act))
        right_gate_values = nn.functional.sigmoid(self.right_gate(act))
        left_proj_act = left_proj_act * left_gate_values
        right_proj_act = right_proj_act * right_gate_values
        # "Outgoing" edges equation: 'ikc,jkc->ijc'
        # "Incoming" edges equation: 'kjc,kic->ijc'
        # Note on the Suppl. Alg. 11 & 12 notation:
        # For the "outgoing" edges, a = left_proj_act and b = right_proj_act
        # For the "incoming" edges, it's swapped:
        #   b = left_proj_act and a = right_proj_act
        gate_values = nn.functional.sigmoid(self.gating_linear(act)) # line 3
        if self.config.equation == 'ikc,jkc->ijc':
            # Outgoing
            dim, out_idx = 1, 1
            equation = 'bikc,bjkc->bijc'
        elif self.config.equation == 'kjc,kic->ijc':
            # Incoming
            dim, out_idx = 2, 2
            equation = 'bkjc,bkic->bijc'
        else:
            raise ValueError('unknown equation.')
        if not self.training:
            # Low-memory inference: evaluate the einsum in sub-batches.
            einsum_fn = subbatch(paddle.einsum, [1], [dim],
                                 self.global_config.subbatch_size, out_idx)
            act = einsum_fn(equation, left_proj_act, right_proj_act)
        else:
            # Outgoing equation = 'bikc,bjkc->bijc'
            # Incoming equation = 'bkjc,bkic->bijc'
            act = paddle.einsum(equation, left_proj_act, right_proj_act)
        act = self.center_layer_norm(act)
        act = self.output_projection(act)
        act = act * gate_values
        return act
class TemplatePair(nn.Layer):
    """Pair processing for the templates.
    Jumper et al. (2021) Suppl. Alg. 16 "TemplatePairStack" lines 2-6
    """
    def __init__(self, channel_num, config, global_config):
        super(TemplatePair, self).__init__()
        self.config = config
        self.global_config = global_config
        # Deliberately shadows the `channel_num` argument: the template pair
        # stack runs at the template pair width, not the trunk's pair width.
        channel_num = {}
        channel_num['pair_channel'] = self.config.triangle_attention_ending_node.value_dim
        self.triangle_attention_starting_node = TriangleAttention(channel_num,
                    self.config.triangle_attention_starting_node, self.global_config,
                    name='triangle_attention_starting_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_starting_node)
        # `Dropout` (custom n-d variant) is used instead of nn.Dropout when
        # use_dropout_nd is enabled globally; same pattern for each sub-layer.
        self.triangle_starting_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.triangle_attention_ending_node = TriangleAttention(channel_num,
                    self.config.triangle_attention_ending_node, self.global_config,
                    name='triangle_attention_ending_node')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_attention_ending_node)
        self.triangle_ending_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.triangle_multiplication_outgoing = TriangleMultiplication(channel_num,
                    self.config.triangle_multiplication_outgoing, self.global_config,
                    name='triangle_multiplication_outgoing')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_outgoing)
        self.triangle_outgoing_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.triangle_multiplication_incoming = TriangleMultiplication(channel_num,
                    self.config.triangle_multiplication_incoming, self.global_config,
                    name='triangle_multiplication_incoming')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.triangle_multiplication_incoming)
        self.triangle_incoming_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
        self.pair_transition = Transition(channel_num, self.config.pair_transition,
                    self.global_config, is_extra_msa=False,
                    transition_type='pair_transition')
        dropout_rate, dropout_axis = self._parse_dropout_params(
            self.pair_transition)
        self.pair_transition_dropout = nn.Dropout(dropout_rate, axis=dropout_axis) \
            if not self.global_config.use_dropout_nd else Dropout(dropout_rate, axis=dropout_axis)
    def _parse_dropout_params(self, module):
        """Return (rate, axis) for the dropout wrapping `module`: rate forced
        to 0.0 in deterministic mode; axis list chosen by orientation when
        shared dropout is configured."""
        dropout_rate = 0.0 if self.global_config.deterministic else \
            module.config.dropout_rate
        dropout_axis = None
        if module.config.shared_dropout:
            dropout_axis = {
                'per_row': [0, 2, 3],
                'per_column': [0, 1, 3],
            }[module.config.orientation]
        return dropout_rate, dropout_axis
    def forward(self, pair_act, pair_mask):
        """Builds one block of TemplatePair module.
        Arguments:
            pair_act: Pair activations for single template, shape [batch, N_res, N_res, c_t].
            pair_mask: Pair mask, shape [batch, N_res, N_res].
        Returns:
            Updated pair_act, shape [batch, N_res, N_res, c_t].
        """
        # Residual sub-layers applied in the reference order: starting-node
        # attention, ending-node attention, outgoing/incoming multiplication,
        # then the pair transition.
        residual = self.triangle_attention_starting_node(pair_act, pair_mask)
        residual = self.triangle_starting_dropout(residual)
        pair_act = pair_act + residual
        residual = self.triangle_attention_ending_node(pair_act, pair_mask)
        residual = self.triangle_ending_dropout(residual)
        pair_act = pair_act + residual
        residual = self.triangle_multiplication_outgoing(pair_act, pair_mask)
        residual = self.triangle_outgoing_dropout(residual)
        pair_act = pair_act + residual
        residual = self.triangle_multiplication_incoming(pair_act, pair_mask)
        residual = self.triangle_incoming_dropout(residual)
        pair_act = pair_act + residual
        residual = self.pair_transition(pair_act, pair_mask)
        residual = self.pair_transition_dropout(residual)
        pair_act = pair_act + residual
        return pair_act
class SingleTemplateEmbedding(nn.Layer):
    """Embeds a single template.
    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9+11
    """
    def __init__(self, channel_num, config, global_config):
        super(SingleTemplateEmbedding, self).__init__()
        self.config = config
        self.channel_num = channel_num
        self.global_config = global_config
        # FusedLinear is a faster drop-in replacement, enabled globally.
        Linear = paddle.incubate.nn.FusedLinear if self.global_config.fuse_linear else paddle.nn.Linear
        # Projects the concatenated template pair features into the
        # template-pair-stack channel width.
        self.embedding2d = Linear(channel_num['template_pair'],
            self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
        self.template_pair_stack = nn.LayerList()
        for _ in range(self.config.template_pair_stack.num_block):
            self.template_pair_stack.append(TemplatePair(
                self.channel_num, self.config.template_pair_stack, self.global_config))
        self.output_layer_norm = nn.LayerNorm(self.config.attention.key_dim)
    def forward(self, query_embedding, batch, mask_2d):
        """Build the single template embedding.
        Arguments:
            query_embedding: Query pair representation, shape [batch, N_res, N_res, c_z].
            batch: A batch of template features (note the template dimension has been
                stripped out as this module only runs over a single template).
            mask_2d: Padding mask (Note: this doesn't care if a template exists,
                unlike the template_pseudo_beta_mask).
        Returns:
            A template embedding [N_res, N_res, c_z].
        """
        assert mask_2d.dtype == query_embedding.dtype, f"mask_2d.dtype ({mask_2d.dtype}) is not the same with query_embedding.dtype ({query_embedding.dtype})!"
        dtype = query_embedding.dtype
        num_res = batch['template_aatype'].shape[1]
        template_mask = batch['template_pseudo_beta_mask']
        # template_mask[..., None] * template_mask[..., None, :]
        template_mask_2d = template_mask.unsqueeze(axis=-1) * template_mask.unsqueeze(axis=-2)
        template_mask_2d = template_mask_2d.astype(dtype)
        # Distogram of pseudo-beta distances as the base pair feature.
        template_dgram = dgram_from_positions(
            batch['template_pseudo_beta'],
            **self.config.dgram_features)
        template_dgram = template_dgram.astype(dtype)
        aatype = nn.functional.one_hot(batch['template_aatype'], 22)
        aatype = aatype.astype(dtype)
        to_concat = [template_dgram, template_mask_2d.unsqueeze(axis=-1)]
        # Residue identities broadcast along both pair axes.
        to_concat.append(paddle.tile(aatype.unsqueeze(axis=-3), # aatype[..., None, :, :],
                                     [1, num_res, 1, 1]))
        to_concat.append(paddle.tile(aatype.unsqueeze(axis=-2), # aatype[..., None, :],
                                     [1, 1, num_res, 1]))
        # Build per-residue backbone frames from N/CA/C and express each
        # residue's CA position in every other residue's local frame.
        n, ca, c = [residue_constants.atom_order[a]
                    for a in ('N', 'CA', 'C')]
        rot, trans = quat_affine.make_transform_from_reference(
            n_xyz=batch['template_all_atom_positions'][..., n, :],
            ca_xyz=batch['template_all_atom_positions'][..., ca, :],
            c_xyz=batch['template_all_atom_positions'][..., c, :])
        affines = quat_affine.QuatAffine(
            quaternion=quat_affine.rot_to_quat(rot),
            translation=trans,
            rotation=rot)
        points = [paddle.unsqueeze(x, axis=-2) for x in
                  paddle.unstack(affines.translation, axis=-1)]
        affine_vec = affines.invert_point(points, extra_dims=1)
        inv_distance_scalar = paddle.rsqrt(
            1e-6 + sum([paddle.square(x) for x in affine_vec]))
        # Backbone affine mask: whether the residue has C, CA, N
        # (the template mask defined above only considers pseudo CB).
        template_mask = (
            batch['template_all_atom_masks'][..., n] *
            batch['template_all_atom_masks'][..., ca] *
            batch['template_all_atom_masks'][..., c])
        # template_mask[..., None] * template_mask[..., None, :]
        template_mask_2d = template_mask.unsqueeze(axis=-1) * template_mask.unsqueeze(axis=-2)
        inv_distance_scalar *= template_mask_2d.astype(inv_distance_scalar.dtype)
        # Unit vectors between residues in local frames; zeroed out when the
        # config disables the template unit-vector feature.
        unit_vector = [(x * inv_distance_scalar).unsqueeze(axis=-1) for x in affine_vec]
        unit_vector = [x.astype(dtype) for x in unit_vector]
        if not self.config.use_template_unit_vector:
            unit_vector = [paddle.zeros_like(x) for x in unit_vector]
        to_concat.extend(unit_vector)
        template_mask_2d = template_mask_2d.astype(dtype)
        to_concat.append(template_mask_2d.unsqueeze(axis=-1))
        act = paddle.concat(to_concat, axis=-1)
        # Mask out non-template regions so we don't get arbitrary values in the
        # distogram for these regions.
        act *= template_mask_2d.unsqueeze(axis=-1)
        act = self.embedding2d(act)
        # Gradient checkpointing (recompute) per block from the configured
        # start index, training only.
        for idx, pair_encoder in enumerate(self.template_pair_stack):
            act = recompute_wrapper(pair_encoder, act, mask_2d,
                is_recompute=self.training and idx >= self.config.template_pair_stack.recompute_start_block_index)
        act = self.output_layer_norm(act)
        return act
class TemplateEmbedding(nn.Layer):
    """Embeds a set of templates.
    Jumper et al. (2021) Suppl. Alg. 2 "Inference" lines 9-12
    Jumper et al. (2021) Suppl. Alg. 17 "TemplatePointwiseAttention"
    """
    def __init__(self, channel_num, config, global_config):
        super(TemplateEmbedding, self).__init__()
        self.config = config
        self.global_config = global_config
        self.single_template_embedding = SingleTemplateEmbedding(
            channel_num, config, global_config)
        # Pointwise attention from each query pair position over templates.
        self.attention = Attention(
            config.attention, global_config,
            channel_num['pair_channel'],
            config.attention.key_dim,
            channel_num['pair_channel'])
    def forward(self, query_embedding, template_batch, mask_2d):
        """Build TemplateEmbedding module.
        Arguments:
            query_embedding: Query pair representation, shape [n_batch, N_res, N_res, c_z].
            template_batch: A batch of template features.
            mask_2d: Padding mask (Note: this doesn't care if a template exists,
                unlike the template_pseudo_beta_mask).
        Returns:
            A template embedding [n_batch, N_res, N_res, c_z].
        """
        num_templates = template_batch['template_mask'].shape[1]
        num_channels = (self.config.template_pair_stack
                        .triangle_attention_ending_node.value_dim)
        num_res = query_embedding.shape[1]
        dtype = query_embedding.dtype
        template_mask = template_batch['template_mask']
        template_mask = template_mask.astype(dtype)
        query_channels = query_embedding.shape[-1]
        # Embed each template separately, slicing it out of the batch.
        outs = []
        for i in range(num_templates):
            # By default, num_templates = 4
            batch0 = {k: paddle.squeeze(v.slice([1], [i], [i+1]), axis=1)
                      for k, v in template_batch.items()}
            outs.append(self.single_template_embedding(
                query_embedding, batch0, mask_2d))
        template_pair_repr = paddle.stack(outs, axis=1)
        # Flatten (i, j) into one axis so each pair position attends over the
        # template dimension independently.
        flat_query = paddle.reshape(
            query_embedding, [-1, num_res * num_res, 1, query_channels])
        flat_templates = paddle.reshape(
            paddle.transpose(template_pair_repr, [0, 2, 3, 1, 4]),
            [-1, num_res * num_res, num_templates, num_channels])
        # Missing templates get a large negative bias (softmax weight ~0).
        bias = 1e9 * (template_mask[:, None, None, None, :] - 1.)
        if not self.training:
            # Low-memory inference: attend over pair positions in sub-batches.
            sb_attn = subbatch(self.attention, [0, 1], [1, 1],
                               self.config.subbatch_size, 1)
            emb = sb_attn(flat_query, flat_templates, bias)
        else:
            emb = self.attention(flat_query, flat_templates, bias)
        emb = paddle.reshape(
            emb, [-1, num_res, num_res, query_channels])
        # No gradients if no templates.
        emb *= (paddle.sum(template_mask) > 0.).astype(emb.dtype)
        return emb
| 99,940 | 41.080421 | 159 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold/alphafold_paddle/model/utils.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils."""
import os
import numbers
import functools
import collections
import paddle
import numpy as np
from typing import Any, Mapping
from alphafold_paddle.common import protein
from alphafold_paddle.common import confidence
def jax_params_to_paddle(params):
    """Convert a flat dict of JAX checkpoint weights to Paddle state-dict keys.

    Renaming rules:
        1. alphafold/alphafold_iteration/evoformer/template_embedding/single_template_embedding/template_pair_stack/*
           ==> '...template_pair_stack.<i>.*' (one entry per stacked layer)
        2. alphafold/alphafold_iteration/evoformer/extra_msa_stack/*
           ==> 'alphafold_iteration.evoformer.extra_msa_stack.<i>.*'
        3. alphafold/alphafold_iteration/evoformer/evoformer_iteration/*
           ==> 'alphafold.alphafold_iteration.evoformer.evoformer_iteration.<i>.*'
        4. */__layer_stack_no_state/* ==> '*.*'
        5. *//weights ==> '*.weight'
        6. *//bias   ==> '*.bias'
        7. *//scale  ==> '*.weight'
        8. *//offset ==> '*.bias'
    """
    rule_1_prefix = 'alphafold/alphafold_iteration/evoformer/template_embedding/single_template_embedding/template_pair_stack/'
    rule_2_prefix = 'alphafold/alphafold_iteration/evoformer/extra_msa_stack/'
    rule_3_prefix = 'alphafold/alphafold_iteration/evoformer/evoformer_iteration/'
    rule_4_prefix = '__layer_stack_no_state/'

    # Order matters: '//weights' etc. must be rewritten before the plain '//'.
    leaf_renames = (
        ('//weights', '.weight'),   # rule 5
        ('//bias', '.bias'),        # rule 6
        ('//scale', '.weight'),     # rule 7
        ('//offset', '.bias'),      # rule 8
        ('//', '.'),
        ('/', '.'),
    )

    pd_params = dict()

    def _expand_stacked(stack_prefix, key):
        # A stacked parameter's leading axis indexes the layer; emit one
        # renamed entry per layer (rules 1-3 share this logic).
        depth = params[key].shape[0]
        tail = key[len(stack_prefix):]
        if tail.startswith(rule_4_prefix):  # rule 4
            tail = tail[len(rule_4_prefix):]
        for old, new in leaf_renames:
            tail = tail.replace(old, new)
        head = stack_prefix.replace('/', '.')
        for layer in range(depth):
            pd_params[f'{head}{layer}.{tail}'] = np.copy(params[key][layer])

    for key in params.keys():
        if key.startswith(rule_1_prefix):
            _expand_stacked(rule_1_prefix, key)
        elif key.startswith(rule_2_prefix):
            _expand_stacked(rule_2_prefix, key)
        elif key.startswith(rule_3_prefix):
            _expand_stacked(rule_3_prefix, key)
        else:
            # Non-stacked parameter: rename in place ('//bias' falls through
            # to the generic '//' rule here, as in the stacked case).
            new_key = key
            for old, new in (('//weights', '.weight'), ('//scale', '.weight'),
                             ('//offset', '.bias'), ('//', '.'), ('/', '.')):
                new_key = new_key.replace(old, new)
            pd_params[new_key] = np.copy(params[key])
    return pd_params
def slice_batch(batch, i):
    """Select the i-th element of every entry of *batch* (a dict of indexables)."""
    return {key: entry[i] for key, entry in batch.items()}
def add_batch_dim(batch):
    """Prepend a singleton batch axis to every array in *batch*."""
    return {key: value[None,] for key, value in batch.items()}
def map_to_tensor(batch, add_batch=False):
    """Convert every array in *batch* to a paddle Tensor.

    When *add_batch* is True a leading singleton batch axis is added first.
    """
    if add_batch:
        batch = add_batch_dim(batch)
    return {key: paddle.to_tensor(value) for key, value in batch.items()}
def mask_mean(mask, value, axis=None, drop_mask_channel=False, eps=1e-10):
    """Masked mean of *value* over *axis*.

    The mask may be broadcast (size 1) along reduced axes; the denominator is
    scaled accordingly so the result is still a proper mean. *eps* guards
    against division by zero when the mask is empty.
    """
    if drop_mask_channel:
        mask = mask[:, 0]

    mask_shape = mask.shape
    value_shape = value.shape
    assert len(mask_shape) == len(value_shape)

    # Normalize `axis` to a list of dimension indices.
    if isinstance(axis, numbers.Integral):
        axes = [axis]
    elif axis is None:
        axes = list(range(len(mask_shape)))
    else:
        axes = axis
    assert isinstance(axes, collections.abc.Iterable), \
        'axis needs to be either an iterable, integer or "None"'

    # A size-1 mask dim covers `value_size` elements; fold that into the
    # denominator so broadcasting does not skew the mean.
    scale = 1.
    for ax in axes:
        value_size = value_shape[ax]
        mask_size = mask_shape[ax]
        if mask_size == 1:
            scale *= value_size
        else:
            assert mask_size == value_size

    weighted_sum = paddle.sum(mask * value, axis=axes)
    denominator = paddle.sum(mask, axis=axes) * scale + eps
    return weighted_sum / denominator
def batched_gather(params, indices, axis=0, batch_dims=0):
    """Gather with leading batch dimensions, like tensorflow's tf.gather:
    https://www.tensorflow.org/api_docs/python/tf/gather#batching

    The first `batch_dims` axes of `params` and `indices` are treated as
    matching batch axes; the gather happens along `axis` (counted after the
    batch axes — note `stride` below reads `p.shape[batch_dims + axis]`).
    """
    p, i = params, indices
    rank = len(p.shape)
    # Normalize a possibly-negative axis into [0, rank).
    axis = (rank + axis) % rank
    # The stride of axis: used below to offset flattened indices per batch element.
    stride = p.shape[batch_dims + axis]
    if batch_dims == 0 and len(i.shape) == 1:
        # Fast path: a plain 1-D gather.
        return paddle.gather(p, i, axis=axis)
    elif batch_dims == 0:
        # Multi-dimensional indices without batching: gather on the flattened
        # index array, then restore the index shape in the output.
        flat_i = i.reshape([-1])
        gathered = paddle.gather(p, flat_i, axis=axis)
        shape = p.shape[:axis] + i.shape
        if axis < rank - 1:
            shape += params.shape[axis + 1:]
        return gathered.reshape(shape)
    b = batch_dims
    a = axis
    # Batch axes of params and indices must agree element-wise.
    assert p.shape[:b] == i.shape[:b]
    bn = np.prod(p.shape[:b])
    # Shift batch dimensions right to bundle with axis
    if a > 0:
        perm = list(range(rank))
        perm = perm[b:(b + a)] + perm[:b] + perm[(b + a):]
        p = p.transpose(perm)
    # Merge params' batch+axis into a single gatherable dimension.
    p = p.reshape(p.shape[:a] + [-1] + p.shape[(b + a + 1):])
    # indices = [Batch..., Index...]
    # Expand the index values across batch elements: each batch element's
    # indices are offset by its position times the axis stride.
    strides = paddle.arange(bn, dtype="int64").unsqueeze(-1) * stride
    i = i.reshape([bn, -1])
    flat_i = paddle.flatten(i + strides)
    # Do gather
    gathered = paddle.gather(p, flat_i, axis=axis)
    # Unbundle batch and index dimensions
    unbundled_shape = p.shape[:a] + indices.shape + p.shape[a + 1:]
    gathered = gathered.reshape(unbundled_shape)
    # Shift batch dimensions back to the left (inverse of the earlier permute).
    if a > 0:
        perm = list(range(len(unbundled_shape)))
        perm = perm[a:(a + b)] + perm[:a] + perm[(a + b):]
        gathered = gathered.transpose(perm)
    return gathered
def subbatch(f, arg_idx, dim, bs, out_idx, same_arg_idx={}):
    """ Converts a function to one that applies to subbatch of an input
    dimension.

    Args:
        f(Callable): original function.
        arg_idx([int]): indices of the inputs to be subbatched.
        dim([int]): index of the dimension to be subbatched.
        bs(int): subbatch size.
        out_idx(int): index of the output dimension that needs stacking
        same_arg_idx(dict), optional: index of same arg mapping. e.g {1: 0} means arg[1] == arg[0],
            we assign _args[1] = _args[0] avoiding slicing repeatedly.
            NOTE: the mutable default {} is shared across calls but never
            mutated here, so it is safe.

    Returns:
        converted function.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        assert len(arg_idx) == len(dim), f'Number of batching args and number of batching dims should match.'

        inps = [args[i] for i in arg_idx]
        dim_width = [inp.shape[d] for inp, d in zip(inps, dim)]
        assert len(set(dim_width)) == 1, f'Batch sizes should be kept equal.'

        # Map each subbatched input (by identity) to its subbatch dimension.
        inp_dim = {inp: d for inp, d in zip(inps, dim)}

        dim_width = dim_width[0]
        if dim_width < bs:
            # Everything fits in a single subbatch; call through unchanged.
            return f(*args, **kwargs)

        outs = []
        for slice_at in np.arange(0, dim_width, bs):
            _args = []
            for i, inp in enumerate(args):
                if i in same_arg_idx:
                    # Reuse the already-sliced twin argument instead of slicing again.
                    assert i > same_arg_idx[i], f"expect i > same_arg_idx[i], but got i: {i} and same_arg_idx[i]: {same_arg_idx[i]}"
                    _args.append(_args[same_arg_idx[i]])
                elif i in arg_idx:
                    # Take the [slice_at, slice_at + bs) window along the
                    # registered dimension of this argument.
                    inp = inp.slice([inp_dim[inp]], [slice_at], [slice_at + bs])
                    _args.append(inp)
                else:
                    _args.append(inp)
            outs.append(f(*_args, **kwargs))

        # Stitch the per-subbatch outputs back together along out_idx.
        return paddle.concat(outs, out_idx)

    return wrapper
def get_confidence_metrics(
        prediction_result: Mapping[str, Any]) -> Mapping[str, Any]:
    """Post processes prediction_result to get confidence metrics.

    Always computes per-residue pLDDT; additionally computes PAE-derived
    metrics and pTM when 'predicted_aligned_error' is present.
    """
    confidence_metrics = {}
    confidence_metrics['plddt'] = confidence.compute_plddt(
        prediction_result['predicted_lddt']['logits'])
    if 'predicted_aligned_error' in prediction_result:
        pae_logits = prediction_result['predicted_aligned_error']['logits']
        pae_breaks = prediction_result['predicted_aligned_error']['breaks']
        confidence_metrics.update(confidence.compute_predicted_aligned_error(
            pae_logits, pae_breaks))
        confidence_metrics['ptm'] = confidence.predicted_tm_score(
            pae_logits, pae_breaks)
    return confidence_metrics
def generate_unrelaxed_pdb(aatype, residue_index, model_output, pdb_path,
                           b_factors=None):
    """Write the structure-module output of *model_output* to *pdb_path*.

    Returns the constructed `protein.Protein`. Missing *b_factors* default
    to zeros shaped like the final atom mask.
    """
    sm_output = model_output['structure_module']
    if b_factors is None:
        b_factors = np.zeros_like(sm_output['final_atom_mask'])

    # NOTE: for single protein, chain_index is always 'A' (idx:0)
    prot = protein.Protein(
        aatype=aatype,
        atom_positions=sm_output['final_atom_positions'],
        atom_mask=sm_output['final_atom_mask'],
        residue_index=residue_index + 1,  # PDB residue numbering is 1-based
        chain_index=np.zeros(aatype.shape),
        b_factors=b_factors)

    with open(pdb_path, 'w') as f:
        f.write(protein.to_pdb(prot))
    return prot
def set_tensor_constant(tensor, constant):
    """Fill *tensor* in place with the scalar *constant*."""
    filler = paddle.full_like(tensor, constant)
    tensor.set_value(filler)
def init_gate_linear(linear):
    """Gating init: zero weight, bias of one (gate starts fully open)."""
    set_tensor_constant(linear.bias, 1)
    set_tensor_constant(linear.weight, 0)
def init_final_linear(linear):
    """Zero-init the weight of a final projection layer (bias left as-is)."""
    set_tensor_constant(linear.weight, 0)
| 10,211 | 31.626198 | 132 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold/alphafold_paddle/model/model.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model."""
import os
import io
import time
import pickle
import logging
import pathlib
import numpy as np
import ml_collections
from copy import deepcopy
from typing import Dict, Optional
import paddle
from alphafold_paddle.model import utils
from alphafold_paddle.relax import relax
from alphafold_paddle.model import modules
from alphafold_paddle.common import protein
from alphafold_paddle.common import residue_constants
from utils.param_fuse import get_fused_param_groups
try:
import tensorflow.compat.v1 as tf
from alphafold_paddle.data.tf_input import input_pipeline
from alphafold_paddle.data.tf_input import proteins_dataset
USE_TF = True
except Exception:
from alphafold_paddle.data.input import input_pipeline
USE_TF = False
logger = logging.getLogger(__name__)
# Input channel widths used to size the AlphaFold model
# (consumed via `channel_num` in RunModel.__init__).
TARGET_FEAT_DIM = 22
MSA_FEAT_DIM = 49
def print_shape(d, level=0):
    """Recursively print the shape/dtype of every array in a (nested) dict,
    indenting one tab per nesting level."""
    indent = '\t' * level
    for key, value in d.items():
        if type(value) is dict:
            print(indent + key)
            print_shape(value, level=level + 1)
        else:
            print(indent + f'{key}: {value.shape} {value.dtype}')
def tensor_to_numpy(pred_dict):
    """In place, convert every paddle.Tensor in a (nested) dict to numpy."""
    for key, value in pred_dict.items():
        if isinstance(value, paddle.Tensor):
            pred_dict[key] = value.numpy()
        elif type(value) is dict:
            tensor_to_numpy(value)
def slice_pred_dict(pred_dict, slice_idx, ignores=('breaks', 'traj', 'sidechains')):
    """Slice every entry of a (nested) prediction dict at *slice_idx*, in place.

    Args:
        pred_dict: dict of arrays (possibly nested dicts of arrays). Mutated
            in place and also returned.
        slice_idx: index taken from the leading axis of each entry.
        ignores: keys left untouched at every nesting level.

    Returns:
        The same *pred_dict* object, with sliced entries.
    """
    # BUG FIX: the default for `ignores` was a mutable list shared across
    # calls; use an immutable tuple instead (behavior is otherwise identical).
    for k in pred_dict.keys():
        if k in ignores:
            continue

        if type(pred_dict[k]) is dict:
            pred_dict[k] = slice_pred_dict(pred_dict[k], slice_idx,
                                           ignores=ignores)
        else:
            pred_dict[k] = pred_dict[k][slice_idx]

    return pred_dict
class RunModel(object):
    """Wrapper for paddle model.

    Builds an AlphaFold module, loads checkpoint weights (JAX .npz or Paddle
    .pdparams), and exposes preprocess / predict / postprocess stages for
    inference (the module is put in eval mode at construction).
    """

    def __init__(self,
                 name: str,
                 config: ml_collections.ConfigDict,
                 params_path: str,
                 dynamic_subbatch_size: bool = True):
        # `name` labels log messages and output file names.
        self.name = name
        self.config = config
        # When True, predict() adjusts the global subbatch size to the input
        # size via update_subbatch_size().
        self.dynamic_subbatch_size = dynamic_subbatch_size

        channel_num = {
            'target_feat': TARGET_FEAT_DIM,
            'msa_feat': MSA_FEAT_DIM,
        }
        self.alphafold = modules.AlphaFold(channel_num, config.model)
        # just only fuse param for inference
        fused_parameters = get_fused_param_groups(self.alphafold, config.model.global_config.get('dist_model', False))
        self.init_params(str(params_path))
        self.alphafold.eval()

    def init_params(self, params_path: str):
        """Load model weights from `.npz` (JAX, converted on the fly) or
        `.pdparams` (native Paddle) and install them on the module."""
        if params_path.endswith('.npz'):
            with open(params_path, 'rb') as f:
                params = np.load(io.BytesIO(f.read()), allow_pickle=False)
                params = dict(params)

            # Convert flat JAX names to Paddle state-dict keys and strip the
            # leading 'alphafold.' scope.
            pd_params = utils.jax_params_to_paddle(params)
            pd_params = {k[len('alphafold.'):]: v for k, v in pd_params.items()}

            from collections import defaultdict
            qkv_dicts = defaultdict(dict)

            if self.config.model.global_config.fuse_attention:
                # Collect the separate query/key/value projection weights of
                # each (non-global) attention layer, grouped by layer prefix.
                for key in pd_params:
                    if 'msa_column_global_attention' not in key and 'attention' in key and ('query_w' in key or 'key_w' in key or 'value_w' in key):
                        prefix = key[:key.rfind('.')]
                        if 'extra_msa_stack' in key:
                            qkv_dicts[prefix][key] = pd_params[key]
                        elif 'evoformer_iteration' in key:
                            qkv_dicts[prefix][key] = pd_params[key]
                        elif 'template_pair_stack' in key:
                            qkv_dicts[prefix][key] = pd_params[key]

                for prefix in qkv_dicts:
                    query_w = qkv_dicts[prefix][prefix + '.query_w']
                    key_w = qkv_dicts[prefix][prefix + '.key_w']
                    value_w = qkv_dicts[prefix][prefix + '.value_w']
                    # Fuse only when q/k/v share the same input width.
                    if query_w.shape[0] == key_w.shape[0] and key_w.shape[0] == value_w.shape[0]:
                        # 1. merge to [3, num_head, key_dim, q_dim]
                        qkv_w = np.stack([query_w, key_w, value_w], axis=0).transpose((0, 2, 3, 1))
                        # 2. remove seperated param
                        del pd_params[prefix + '.query_w']
                        del pd_params[prefix + '.key_w']
                        del pd_params[prefix + '.value_w']
                        # 3. add merged param to pd_params
                        pd_params[prefix + '.qkv_w'] = qkv_w

        elif params_path.endswith('.pdparams'):
            logger.info('Load as Paddle model')
            pd_params = paddle.load(params_path)

        else:
            raise ValueError('Unsupported params file type')

        self.alphafold.set_state_dict(pd_params)

    def preprocess(self,
                   raw_features: Dict[str, np.ndarray],
                   random_seed: int,
                   pkl: pathlib.Path = None) -> Dict[str, paddle.Tensor]:
        """Convert raw input features to model input features.

        If `pkl` exists it is used as a cache of processed features; otherwise
        features are processed (via TF or numpy pipeline) and, when `pkl` is
        given, written back to it.
        """
        if pkl is not None and pkl.exists():
            logger.info(f'Use cached {pkl}')
            with open(pkl, 'rb') as f:
                features = pickle.load(f)

            print('########## feature shape ##########')
            print_shape(features)
            return utils.map_to_tensor(features, add_batch=True)

        print('Processing input features')
        data_config = deepcopy(self.config.data)
        feature_names = data_config.common.unsupervised_features
        if data_config.common.use_templates:
            feature_names += data_config.common.template_features

        # Crop size is set to the full sequence length (no cropping at eval).
        num_residues = int(raw_features['seq_length'][0])
        data_config.eval.crop_size = num_residues

        if 'deletion_matrix_int' in raw_features:
            raw_features['deletion_matrix'] = (raw_features.pop(
                'deletion_matrix_int').astype(np.float32))

        if USE_TF:
            # TensorFlow input pipeline: build a CPU graph, then run it once.
            data_config.eval.delete_msa_block = False
            tf_graph = tf.Graph()
            with tf_graph.as_default(), tf.device('/device:CPU:0'):
                tf.compat.v1.set_random_seed(random_seed)
                tensor_dict = proteins_dataset.np_to_tensor_dict(
                    np_example=raw_features, features=feature_names)
                processed_batch = input_pipeline.process_tensors_from_config(
                    tensor_dict, data_config)

            tf_graph.finalize()
            with tf.Session(graph=tf_graph) as sess:
                features = sess.run(processed_batch)
        else:
            # Pure-numpy input pipeline.
            array_dict = input_pipeline.np_to_array_dict(
                np_example=raw_features, features=feature_names,
                use_templates=data_config.common.use_templates)
            features = input_pipeline.process_arrays_from_config(
                array_dict, data_config)
            # Drop object-dtype entries (not convertible to tensors).
            features = {k: v for k, v in features.items() if v.dtype != 'O'}

            extra_msa_length = data_config.common.max_extra_msa
            for k in ['extra_msa', 'extra_has_deletion', 'extra_deletion_value',
                      'extra_msa_mask']:
                features[k] = features[k][:, :extra_msa_length]

        # Narrow 64-bit dtypes to the 32-bit dtypes the model expects.
        for k in features.keys():
            if features[k].dtype == np.int64:
                features[k] = features[k].astype(np.int32)
            elif features[k].dtype == np.float64:
                features[k] = features[k].astype(np.float32)

        if pkl is not None:
            with open(pkl, 'wb') as f:
                pickle.dump(features, f, protocol=4)

        print('Preprocessesing finished')
        print('########## feature shape ##########')
        print_shape(features)
        return utils.map_to_tensor(features, add_batch=True)

    def predict(self,
                feat: Dict[str, paddle.Tensor],
                ensemble_representations: bool = True,
                return_representations: bool = True):
        """Predict protein structure and encoding representation.

        Runs the model without gradients and converts all returned tensors to
        numpy arrays in place.
        """
        if self.dynamic_subbatch_size:
            seq_len = feat['aatype'].shape[-1]
            extra_msa_num = feat['extra_msa'].shape[-2]
            self.update_subbatch_size(seq_len, extra_msa_num)

        with paddle.no_grad():
            ret = self.alphafold(
                feat, {},
                ensemble_representations=ensemble_representations,
                return_representations=return_representations,
                compute_loss=False)

        print('Prediction finished')
        tensor_to_numpy(ret)
        return ret

    def postprocess(self,
                    aatype: np.ndarray,
                    residue_index: np.ndarray,
                    relaxer: relax.AmberRelaxation,
                    prediction: Dict[str, np.ndarray],
                    output_dir: pathlib.Path,
                    slice_idx: int = 0,
                    timings: Optional[Dict[str, float]] = None):
        """Compute pLDDT, save unrelaxed pdb and execute relaxation.

        Writes `result_<name>.pkl` and `unrelaxed_<name>.pdb` to *output_dir*;
        when a relaxer is given also writes `relaxed_<name>.pdb` and records
        the relax wall time in *timings*. Returns the (relaxed) PDB string.
        """
        print('Running postprocessing, it may takes long time')
        # Take the slice_idx-th element of the (ensembled) prediction.
        single_pred = slice_pred_dict(prediction, slice_idx)
        prediction.update(utils.get_confidence_metrics(single_pred))
        plddt = prediction['plddt']
        logger.info(f'{self.name} average pLDDT: {np.mean(plddt)}')

        if 'max_predicted_aligned_error' in prediction:
            err = prediction['max_predicted_aligned_error']
            logger.info(f'{self.name} max predicted aligned error: {err}')

        with open(output_dir.joinpath(f'result_{self.name}.pkl'), 'wb') as f:
            pickle.dump(prediction, f, protocol=4)

        # Store per-residue pLDDT in the PDB B-factor column (one value per atom).
        plddt_b_factors = np.repeat(
            plddt[:, None], residue_constants.atom_type_num, axis=-1)
        prot = utils.generate_unrelaxed_pdb(
            aatype, residue_index, single_pred,
            output_dir.joinpath(f'unrelaxed_{self.name}.pdb'),
            b_factors=plddt_b_factors)

        if relaxer is not None:
            t0 = time.time()
            relaxed_pdb_str = relaxer.process(prot=prot)[0]
            if timings is not None:
                timings[f'relax_{self.name}'] = time.time() - t0

            pdb = f'relaxed_{self.name}.pdb'
            with open(output_dir.joinpath(pdb), 'w') as f:
                f.write(relaxed_pdb_str)
            print('Saved {}'.format(output_dir.joinpath(pdb)))
        else:
            relaxed_pdb_str = protein.to_pdb(prot)

        print('Postprocessing finished')
        return relaxed_pdb_str

    def update_subbatch_size(self, seq_len, extra_msa_num):
        """Tune the module's global subbatch size for the given input size.

        For small sequences, subbatching is effectively disabled by setting
        the subbatch size to the full extra-MSA width. Only the two known
        extra-MSA configurations (5120, 1024) are supported.
        """
        if extra_msa_num == 5120:
            if seq_len < 200:
                # disable subbatch
                self.alphafold.global_config.subbatch_size = 5120
        elif extra_msa_num == 1024:
            if seq_len < 600:
                # disable subbatch
                self.alphafold.global_config.subbatch_size = 1024
        else:
            raise ValueError('Unknown subbatch strategy')
| 11,831 | 36.561905 | 148 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_folding/helixfold/utils/misc.py | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
misc utils
"""
from collections import OrderedDict
import logging
__all__ = ['AverageMeter']
class AverageMeter(object):
    """Track the latest value, running sum, count and mean of a metric.

    Based on https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """

    def __init__(self, name='', fmt='f', postfix="", need_avg=True):
        self.name = name
        self.fmt = fmt
        self.postfix = postfix
        self.need_avg = need_avg
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    @property
    def total(self):
        """Formatted running sum."""
        return f'{self.name}_sum: {self.sum:{self.fmt}}{self.postfix}'

    @property
    def total_minute(self):
        """Formatted running sum expressed in minutes."""
        return f'{self.name} {self.sum / 60:{self.fmt}}{self.postfix} min'

    @property
    def mean(self):
        """Formatted running mean, or '' when need_avg is False."""
        if not self.need_avg:
            return ''
        return f'{self.name}: {self.avg:{self.fmt}}{self.postfix}'

    @property
    def value(self):
        """Formatted most-recent value."""
        return f'{self.name}: {self.val:{self.fmt}}{self.postfix}'
class TrainLogger(object):
    """Aggregates per-step training statistics into named AverageMeters
    and renders them as a single log line."""

    # Keys whose report uses the running sum instead of the mean.
    _SUM_KEYS = ('protein', 'train_cost')

    def __init__(self):
        self.info = OrderedDict()
        timed = ('reader_cost', 'forward_cost', 'backward_cost',
                 'gradsync_cost', 'update_cost', 'batch_cost')
        self.info['loss'] = AverageMeter("loss", ".5f", postfix=", ")
        for key in timed:
            self.info[key] = AverageMeter(key, ".5f", postfix="s, ")
        self.info['avg_loss'] = AverageMeter("avg_loss", ".5f", postfix=", ")
        self.info['protein'] = AverageMeter("protein", "d", postfix=", ")
        self.info['train_cost'] = AverageMeter("train_cost", ".5f", postfix=", ")

    def update(self, key, value, n=1):
        """Record *value* (seen *n* times) under *key*."""
        self.info[key].update(value, n=n)

    def reset(self, key=None):
        """Reset every meter when *key* is None, otherwise only that meter."""
        targets = self.info if key is None else [key]
        for k in targets:
            self.info[k].reset()

    def mean(self, key):
        """Running mean for *key*."""
        return self.info[key].avg

    def sum(self, key):
        """Running sum for *key*."""
        return self.info[key].sum

    def state_dict(self):
        """Snapshot of all meters (sums for protein/train_cost, means otherwise),
        plus throughput in proteins per second."""
        state = {}
        for key, meter in self.info.items():
            state[key] = meter.sum if key in self._SUM_KEYS else meter.avg
        state['ips'] = self.info["protein"].sum / self.info["batch_cost"].sum
        return state

    def msg(self):
        """One-line human-readable summary of all meters."""
        parts = []
        for key, meter in self.info.items():
            parts.append(meter.total if key in self._SUM_KEYS else meter.mean)
        parts.append(
            f"ips: {self.info['protein'].sum / self.info['batch_cost'].sum:.5f} protein/s")
        return ''.join(parts)
def set_logging_level(level):
    """Configure root logging with a fixed format at the named *level*.

    *level* must be one of NOTSET/DEBUG/INFO/WARNING/ERROR/CRITICAL
    (a KeyError is raised otherwise).
    """
    name_to_level = {
        "NOTSET": logging.NOTSET,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    logging.basicConfig(
        level=name_to_level[level],
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
| 4,773 | 31.040268 | 97 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/pretrained_compound/ChemRL/GEM-2/src/dataset.py | #!/usr/bin/python
#-*-coding:utf-8-*-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dataset
"""
import os
from os.path import join, exists
import numpy as np
import pandas as pd
from rdkit import Chem
import paddle
import torch
from pahelix.datasets.inmemory_dataset import InMemoryDataset
from .utils import tree_map
class PCQMv2Dataset(paddle.io.Dataset):
    """PCQM4M-v2 dataset loader.

    Reads SMILES/labels from the raw CSV, optionally 3D conformers from the
    training SDF, and partitions the data with the official split dict.
    """

    def __init__(self, dataset_config):
        self.data_dir = dataset_config.data_dir
        self.load_sdf = dataset_config.load_sdf
        self.task_names = dataset_config.task_names

        self.raw_dir = join(self.data_dir, 'raw')
        self.sdf_file = join(self.data_dir, 'pcqm4m-v2-train.sdf')
        self.split_dict_file = os.path.join(self.data_dir, 'split_dict.pt')

    def load_dataset_dict(self):
        """Load all molecules and return {split_name: InMemoryDataset}."""
        csv_name = os.listdir(self.raw_dir)[0]
        frame = pd.read_csv(join(self.raw_dir, csv_name), sep=',')
        smiles_column = frame['smiles']
        label_frame = frame[self.task_names]

        mol_supplier = Chem.SDMolSupplier(self.sdf_file) if self.load_sdf else None

        records = []
        for idx in range(len(smiles_column)):
            record = {
                'smiles': smiles_column[idx],
                'label': label_frame.values[idx],
            }
            # The SDF only covers the training molecules; guard the index.
            if mol_supplier is not None and idx < len(mol_supplier):
                record['mol'] = mol_supplier[idx]
            records.append(record)

        dataset = InMemoryDataset(records)
        split_dict = torch.load(self.split_dict_file)
        return tree_map(lambda idxs: dataset[list(idxs)], split_dict)

    def get_task_names(self):
        """Names of the regression targets."""
        return self.task_names

    def get_label_stat(self):
        """Return mean and std of labels"""
        frame = pd.read_csv(join(self.raw_dir, 'data.csv.gz'), sep=',')
        labels = frame[self.task_names].dropna().values
        return {
            'mean': np.mean(labels, 0),
            'std': np.std(labels, 0),
            'N': len(labels),
        }
| 2,678 | 32.911392 | 113 | py |
PaddleHelix-dev | PaddleHelix-dev/apps/protein_function_prediction/PTHL/layers.py | import paddle
from paddle import nn
import paddle.nn.functional as F
from utils import _norm_no_nan
import pgl
import pgl.math as math
class GVP(nn.Layer):
    '''
    Paddle version of the GVP proposed by https://github.com/drorlab/gvp-pytorch

    A geometric vector perceptron mapping (scalar, vector) features of widths
    `in_dims = (si, vi)` to widths `out_dims = (so, vo)`. When `vi == 0` the
    input is scalar-only; when `vo == 0` only scalar features are returned.
    '''

    def __init__(self, in_dims, out_dims, h_dim=None, activations=(F.relu, F.sigmoid)):
        super().__init__()
        self.si, self.vi = in_dims
        self.so, self.vo = out_dims
        if self.vi:
            # Hidden vector width defaults to the larger of in/out widths.
            self.h_dim = h_dim or max(self.vi, self.vo)
            self.wh = Linear(self.vi, self.h_dim, bias_attr=False)
            self.ws = Linear(self.h_dim + self.si, self.so)
            if self.vo:
                self.wv = Linear(self.h_dim, self.vo, bias_attr=False)
        else:
            self.ws = Linear(self.si, self.so)
        self.scalar_act, self.vector_act = activations

    def forward(self, x):
        """x is (s, v) when vi > 0, else a plain scalar tensor; returns
        (s, v) when vo > 0, else the scalar tensor."""
        if self.vi:
            s, v = x
            v = paddle.transpose(v, [0, 2, 1])
            vh = self.wh(v)
            # Vector norms feed the scalar channel (rotation invariant).
            vn = _norm_no_nan(vh, axis=-2)
            s = self.ws(paddle.concat([s, vn], -1))
            if self.vo:
                v = self.wv(vh)
                v = paddle.transpose(v, [0, 2, 1])
                if self.vector_act:
                    # Gate vectors by a function of their norms.
                    v = v * self.vector_act(_norm_no_nan(v, axis=-1, keepdim=True))
        else:
            s = self.ws(x)
            if self.vo:
                # BUG FIX: `paddle.zeros` takes the shape as one list argument;
                # the original `paddle.zeros(paddle.shape(s)[0], self.vo, 3)`
                # passed three positionals (shape, dtype, name) and would fail
                # at runtime on this scalar-input/vector-output path.
                v = paddle.zeros([s.shape[0], self.vo, 3])
        if self.scalar_act:
            s = self.scalar_act(s)
        return (s, v) if self.vo else s
class RR_VPConv(nn.Layer):
    """Residue-residue vector-perceptron graph convolution.

    Each edge message applies a GVP to the source node's (scalar, vector)
    features after rotating the vectors into the destination node's local
    frame; messages are summed per node.
    """

    def __init__(self, in_dims, out_dims, activations=(F.relu, F.sigmoid)):
        super().__init__()
        # (scalar width, vector width) of inputs and outputs.
        self.si, self.vi = in_dims
        self.so, self.vo = out_dims
        self.vp_layer = GVP((in_dims[0], in_dims[1]),
                            out_dims, h_dim=out_dims[1], activations=activations)

    def forward(self, graph, x, local_sys):
        # x: (scalar_features, vector_features) per node. local_sys is a
        # per-node 3x3 frame; send_func uses its transpose as the inverse
        # rotation, so it is assumed orthonormal (NOTE(review): confirm with
        # the caller that builds local_sys).
        src_feat = {'s_f': x[0], 'v_f': x[1]}
        dst_feat = {'local_sys': local_sys}
        msg = graph.send(self.send_func, src_feat=src_feat, dst_feat=dst_feat)
        n_s_f = graph.recv(self.s_recv_func, msg)
        n_v_f = graph.recv(self.v_recv_func, msg)
        # Restore the [num_nodes, num_vectors, 3] layout flattened in send_func.
        n_v_f = paddle.reshape(n_v_f, [n_v_f.shape[0], -1, 3])
        return (n_s_f, n_v_f)

    def send_func(self, src_feat, dst_feat, edge_feat):
        # Rotate source vectors into the destination's local frame, apply the
        # GVP, then rotate the output vectors back with the transposed frame.
        local_sys = dst_feat['local_sys']
        v_f_local = src_feat['v_f'] @ local_sys
        s_f = src_feat['s_f']
        x = self.vp_layer((s_f, v_f_local))
        s, v = x[0], x[1] @ paddle.transpose(local_sys, [0, 2, 1])
        v = paddle.reshape(v, [v.shape[0], -1])  ## Only 2D tensors are accepted
        return {'s': s, 'v': v}

    def s_recv_func(self, msg):
        # Sum incoming scalar messages per destination node.
        return msg.reduce_sum(msg['s'])

    def v_recv_func(self, msg):
        # Sum incoming (flattened) vector messages per destination node.
        return msg.reduce_sum(msg['v'])
def Linear(in_features, out_features, weight_attr=None, bias_attr=None, name=None):
    """paddle.nn.Linear with PyTorch-style default initialization.

    When an attr is not supplied, both weight and bias are initialized from
    Uniform(-k, k) with k = 1/sqrt(in_features), matching torch.nn.Linear.
    Pass `bias_attr=False` to disable the bias entirely, or supply your own
    ParamAttr to override the default initializer.
    """
    k = (1 / in_features) ** 0.5
    if weight_attr is None:
        weight_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Uniform(low=-k, high=k))
    # BUG FIX: this previously read `if bias_attr:`, which (a) skipped the
    # intended uniform init on the default None path, leaving paddle's default
    # bias init, and (b) clobbered any caller-supplied truthy ParamAttr.
    # Checking `is None` mirrors the weight_attr handling above and preserves
    # `bias_attr=False` (no bias).
    if bias_attr is None:
        bias_attr = paddle.ParamAttr(
            initializer=paddle.nn.initializer.Uniform(low=-k, high=k))
    return nn.Linear(in_features, out_features, weight_attr=weight_attr, bias_attr=bias_attr, name=name)
| 3,468 | 31.12037 | 104 | py |
PaddleHelix-dev | PaddleHelix-dev/pahelix/utils/metrics/molecular_generation/metrics_.py | #!/usr/bin/python3
#-*-coding:utf-8-*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
metrics
"""
import warnings
from multiprocessing import Pool
import numpy as np
from scipy.spatial.distance import cosine as cos_distance
# from fcd_torch import FCD as FCDMetric
from scipy.stats import wasserstein_distance
from rdkit import rdBase
from pahelix.utils.metrics.molecular_generation.utils_ import compute_fragments, average_agg_tanimoto, \
compute_scaffolds, fingerprints, \
get_mol, canonic_smiles, mol_passes_filters, \
logP, QED, SA, weight, mapper
def disable_rdkit_log():
    """Silence every RDKit application log stream ('rdApp.*')."""
    rdBase.DisableLog('rdApp.*')
def enable_rdkit_log():
    """Re-enable every RDKit application log stream ('rdApp.*')."""
    rdBase.EnableLog('rdApp.*')
def get_all_metrics(gen, k=None, n_jobs=1,
                    device='cpu', batch_size=512, pool=None,
                    test=None, test_scaffolds=None,
                    ptest=None, ptest_scaffolds=None,
                    train=None):
    """Computes all available metrics between test (scaffold test)
    and generated sets of SMILES.
    Description:
        Available metrics:
        * %valid
        * %unique@k
        * Frechet ChemNet Distance (FCD)
        * Fragment similarity (Frag)
        * Scaffold similarity (Scaf)
        * Similarity to nearest neighbour (SNN)
        * Internal diversity (IntDiv)
        * Internal diversity 2: using square root of mean squared
            Tanimoto similarity (IntDiv2)
        * %passes filters (Filters)
        * Distribution difference for logP, SA, QED, weight
        * Novelty (molecules not present in train)
    Args:
        gen: list of generated SMILES
        k: int or list with values for unique@k. Will calculate number of
            unique molecules in the first k molecules. Default [1000, 10000]
        n_jobs: number of workers for parallel processing
        device: 'cpu' or 'cuda:n', where n is GPU device number
        batch_size: batch size for FCD metric
        pool: optional multiprocessing pool to use for parallelization
        test (None or list): test SMILES. If None, will load
            a default test set
        test_scaffolds (None or list): scaffold test SMILES. If None, will
            load a default scaffold test set
        ptest (None or dict): precalculated statistics of the test set. If
            None, will load default test statistics. If you specified a custom
            test set, default test statistics will be ignored
        ptest_scaffolds (None or dict): precalculated statistics of the
            scaffold test set If None, will load default scaffold test
            statistics. If you specified a custom test set, default test
            statistics will be ignored
        train (None or list): train SMILES. If None, will load a default
            train set
    """
    if k is None:
        k = [1000, 10000]
    disable_rdkit_log()
    metrics = {}
    # Create a pool only when we own it; close_pool remembers that ownership
    # so a caller-provided pool is never shut down here.
    close_pool = False
    if pool is None:
        if n_jobs != 1:
            pool = Pool(n_jobs)
            close_pool = True
        else:
            pool = 1
    metrics['valid'] = fraction_valid(gen, n_jobs=pool)
    # All remaining metrics are computed on the valid (canonicalized) subset.
    gen = remove_invalid(gen, canonize=True)
    if not isinstance(k, (list, tuple)):
        k = [k]
    for _k in k:
        metrics['unique@{}'.format(_k)] = fraction_unique(gen, _k, pool)
    mols = mapper(pool)(get_mol, gen)
    kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}
    # kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}
    # Reference-set metrics: only computed when a test set (or its
    # precalculated statistics) is supplied.
    if test or ptest:
        if ptest is None:
            ptest = compute_intermediate_statistics(test, n_jobs=n_jobs,
                                                    device=device,
                                                    batch_size=batch_size,
                                                    pool=pool)
        if test_scaffolds is not None and ptest_scaffolds is None:
            ptest_scaffolds = compute_intermediate_statistics(
                test_scaffolds, n_jobs=n_jobs,
                device=device, batch_size=batch_size,
                pool=pool
            )
        # metrics['FCD/Test'] = FCDMetric(**kwargs_fcd)(gen=gen, pref=ptest['FCD'])
        metrics['SNN/Test'] = SNNMetric(**kwargs)(gen=mols, pref=ptest['SNN'])
        metrics['Frag/Test'] = FragMetric(**kwargs)(gen=mols, pref=ptest['Frag'])
        metrics['Scaf/Test'] = ScafMetric(**kwargs)(gen=mols, pref=ptest['Scaf'])
        # Properties
        for name, func in [('logP', logP), ('SA', SA),
                           ('QED', QED),
                           ('weight', weight)]:
            metrics[name] = WassersteinMetric(func, **kwargs)(
                gen=mols, pref=ptest[name])
        if test_scaffolds or ptest_scaffolds:
            if ptest_scaffolds is not None:
                # metrics['FCD/TestSF'] = FCDMetric(**kwargs_fcd)(
                #     gen=gen, pref=ptest_scaffolds['FCD']
                # )
                metrics['SNN/TestSF'] = SNNMetric(**kwargs)(
                    gen=mols, pref=ptest_scaffolds['SNN']
                )
                metrics['Frag/TestSF'] = FragMetric(**kwargs)(
                    gen=mols, pref=ptest_scaffolds['Frag']
                )
                metrics['Scaf/TestSF'] = ScafMetric(**kwargs)(
                    gen=mols, pref=ptest_scaffolds['Scaf']
                )
    metrics['IntDiv'] = internal_diversity(mols, pool, device=device)
    metrics['IntDiv2'] = internal_diversity(mols, pool, device=device, p=2)
    metrics['Filters'] = fraction_passes_filters(mols, pool)
    if train is not None:
        metrics['Novelty'] = novelty(mols, train, pool)
    enable_rdkit_log()
    if close_pool:
        pool.close()
        pool.join()
    return metrics
def compute_intermediate_statistics(smiles, n_jobs=1, device='cpu',
                                    batch_size=512, pool=None):
    """Precompute reference statistics for a SMILES set.

    Builds the SNN/Frag/Scaf fingerprint statistics and the logP/SA/QED/weight
    property values once, so that repeated calls to get_all_metrics() against
    the same test / scaffold-test set are fast.

    Args:
        smiles: list of SMILES strings.
        n_jobs: number of worker processes (ignored when `pool` is given).
        device: 'cpu' or 'cuda:n' for GPU-accelerated metrics.
        batch_size: batch size forwarded to the metric objects.
        pool: optional multiprocessing pool; created (and cleaned up) here
            when omitted and n_jobs != 1.

    Returns:
        dict mapping metric name to its precalculated statistics.
    """
    close_pool = False
    if pool is None:
        if n_jobs != 1:
            pool = Pool(n_jobs)
            close_pool = True
        else:
            pool = 1
    statistics = {}
    mols = mapper(pool)(get_mol, smiles)
    kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}
    # FCD support is currently disabled; restore its kwargs together with it:
    # kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}
    # statistics['FCD'] = FCDMetric(**kwargs_fcd).precalc(smiles)
    statistics['SNN'] = SNNMetric(**kwargs).precalc(mols)
    statistics['Frag'] = FragMetric(**kwargs).precalc(mols)
    statistics['Scaf'] = ScafMetric(**kwargs).precalc(mols)
    for name, func in [('logP', logP), ('SA', SA),
                       ('QED', QED),
                       ('weight', weight)]:
        statistics[name] = WassersteinMetric(func, **kwargs).precalc(mols)
    if close_pool:
        # Graceful shutdown (was pool.terminate(), which kills workers
        # abruptly); now consistent with the cleanup in get_all_metrics().
        pool.close()
        pool.join()
    return statistics
def fraction_passes_filters(gen, n_jobs=1):
    """Fraction of molecules passing the medicinal-chemistry filters.

    A molecule passes when it avoids MCF and PAINS alerts, contains only the
    atoms C, N, S, O, F, Cl, Br, H, and carries no charges (see
    mol_passes_filters).
    """
    passed = mapper(n_jobs)(mol_passes_filters, gen)
    return np.mean(passed)
def internal_diversity(gen, n_jobs=1, device='cpu', fp_type='morgan',
                       gen_fps=None, p=1):
    """Internal diversity of a molecule set:
    1/|A|^2 sum_{x, y in AxA} (1-tanimoto(x, y))
    """
    fps = gen_fps
    if fps is None:
        # Fingerprints were not supplied; compute them from the molecules.
        fps = fingerprints(gen, fp_type=fp_type, n_jobs=n_jobs)
    mean_similarity = average_agg_tanimoto(
        fps, fps, agg='mean', device=device, p=p).mean()
    return 1 - mean_similarity
def fraction_unique(gen, k=None, n_jobs=1, check_validity=True):
    """Computes the fraction of unique molecules.

    Args:
        gen: list of SMILES
        k: compute unique@k (only the first k molecules are considered)
        n_jobs: number of threads for calculation
        check_validity: raises ValueError if invalid molecules are present

    Returns:
        Fraction of distinct canonical SMILES among the (first k) molecules.

    Raises:
        ValueError: if `check_validity` and an invalid molecule is present.
        ZeroDivisionError: if `gen` is empty.
    """
    if k is not None:
        if len(gen) < k:
            warnings.warn(
                # Fixed: the two fragments previously concatenated without a
                # separating space ("unique@1000.gen contains ...").
                "Can't compute unique@{}. ".format(k) +
                "gen contains only {} molecules".format(len(gen))
            )
        gen = gen[:k]
    canonic = set(mapper(n_jobs)(canonic_smiles, gen))
    # canonic_smiles maps unparsable SMILES to None.
    if None in canonic and check_validity:
        raise ValueError("Invalid molecule passed to unique@k")
    return len(canonic) / len(gen)
def fraction_valid(gen, n_jobs=1):
    """Fraction of SMILES in `gen` that RDKit parses into a valid molecule.

    Args:
        gen: list of SMILES
        n_jobs: number of threads for calculation
    """
    mols = mapper(n_jobs)(get_mol, gen)
    invalid = mols.count(None)
    return 1 - invalid / len(mols)
def novelty(gen, train, n_jobs=1):
    """Fraction of unique valid generated molecules absent from the train set."""
    canonical = mapper(n_jobs)(canonic_smiles, gen)
    unique_gen = set(canonical)
    unique_gen.discard(None)  # drop the marker for unparsable molecules
    novel = unique_gen - set(train)
    return len(novel) / len(unique_gen)
def remove_invalid(gen, canonize=True, n_jobs=1):
    """Removes invalid molecules from the dataset.

    With canonize=True (default) survivors are returned in canonical form;
    otherwise the original SMILES strings are kept unchanged.
    """
    if canonize:
        canonical = mapper(n_jobs)(canonic_smiles, gen)
        return [smi for smi in canonical if smi is not None]
    mols = mapper(n_jobs)(get_mol, gen)
    return [smi for smi, mol in zip(gen, mols) if mol is not None]
class Metric(object):
    """Base class for reference-vs-generated molecular set metrics.

    Subclasses implement precalc() (turn a molecule list into reusable
    statistics) and metric() (compare two such statistics dicts). Extra
    keyword arguments are stored as attributes on the instance.
    """
    def __init__(self, n_jobs=1, device='cpu', batch_size=512, **kwargs):
        self.n_jobs = n_jobs
        self.device = device
        self.batch_size = batch_size
        # Bug fix: was `kwargs.values()`, which tried to unpack each *value*
        # into (k, v) and failed for any extra keyword argument.
        for k, v in kwargs.items():
            setattr(self, k, v)
    def __call__(self, ref=None, gen=None, pref=None, pgen=None):
        """Compute the metric.

        Pass either raw molecule lists (ref/gen) or precalculated statistics
        (pref/pgen) -- exactly one of each pair.
        """
        assert (ref is None) != (pref is None), "specify ref xor pref"
        assert (gen is None) != (pgen is None), "specify gen xor pgen"
        if pref is None:
            pref = self.precalc(ref)
        if pgen is None:
            pgen = self.precalc(gen)
        return self.metric(pref, pgen)
    def precalc(self, moleclues):
        """Convert a molecule list into reusable statistics (subclass hook)."""
        raise NotImplementedError
    def metric(self, pref, pgen):
        """Compare two precalculated statistics dicts (subclass hook)."""
        raise NotImplementedError
class SNNMetric(Metric):
    """Similarity to nearest neighbour.

    Average over generated molecules of the maximum Tanimoto similarity to
    the reference set, computed on fingerprints of the configured type.
    """
    def __init__(self, fp_type='morgan', **kwargs):
        self.fp_type = fp_type
        super().__init__(**kwargs)
    def precalc(self, mols):
        fps = fingerprints(mols, n_jobs=self.n_jobs, fp_type=self.fp_type)
        return {'fps': fps}
    def metric(self, pref, pgen):
        return average_agg_tanimoto(pref['fps'], pgen['fps'],
                                    device=self.device)
def cos_similarity(ref_counts, gen_counts):
    """Cosine similarity between two {name: count} dictionaries.

    Keys missing from one dictionary are treated as zero counts:
        sim = <r, g> / ||r|| / ||g||
    Returns NaN when either dictionary is empty.
    """
    if not ref_counts or not gen_counts:
        return np.nan
    keys = np.unique(list(ref_counts) + list(gen_counts))
    ref_vec = np.array([ref_counts.get(key, 0) for key in keys])
    gen_vec = np.array([gen_counts.get(key, 0) for key in keys])
    return 1 - cos_distance(ref_vec, gen_vec)
class FragMetric(Metric):
    """Fragment similarity: cosine similarity between the fragment
    frequency vectors of the reference and generated molecule sets."""
    def precalc(self, mols):
        frag_counts = compute_fragments(mols, n_jobs=self.n_jobs)
        return {'frag': frag_counts}
    def metric(self, pref, pgen):
        return cos_similarity(pref['frag'], pgen['frag'])
class ScafMetric(Metric):
    """Scaffold similarity: cosine similarity between the scaffold
    frequency vectors of the reference and generated molecule sets."""
    def precalc(self, mols):
        scaf_counts = compute_scaffolds(mols, n_jobs=self.n_jobs)
        return {'scaf': scaf_counts}
    def metric(self, pref, pgen):
        return cos_similarity(pref['scaf'], pgen['scaf'])
class WassersteinMetric(Metric):
    """1-D Wasserstein (earth mover's) distance between the distributions of
    a scalar property over two molecule sets.

    When `func` is given, it maps each molecule to the scalar property;
    otherwise the inputs are assumed to already be scalar values.
    """
    def __init__(self, func=None, **kwargs):
        self.func = func
        super().__init__(**kwargs)
    def precalc(self, mols):
        if self.func is None:
            values = mols
        else:
            values = mapper(self.n_jobs)(self.func, mols)
        return {'values': values}
    def metric(self, pref, pgen):
        return wasserstein_distance(pref['values'], pgen['values'])
| 12,863 | 33.580645 | 114 | py |
PaddleHelix-dev | PaddleHelix-dev/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sphinx_rtd_theme
import os
import sys
import re
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'PaddleHelix'
author = u'2021, Baidu Inc.'
copyright = author
language = 'en'
# Import mock dependencies packages
# Heavy/optional dependencies are mocked so autodoc can import modules on the
# docs build host without installing them.
autodoc_mock_imports = ['paddle', 'pgl', 'rdkit', 'numpy', 'random', 'pandas', 'sklearn', 'networkx', 'pdb', 'math', 'enum', 'collections', 'json', 'pickle', 'argparse', 're']
# -- General configuration ---------------------------------------------------
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
# The master toctree document.
master_doc = 'index'
# The suffix of source filenames.
source_suffix = '.rst'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'recommonmark',
    'sphinx.ext.todo',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
#autoapi_ignore = ["*/pahelix/__init__.py", "*/pahelix/*/__init__.py"]
# do not skip documentation of the __init__ function of a class
#def skip(app, what, name, obj, would_skip, options):
#    if name == "__init__":
#        return False
#    return would_skip
#def setup(app):
#    app.connect("autodoc-skip-member", skip)
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'logo_only': True,
    'navigation_depth': 5,
}
html_context = {}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 3,510 | 30.348214 | 175 | py |
PaddleHelix-dev | PaddleHelix-dev/competition/kddcup2021-PCQM4M-LSC/models/conv.py | import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import pgl
import pgl.nn as gnn
from pgl.utils.logger import log
import models.mol_encoder as ME
import models.layers as L
class LiteGEM(paddle.nn.Layer):
    """Lightweight GEM-style GNN producing per-node representations.

    Stacks `config.num_layers` LiteGEMConv layers with pre-norm + swish +
    dropout between layers, optional virtual-node message passing, optional
    GraphNorm, and an optional APPNP propagation head.
    """
    def __init__(self, config, with_efeat=False):
        super(LiteGEM, self).__init__()
        log.info("gnn_type is %s" % self.__class__.__name__)
        self.config = config
        self.with_efeat = with_efeat
        self.num_layers = config.num_layers
        self.drop_ratio = config.drop_ratio
        self.virtual_node = config.virtual_node
        self.emb_dim = config.emb_dim
        self.norm = config.norm
        self.gnns = paddle.nn.LayerList()
        self.norms = paddle.nn.LayerList()
        if self.virtual_node:
            log.info("using virtual node in %s" % self.__class__.__name__)
            self.mlp_virtualnode_list = paddle.nn.LayerList()
            # Virtual-node embedding starts at zero; one MLP per inter-layer
            # virtual-node update (num_layers - 1 of them).
            self.virtualnode_embedding = self.create_parameter(
                shape=[1, self.emb_dim],
                dtype='float32',
                default_initializer=nn.initializer.Constant(value=0.0))
            for layer in range(self.num_layers - 1):
                self.mlp_virtualnode_list.append(L.MLP([self.emb_dim] * 3,
                                                       norm=self.norm))
        for layer in range(self.num_layers):
            # NOTE(review): the conv gets the *negation* of self.with_efeat --
            # presumably the conv encodes edges itself only when this module
            # does not (see bond_encoder below); confirm against LiteGEMConv.
            self.gnns.append(L.LiteGEMConv(config, with_efeat=not self.with_efeat))
            self.norms.append(L.norm_layer(self.norm, self.emb_dim))
        self.atom_encoder = getattr(ME, self.config.atom_enc_type, ME.AtomEncoder)(
            emb_dim=self.emb_dim)
        if self.config.exfeat:
            self.atom_encoder_float = ME.AtomEncoderFloat(emb_dim=self.emb_dim)
        if self.with_efeat:
            self.bond_encoder = getattr(ME, self.config.bond_enc_type, ME.BondEncoder)(
                emb_dim=self.emb_dim)
        self.pool = gnn.GraphPool(pool_type="sum")
        if self.config.appnp_k is not None:
            self.appnp = gnn.APPNP(k_hop=self.config.appnp_k, alpha=self.config.appnp_a)
        # NOTE(review): constructed when `graphnorm is not None`, but forward()
        # applies it only when the value is *truthy* -- confirm the config type.
        if self.config.graphnorm is not None:
            self.gn = gnn.GraphNorm()
    def forward(self, feed_dict):
        """Return node representations for the batched graph in feed_dict["graph"]."""
        g = feed_dict["graph"]
        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        h = self.atom_encoder(x)
        if self.config.exfeat:
            h += self.atom_encoder_float(g.node_feat["feat_float"])
        # print("atom_encoder: ", np.sum(h.numpy()))
        if self.virtual_node:
            # Broadcast the shared virtual-node embedding to every graph and
            # add it to that graph's nodes.
            virtualnode_embedding = self.virtualnode_embedding.expand(
                [g.num_graph, self.virtualnode_embedding.shape[-1]])
            h = h + paddle.gather(virtualnode_embedding, g.graph_node_id)
            # print("virt0: ", np.sum(h.numpy()))
        if self.with_efeat:
            edge_emb = self.bond_encoder(edge_feat)
        else:
            edge_emb = edge_feat
        h = self.gnns[0](g, h, edge_emb)
        if self.config.graphnorm:
            h = self.gn(g, h)
        # print("h0: ", np.sum(h.numpy()))
        for layer in range(1, self.num_layers):
            # Pre-norm residual block: norm -> swish -> dropout -> conv (+h).
            h1 = self.norms[layer-1](h)
            h2 = F.swish(h1)
            h2 = F.dropout(h2, p=self.drop_ratio, training=self.training)
            if self.virtual_node:
                virtualnode_embedding_temp = self.pool(g, h2) + virtualnode_embedding
                virtualnode_embedding = self.mlp_virtualnode_list[layer-1](virtualnode_embedding_temp)
                virtualnode_embedding = F.dropout(
                    virtualnode_embedding,
                    self.drop_ratio,
                    training=self.training)
                h2 = h2 + paddle.gather(virtualnode_embedding, g.graph_node_id)
                # print("virt_h%s: " % (layer), np.sum(h2.numpy()))
            h = self.gnns[layer](g, h2, edge_emb) + h
            if self.config.graphnorm:
                h = self.gn(g, h)
            # print("h%s: " % (layer), np.sum(h.numpy()))
        h = self.norms[self.num_layers-1](h)
        h = F.dropout(h, p=self.drop_ratio, training=self.training)
        if self.config.appnp_k is not None:
            h = self.appnp(g, h)
        # print("node_repr: ", np.sum(h.numpy()))
        node_representation = h
        return node_representation
class GNNVirt(paddle.nn.Layer):
    """GNN with a per-graph virtual node, producing node representations.

    Each layer: add virtual-node message to nodes, run the configured conv
    layer + batch norm + (swish for non-final layers) + dropout, optional
    residual, then update the virtual node with a sum-pool + MLP. The output
    follows config.JK ("last" or "sum" over layer outputs).
    """
    def __init__(self, config):
        super(GNNVirt, self).__init__()
        log.info("gnn_type is %s" % self.__class__.__name__)
        self.config = config
        self.atom_encoder = getattr(ME, self.config.atom_enc_type, ME.AtomEncoder)(
            self.config.emb_dim)
        # Shared virtual-node embedding, initialized to zero.
        self.virtualnode_embedding = self.create_parameter(
            shape=[1, self.config.emb_dim],
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=0.0))
        self.convs = paddle.nn.LayerList()
        self.batch_norms = paddle.nn.LayerList()
        self.mlp_virtualnode_list = paddle.nn.LayerList()
        for layer in range(self.config.num_layers):
            self.convs.append(getattr(L, self.config.layer_type)(self.config))
            self.batch_norms.append(L.batch_norm_1d(self.config.emb_dim))
        # One virtual-node update MLP per layer boundary (num_layers - 1).
        for layer in range(self.config.num_layers - 1):
            self.mlp_virtualnode_list.append(
                nn.Sequential(L.Linear(self.config.emb_dim, self.config.emb_dim),
                              L.batch_norm_1d(self.config.emb_dim),
                              nn.Swish(),
                              L.Linear(self.config.emb_dim, self.config.emb_dim),
                              L.batch_norm_1d(self.config.emb_dim),
                              nn.Swish())
            )
        self.pool = gnn.GraphPool(pool_type="sum")
    def forward(self, feed_dict):
        """Return node representations for feed_dict["graph"]."""
        g = feed_dict["graph"]
        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        h_list = [self.atom_encoder(x)]
        virtualnode_embedding = self.virtualnode_embedding.expand(
            [g.num_graph, self.virtualnode_embedding.shape[-1]])
        for layer in range(self.config.num_layers):
            # add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + \
                paddle.gather(virtualnode_embedding, g.graph_node_id)
            ### Message passing among graph nodes
            h = self.convs[layer](g, h_list[layer], edge_feat)
            h = self.batch_norms[layer](h)
            if layer == self.config.num_layers - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.config.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.swish(h), self.config.drop_ratio, training = self.training)
            if self.config.residual:
                h = h + h_list[layer]
            h_list.append(h)
            ### update the virtual nodes
            if layer < self.config.num_layers - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = self.pool(g, h_list[layer]) + virtualnode_embedding
                ### transform virtual nodes using MLP
                if self.config.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),
                        self.config.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),
                        self.config.drop_ratio, training = self.training)
        ### Different implementations of Jk-concat
        if self.config.JK == "last":
            node_representation = h_list[-1]
        elif self.config.JK == "sum":
            node_representation = 0
            for layer in range(self.config.num_layers):
                node_representation += h_list[layer]
        return node_representation
### Virtual GNN to generate node embedding
class JuncGNNVirt(paddle.nn.Layer):
    """Virtual-node GNN augmented with junction-tree message passing.

    In addition to the per-graph virtual node, each layer scatters atom
    features onto a junction-tree graph (via feed_dict['mol2junc']), runs a
    GINConv over the tree, and gathers the result back onto the atoms.

    Output:
        node representations
    """
    def __init__(self, config):
        super(JuncGNNVirt, self).__init__()
        log.info("gnn_type is %s" % self.__class__.__name__)
        self.config = config
        self.num_layers = config.num_layers
        self.drop_ratio = config.drop_ratio
        self.JK = config.JK
        self.residual = config.residual
        self.emb_dim = config.emb_dim
        self.gnn_type = config.gnn_type
        self.layer_type = config.layer_type
        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.atom_encoder = getattr(ME, self.config.atom_enc_type, ME.AtomEncoder)(
            self.emb_dim)
        # Junction-tree node vocabulary embedding (6000 cluster types --
        # NOTE(review): magic size, confirm against the clique vocabulary).
        self.junc_embed = paddle.nn.Embedding(6000, self.emb_dim)
        ### set the initial virtual node embedding to 0.
        # self.virtualnode_embedding = paddle.nn.Embedding(1, emb_dim)
        # torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
        self.virtualnode_embedding = self.create_parameter(
            shape=[1, self.emb_dim],
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=0.0))
        ### List of GNNs
        self.convs = nn.LayerList()
        ### batch norms applied to node embeddings
        self.batch_norms = nn.LayerList()
        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = nn.LayerList()
        self.junc_convs = nn.LayerList()
        for layer in range(self.num_layers):
            self.convs.append(getattr(L, self.layer_type)(self.config))
            self.junc_convs.append(gnn.GINConv(self.emb_dim, self.emb_dim))
            self.batch_norms.append(L.batch_norm_1d(self.emb_dim))
        for layer in range(self.num_layers - 1):
            self.mlp_virtualnode_list.append(
                nn.Sequential(L.Linear(self.emb_dim, self.emb_dim),
                              L.batch_norm_1d(self.emb_dim),
                              nn.Swish(),
                              L.Linear(self.emb_dim, self.emb_dim),
                              L.batch_norm_1d(self.emb_dim),
                              nn.Swish())
            )
        self.pool = gnn.GraphPool(pool_type="sum")
    def forward(self, feed_dict):
        """Return node representations for feed_dict['graph'] using both the
        atom graph and the junction-tree graph."""
        g = feed_dict['graph']
        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        h_list = [self.atom_encoder(x)]
        ### virtual node embeddings for graphs
        virtualnode_embedding = self.virtualnode_embedding.expand(
            [g.num_graph, self.virtualnode_embedding.shape[-1]])
        junc_feat = self.junc_embed(feed_dict['junc_graph'].node_feat['feat'])
        junc_feat = paddle.squeeze(junc_feat, axis=1)
        for layer in range(self.num_layers):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + paddle.gather(virtualnode_embedding, g.graph_node_id)
            ### Message passing among graph nodes
            h = self.convs[layer](g, h_list[layer], edge_feat)
            h = self.batch_norms[layer](h)
            if layer == self.num_layers - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.swish(h), self.drop_ratio, training = self.training)
            if self.residual:
                h = h + h_list[layer]
            # junction tree aggr
            # mol2junc maps (atom index -> junction-tree node index); scatter
            # atom features to tree nodes (summed), convolve, gather back.
            atom_index = feed_dict['mol2junc'][:, 0]
            junc_index = feed_dict['mol2junc'][:, 1]
            gather_h = paddle.gather(h, atom_index)
            out_dim = gather_h.shape[-1]
            num = feed_dict['junc_graph'].num_nodes
            init_h = paddle.zeros(shape=[num, out_dim], dtype=gather_h.dtype)
            junc_h = paddle.scatter(init_h, junc_index, gather_h, overwrite=False)
            # node feature of junction tree
            junc_h = junc_feat + junc_h
            junc_h = self.junc_convs[layer](feed_dict['junc_graph'], junc_h)
            junc_h = paddle.gather(junc_h, junc_index)
            init_h = paddle.zeros(shape=[feed_dict['graph'].num_nodes, out_dim], dtype=h.dtype)
            sct_h = paddle.scatter(init_h, atom_index, junc_h, overwrite=False)
            h = h + sct_h
            h_list.append(h)
            ### update the virtual nodes
            if layer < self.num_layers - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = self.pool(g, h_list[layer]) + virtualnode_embedding
                ### transform virtual nodes using MLP
                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layers):
                node_representation += h_list[layer]
        return node_representation
if __name__ == "__main__":
    # This module only defines model classes; no standalone entry point.
    pass
PaddleHelix-dev | PaddleHelix-dev/competition/kddcup2021-PCQM4M-LSC/ensemble/ensemble.py | import os
import os.path as osp
import glob
import pickle
import numpy as np
import pandas as pd
import sklearn
import sklearn.linear_model
import torch
def mae(pred, true):
    """Mean absolute error between two equal-shaped prediction/target arrays."""
    return np.abs(pred - true).mean()
# --- Load dataset splits, labels, and per-model prediction files. ---
# NOTE: torch.load / pickle.load assume trusted local artifacts.
model_root = "./model_pred"
max_min_drop_rate = 0.2 # <=0 means no drop, should < 0.5
split_idx = torch.load("../dataset/pcqm4m_kddcup2021/split_dict.pt")
cross_idx = pickle.load(open("../dataset/cross_split.pkl", 'rb'))
raw_df = pd.read_csv('../dataset/pcqm4m_kddcup2021/raw/data.csv.gz',
                     compression='gzip', sep=',')
# Prediction files come in triples named crossvalid*/leftvalid*/test*.
model_cross_valid = glob.glob(osp.join(model_root, "**", 'crossvalid*'), recursive=True)
model_left_valid = [fname.replace('crossvalid', 'leftvalid') for fname in model_cross_valid]
model_test = [fname.replace('crossvalid', 'test') for fname in model_cross_valid]
def process(cross=1):
    """Collect per-model predictions for one cross-validation fold.

    Skips models trained on the *other* fold (directory name suffix
    'cross1'/'cross2'); models without a fold suffix are used for both.

    Args:
        cross: fold id, 1 or 2.

    Returns:
        Tuple of (model_path, crossvalid_pred, leftvalid_pred, test_pred),
        each transposed so predictions are (n_samples, n_models).
    """
    model_path = []
    crossvalid_pred = []
    leftvalid_pred = []
    test_pred = []
    for i, model_p in enumerate(model_cross_valid):
        model_name = osp.dirname(model_p)
        if cross == 1 and model_name.endswith('cross2'):
            continue
        elif cross == 2 and model_name.endswith('cross1'):
            continue
        model_path.append(osp.splitext(model_p)[0])
        crossvalid_pred.append(np.load(model_p)['arr_0'])
        leftvalid_pred.append(np.load(model_left_valid[i])['arr_0'])
        test_pred.append(np.load(model_test[i])['arr_0'])
    return np.array(model_path).T, np.array(crossvalid_pred).T, \
           np.array(leftvalid_pred).T, np.array(test_pred).T
# Gather predictions for each cross-validation fold.
cross1_model_path, cross1_valid, cross1_left, cross1_test = process(cross=1)
cross2_model_path, cross2_valid, cross2_left, cross2_test = process(cross=2)
########################
def filter_model(pred, true):
    """Fit a robust linear blend over model predictions and flag
    low-contribution models.

    Args:
        pred: (n_samples, n_models) matrix of per-model predictions
            (inferred from the use of model.coef_; confirm at call sites).
        true: ground-truth targets aligned with `pred` rows.

    Returns:
        Boolean mask over models; True marks a "trivial" model to drop.
    """
    model = sklearn.linear_model.HuberRegressor(max_iter=10000)
    model.fit(pred, true)
    # A model is trivial when |coef| * n_models < 1.8, i.e. its blend weight
    # is below 1.8x the uniform share (1/n_models).
    # NOTE(review): 1.8 is an empirical threshold -- confirm against tuning.
    idx_trivial = np.abs(model.coef_*len(model.coef_)) < 1.8
    print("remaining #models:", np.sum(~idx_trivial))
    return idx_trivial
# --- Drop low-contribution models per fold, then fit the final blenders. ---
print("filtering cross1 with low contribution ...")
cross1_valid_true = raw_df['homolumogap'][cross_idx['cross_valid_1']]
cross1_trivial_idx = filter_model(cross1_valid, cross1_valid_true)
cross1_valid = cross1_valid[:, ~cross1_trivial_idx]
print("filtering cross2 with low contribution ...")
cross2_valid_true = raw_df['homolumogap'][cross_idx['cross_valid_2']]
cross2_trivial_idx = filter_model(cross2_valid, cross2_valid_true)
cross2_valid = cross2_valid[:, ~cross2_trivial_idx]
########################
# Trimmed ensemble: sort each sample's model predictions and drop the
# extreme fraction at both ends before fitting the blender.
if max_min_drop_rate > 0:
    drop_num = int(max_min_drop_rate * cross1_valid.shape[1])
    cross1_valid = np.sort(cross1_valid)[:, drop_num:-drop_num]
    cross2_valid = np.sort(cross2_valid)[:, drop_num:-drop_num]
cross1_model = sklearn.linear_model.HuberRegressor(max_iter=10000)
cross1_valid_true = raw_df['homolumogap'][cross_idx['cross_valid_1']]
print("fitting cross1 ...")
cross1_model.fit(cross1_valid, cross1_valid_true)
cross2_model = sklearn.linear_model.HuberRegressor(max_iter=10000)
cross2_valid_true = raw_df['homolumogap'][cross_idx['cross_valid_2']]
print("fitting cross2 ...")
cross2_model.fit(cross2_valid, cross2_valid_true)
########################
# --- Evaluate the blend on the held-out "left" validation slice. ---
cross1_left = cross1_left[:, ~cross1_trivial_idx]
cross2_left = cross2_left[:, ~cross2_trivial_idx]
leftvalid_true = raw_df['homolumogap'][cross_idx['valid_left_1percent']]
# NOTE(review): drop_num is recomputed here *unconditionally*; with
# max_min_drop_rate <= 0 it is 0 and the slice [:, 0:-0] becomes EMPTY,
# contradicting the "<=0 means no drop" comment above -- confirm/guard.
drop_num = int(max_min_drop_rate * cross1_left.shape[1])
cross1_left = np.sort(cross1_left)[:, drop_num:-drop_num]
cross2_left = np.sort(cross2_left)[:, drop_num:-drop_num]
ensemble_left_pred = np.mean([cross1_model.predict(cross1_left),
                              cross2_model.predict(cross2_left)],
                             axis=0)
print("left valid mae:", mae(ensemble_left_pred, leftvalid_true))
cross1_test = cross1_test[:, ~cross1_trivial_idx]
cross2_test = cross2_test[:, ~cross2_trivial_idx]
cross1_test = np.sort(cross1_test)[:, drop_num:-drop_num]
cross2_test = np.sort(cross2_test)[:, drop_num:-drop_num]
########################
# --- Final test prediction: average of the two fold blenders. ---
ensemble_test_pred = np.mean([cross1_model.predict(cross1_test),
                              cross2_model.predict(cross2_test)],
                             axis=0)
########################
# Leakage trick: for test SMILES already seen in train/valid, replace the
# model prediction with the mean of the known label(s).
test_smiles = raw_df['smiles'][split_idx['test']]
dic_known = {}
train_valid = np.append(split_idx['train'], split_idx['valid'])
smiles_list = raw_df['smiles']
value_list = raw_df['homolumogap'].values
for d in train_valid:
    smiles = smiles_list[d]
    value = value_list[d]
    if not smiles in dic_known:
        dic_known[smiles] = []
    dic_known[smiles].append(value)
for i, smiles in enumerate(test_smiles):
    if smiles in dic_known:
        ensemble_test_pred[i] = np.mean(dic_known[smiles])
########################
print('cross1 models:', cross1_model_path[~cross1_trivial_idx])
print('cross2 models:', cross2_model_path[~cross2_trivial_idx])
np.savez_compressed('y_pred_pcqm4m.npz',
                    y_pred=ensemble_test_pred.astype(np.float32))
PaddleHelix-dev | PaddleHelix-dev/competition/ogbg_molhiv/main.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import sys
import os
import math
import argparse
import traceback
import re
import io
import json
import yaml
import time
import logging
from tqdm import tqdm
import numpy as np
from collections import namedtuple
from ogb.graphproppred import GraphPropPredDataset
import paddle.fluid as F
import paddle.fluid.layers as L
import pgl
from pgl.utils import paddle_helper
from pgl.utils.data.dataloader import Dataloader
from propeller import log
log.setLevel(logging.DEBUG)
import propeller.paddle as propeller
from propeller.paddle.data import Dataset as PDataset
from utils.config import prepare_config, make_dir
from utils.logger import prepare_logger, log_to_file
from utils.util import int82strarr
from dataset import MolDataset, MgfCollateFn
from model import MgfModel
import dataset as DS
import model as M
def multi_epoch_dataloader(loader, epochs):
    """Chain `epochs` passes of a single-epoch dataloader into one stream.

    Args:
        loader: zero-argument callable yielding batches for one epoch.
        epochs: number of passes to run.

    Returns:
        A zero-argument generator function yielding batches across all epochs.
    """
    def _worker():
        for i in range(epochs):
            log.info("BEGIN: epoch %s ..." % i)
            yield from loader()
            log.info("END: epoch %s ..." % i)
    return _worker
def train(args, pretrained_model_config=None):
    """Train MgfModel on the OGB graph-property dataset named in args.

    Mutates `args` with dataset-derived fields (num_class, eval_metric,
    task_type, eval_steps), builds the train/eval dataloaders, and runs
    propeller's train_and_eval loop with a best-result exporter.

    Args:
        args: run configuration (dataset_name, batch_size, epochs, ...).
        pretrained_model_config: model config forwarded to MgfModel as
            `params` (falls back to None).
    """
    log.info("loading data")
    raw_dataset = GraphPropPredDataset(name=args.dataset_name)
    args.num_class = raw_dataset.num_tasks
    args.eval_metric = raw_dataset.eval_metric
    args.task_type = raw_dataset.task_type
    train_ds = MolDataset(args, raw_dataset)
    # One evaluation pass per epoch worth of steps.
    args.eval_steps = math.ceil(len(train_ds) / args.batch_size)
    log.info("Total %s steps (eval_steps) every epoch." % (args.eval_steps))
    fn = MgfCollateFn(args)
    train_loader = Dataloader(train_ds,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=args.shuffle,
                              stream_shuffle_size=args.shuffle_size,
                              collate_fn=fn)
    # for evaluating
    # NOTE: evaluation reuses the training loader (train-set evaluation).
    eval_train_loader = train_loader
    eval_train_loader = PDataset.from_generator_func(eval_train_loader)
    train_loader = multi_epoch_dataloader(train_loader, args.epochs)
    train_loader = PDataset.from_generator_func(train_loader)
    if args.warm_start_from is not None:
        # warm start setting
        # Restore only parameters whose checkpoint file exists on disk.
        def _fn(v):
            if not isinstance(v, F.framework.Parameter):
                return False
            if os.path.exists(os.path.join(args.warm_start_from, v.name)):
                return True
            else:
                return False
        ws = propeller.WarmStartSetting(
            predicate_fn=_fn,
            from_dir=args.warm_start_from)
    else:
        ws = None
    # "Better" means a strictly lower value of args.metrics (loss-like).
    def cmp_fn(old, new):
        if old['eval'][args.metrics] - new['eval'][args.metrics] > 0:
            log.info("best %s eval result: %s" % (args.metrics, new['eval']))
            return True
        else:
            return False
    # NOTE(review): only run id 5 persists the best model -- presumably to
    # save disk in multi-seed sweeps; confirm intent.
    if args.log_id is not None:
        save_best_model = int(args.log_id) == 5
    else:
        save_best_model = True
    best_exporter = propeller.exporter.BestResultExporter(
        args.output_dir, (cmp_fn, save_best_model))
    eval_datasets = {"eval": eval_train_loader}
    propeller.train.train_and_eval(
        model_class_or_model_fn=MgfModel,
        params=pretrained_model_config,
        run_config=args,
        train_dataset=train_loader,
        eval_dataset=eval_datasets,
        warm_start_setting=ws,
        exporters=[best_exporter],
    )
def infer(args):
    """Run MgfModel inference over the full dataset and save soft features.

    Loads the checkpoint at args.model_path_for_infer, predicts soft MGF
    features batch by batch, and writes the concatenated array to
    dataset/<name>/soft_mgf_feat.npy.

    Args:
        args: run configuration (dataset_name, batch_size, model_config,
            model_path_for_infer, ...); mutated with dataset-derived fields.
    """
    log.info("loading data")
    raw_dataset = GraphPropPredDataset(name=args.dataset_name)
    args.num_class = raw_dataset.num_tasks
    args.eval_metric = raw_dataset.eval_metric
    args.task_type = raw_dataset.task_type
    test_ds = MolDataset(args, raw_dataset, mode="test")
    fn = MgfCollateFn(args, mode="test")
    test_loader = Dataloader(test_ds,
                             batch_size=args.batch_size,
                             num_workers=1,
                             collate_fn=fn)
    test_loader = PDataset.from_generator_func(test_loader)
    est = propeller.Learner(MgfModel, args, args.model_config)
    mgf_list = []
    for soft_mgf in est.predict(test_loader,
                                ckpt_path=args.model_path_for_infer, split_batch=True):
        mgf_list.append(soft_mgf)
    mgf = np.concatenate(mgf_list)
    log.info("saving features")
    np.save("dataset/%s/soft_mgf_feat.npy" % (args.dataset_name.replace("-", "_")), mgf)
if __name__=="__main__":
    parser = argparse.ArgumentParser(description='gnn')
    parser.add_argument("--config", type=str, default="./config.yaml")
    parser.add_argument("--task_name", type=str, default="task_name")
    parser.add_argument("--infer_model", type=str, default=None)
    parser.add_argument("--log_id", type=str, default=None)
    args = parser.parse_args()
    # --infer_model switches the script from training to inference mode.
    if args.infer_model is not None:
        # Inference: reuse an existing run directory (no create / no save).
        config = prepare_config(args.config, isCreate=False, isSave=False)
        config.model_path_for_infer = args.infer_model
        infer(config)
    else:
        # Training: create a fresh run directory and persist the config.
        config = prepare_config(args.config, isCreate=True, isSave=True)
        log_to_file(log, config.log_dir, config.log_filename)
        if config.warm_start_from is not None:
            # Warm start: the model architecture comes from the pretrained
            # run's config file rather than from this run's config.
            log.info("loading model config from %s" % config.pretrained_config_file)
            pretrained_config = prepare_config(config.pretrained_config_file)
            pretrained_model_config = pretrained_config.pretrained_model_config
        else:
            pretrained_model_config = config.model_config
        config.log_id = args.log_id
        train(config, pretrained_model_config)
| 6,215 | 32.967213 | 88 | py |
interpretability | interpretability-master/context-atlas/preprocess.py |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing the data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
import sqlite3 as sql
import re
import numpy as np
import umap
import json
from tqdm import tqdm
import nltk
# Path of the sqlite dump of english wikipedia used as the sentence source.
DB_PATH = './enwiki-20170820.db'
# Fetch the nltk models used below (POS tagger and the 'punkt' tokenizer);
# these are no-ops when already downloaded.
nltk.download('averaged_perceptron_tagger')
nltk.download('punkt')
def neighbors(word, sentences):
  """Collect POS labels and UMAP-projected embeddings for a word.

  Returns a dict with 'labels' (per-sentence POS info) and 'data'
  (per-layer lists of low-dimensional points).
  """
  labels = get_poses(word, sentences)
  embeddings = get_embeddings(word.lower(), sentences)
  return {
      'labels': labels,
      'data': project_umap(embeddings),
  }
def project_umap(points):
  """Run a separate UMAP projection for each layer's embeddings."""
  return [umap.UMAP().fit_transform(layer_points).tolist()
          for layer_points in points]
def get_embeddings(word, sentences):
  """Get the contextual embedding for `word` in each of `sentences`.

  Relies on the module-level `tokenizer`, `model`, and `device` that are
  initialized in the __main__ block.

  Args:
    word: The (lowercased) word to look up in each sentence.
    sentences: Sentences that each contain `word`.

  Returns:
    A numpy array indexed [layer][sentence] with one embedding vector per
    BERT layer per sentence.

  Raises:
    ValueError: If `word` cannot be located in a tokenized sentence.
  """
  # Negative indices address the 12 encoder layers from the end.
  layers = range(-12, 0)
  points = [[] for _ in layers]
  print('Getting embeddings for %d sentences '%len(sentences))
  for sentence in sentences:
    sentence = '[CLS] ' + sentence + ' [SEP]'
    tokenized_text = tokenizer.tokenize(sentence)
    # Convert tokens to vocabulary indices.
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Build segment ids: everything up to and including each '[SEP]' belongs
    # to one segment, e.g. [0, 0, ..., 0, 1, 1, ..., 1] for two sentences.
    sep_idxs = [-1] + [i for i, v in enumerate(tokenized_text) if v == '[SEP]']
    segments_ids = []
    for i in range(len(sep_idxs) - 1):
      segments_ids += [i] * (sep_idxs[i+1] - sep_idxs[i])
    # Convert inputs to PyTorch tensors on the right device.
    tokens_tensor = torch.tensor([indexed_tokens]).to(device)
    segments_tensors = torch.tensor([segments_ids]).to(device)
    # Predict hidden states features for each layer (no gradients needed).
    with torch.no_grad():
      encoded_layers, _ = model(tokens_tensor, segments_tensors)
    # We have a hidden state for each of the 12 layers in bert-base-uncased.
    encoded_layers = [l.cpu().numpy() for l in encoded_layers]
    try:
      word_idx = tokenized_text.index(word)
    except ValueError:
      # The word was split into word pieces; use the first piece that is a
      # prefix of the word. (The original code kept the *last* match and
      # silently reused a stale index when nothing matched.)
      word_idx = None
      for i, token in enumerate(tokenized_text):
        if token == word[:len(token)]:
          word_idx = i
          break
      if word_idx is None:
        raise ValueError('word %r not found in tokenized sentence' % word)
    # Reconfigure to have an array of layer: embeddings.
    for l in layers:
      points[l].append(encoded_layers[l][0][word_idx])
  return np.asarray(points)
def tokenize_sentences(text):
  """Split raw text into sentences, dropping very long ones.

  Uses a regex heuristic: split on whitespace that follows a '.' or '?',
  unless the '.' looks like part of an abbreviation or an initial.

  Args:
    text: The raw text to split.

  Returns:
    A list of sentences, each shorter than 150 characters.
  """
  print('starting tokenization')
  # Collapse newlines so the splitter only has to handle spaces.
  # Raw strings avoid the invalid escape sequences ('\w', '\.') that the
  # original non-raw pattern literals produced.
  text = re.sub(r'\n', ' ', text)
  sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
  # Filter out too long sentences.
  sentences = [t for t in sentences if len(t) < 150]
  return sentences
def get_query(select, db=DB_PATH):
  """Executes a select statement and returns results and column/field names.

  Args:
    select: SQL select statement to execute.
    db: Path to the sqlite database file.

  Returns:
    A (rows, col_names) tuple: the fetched rows and the lowercased column
    names of the result set.
  """
  # sqlite3's `with connection` only manages the transaction, not the
  # connection itself, so close the connection explicitly when done.
  conn = sql.connect(db)
  try:
    c = conn.cursor()
    c.execute(select)
    col_names = [str(name[0]).lower() for name in c.description]
    return c.fetchall(), col_names
  finally:
    conn.close()
def get_sentences():
  """Pull articles from the wikipedia db and return shuffled sentences."""
  print('Selecting sentences from wikipedia...')
  rows, _ = get_query('select * from articles limit 5000000')
  # Index 3 of each row is the text that gets tokenized below.
  article_texts = [row[3] for row in rows]
  print('Number of articles selected: %d' % len(article_texts))
  sentences = tokenize_sentences(' '.join(article_texts))
  print('Total number of sentences: %d' % len(sentences))
  # Shuffle in place so callers see a random sample when slicing.
  np.random.shuffle(sentences)
  return sentences
def get_poses(word, sentences):
  """Get the part of speech tag for the given word in a list of sentences.

  Args:
    word: The word whose POS tag is looked up.
    sentences: Sentences in which to tag `word`.

  Returns:
    A list of dicts with keys 'sentence' and 'pos'; 'pos' is 'X' when the
    word cannot be located among the sentence's tokens.
  """
  sent_data = []
  for sent in sentences:
    text = nltk.word_tokenize(sent)
    pos = nltk.pos_tag(text)
    try:
      word_idx = text.index(word)
      pos_tag = pos[word_idx][1]
    except (ValueError, IndexError):
      # `word` is not among the tokens (e.g. tokenization split it up);
      # narrower than the original bare `except:`.
      pos_tag = 'X'
    sent_data.append({
        'sentence': sent,
        'pos': pos_tag
    })
  return sent_data
if __name__ == '__main__':
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  print("device : ", device)
  # Load pre-trained model tokenizer (vocabulary)
  tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
  # Load pre-trained model (weights)
  model = BertModel.from_pretrained('bert-base-uncased')
  model.eval()
  model = model.to(device)
  # Load the list of words to build atlases for.
  with open('static/words.json') as f:
    words = json.load(f)
  # Get selection of sentences from wikipedia.
  sentences = get_sentences()
  for word in tqdm(words):
    # Filter out sentences that don't have the word.
    sentences_w_word = [t for t in sentences if ' ' + word + ' ' in t]
    # Take at most 1000 sentences.
    sentences_w_word = sentences_w_word[:1000]
    # And don't show anything if there are less than 100 sentences.
    if (len(sentences_w_word) > 100):
      print('starting process for word : %s'%word)
      locs_and_data = neighbors(word, sentences_w_word)
      with open('static/jsons/%s.json'%word, 'w') as outfile:
        json.dump(locs_and_data, outfile)
  # Store an updated json with the filtered words (one entry per json file
  # actually written above; the extension is stripped from each filename).
  filtered_words = []
  for word in os.listdir('static/jsons'):
    word = word.split('.')[0]
    filtered_words.append(word)
  with open('static/filtered_words.json', 'w') as outfile:
    json.dump(filtered_words, outfile)
  print(filtered_words)
| 6,574 | 30.45933 | 102 | py |
interpretability | interpretability-master/text-dream/python/dream/mlm.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Computes the top predictions for a masked token using pretrained BERT."""
from absl import app
from absl import flags
from pytorch_pretrained_bert import modeling
from pytorch_pretrained_bert import tokenization
import torch
import sys
sys.path.insert(1, 'helpers')
import tokenization_helper
# Command Line Arguments.
# NOTE: the multi-line help strings below rely on implicit concatenation;
# trailing spaces are added so adjacent words do not run together.
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
                    'the sentence to start with')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'The name of the '
                    'model configuration to load')
flags.DEFINE_integer('mask_word', 1, 'word to predict')
flags.DEFINE_integer('top_k', 10, 'how many predictions to output')
flags.DEFINE_bool('normalize', True, 'normalize the activation over other '
                  'activations')
def predict_masked_token(tokenizer, model, device):
  """Print the top-k predictions for the masked position.

  Args:
    tokenizer: Pretrained BERT tokenizer to convert the input.
    model: The model to use for prediction.
    device: Where new tensors are stored.
  """
  tokens = tokenization_helper.tokenize_input_sentence(
      tokenizer, FLAGS.sentence, '', mask_word=FLAGS.mask_word)
  tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
      tokenizer, tokens, device)
  # Run the model once without tracking gradients.
  with torch.no_grad():
    predictions = model(tokens_tensor, segments_tensor)
  # Scores over the whole vocabulary at the masked position.
  mask_scores = predictions[0, FLAGS.mask_word]
  if FLAGS.normalize:
    mask_scores = torch.nn.functional.softmax(mask_scores, -1)
  values, indices = torch.topk(mask_scores, FLAGS.top_k)
  index_array = indices.data.cpu().numpy()
  predicted_tokens = tokenizer.convert_ids_to_tokens(index_array)
  print('Indices: {}'.format(index_array))
  print('Values: {}'.format(values.data.cpu().numpy()))
  print('Tokens: {}'.format(predicted_tokens))
def main(_):
  """Load BERT and its tokenizer, then print top-k mask predictions."""
  # Load pre-trained model tokenizer (vocabulary)
  tokenizer = tokenization.BertTokenizer.from_pretrained(FLAGS.model_config)
  # Load pre-trained model (weights)
  model = modeling.BertForMaskedLM.from_pretrained(FLAGS.model_config)
  # Switch to inference mode (e.g. disables dropout).
  model.eval()
  # Set up the device in use
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
  model = model.to(device)
  predict_masked_token(tokenizer, model, device)


if __name__ == '__main__':
  app.run(main)
| 3,024 | 37.782051 | 80 | py |
interpretability | interpretability-master/text-dream/python/dream/dream.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Used for DeepDream Experiments with BERT."""
from absl import app
from absl import flags
import torch
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import attention_mask_helper
import embeddings_helper
import folder_helper
import inference_helper
import one_hots_helper
import output_helper
import setup_helper
import tokenization_helper
# Command Line Arguments.
# NOTE: multi-line help strings rely on implicit concatenation; trailing
# spaces are added so adjacent words do not run together.
FLAGS = flags.FLAGS
# Input sentence(s) the dream starts from.
flags.DEFINE_string('sentence', u'i hate kickshaws',
                    'the sentence to start with')
flags.DEFINE_string('sentence2', u'', 'an optional second sentence')
flags.DEFINE_string('output_dir', None,
                    'the output directory where the results will be '
                    'written.')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'The name of the '
                    'model configuration to load')
# What to optimize for, and over how many steps.
flags.DEFINE_integer('num_iterations', 3000, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', 2, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', 414, 'neuron to optimize activation for')
flags.DEFINE_integer('dream_start', 1, 'first token that is to be changed in '
                     'the sentence')
flags.DEFINE_integer('dream_end', 0, 'last token that is to be changed in the '
                     'sentence')
# Softmax temperature schedule.
flags.DEFINE_integer('warmup', 200, 'how long before the temperature of the '
                     'softmax gets adjusted')
flags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')
flags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')
flags.DEFINE_float('anneal', 0.9995, 'annealing factor for the temperature')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_bool('normalize', True, 'normalize the activation over other '
                  'activations')
flags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')
# Result reporting.
flags.DEFINE_bool('write_top_k', True, 'write top words for each iteration')
flags.DEFINE_integer('k', 10, 'number of top ranked words to store for each '
                     'iteration')
flags.DEFINE_integer('embedding_analysis', 0, 'frequency in which the '
                     'embedding is analyzed')
flags.DEFINE_integer('metrics_frequency', 250, 'frequency in which results '
                     'are saved')
def deep_dream(data, results, params, device, tokenizer, embedding_map, model):
  """Iteratively modifying the embedding using gradient descent.

  The dreamed span [dream_start, dream_end] is represented as a smooth
  one-hot distribution over the vocabulary that is optimized to maximize
  the activation selected by FLAGS (layer/word/neuron). `data`, `results`
  and `params` are filled in place for the caller to persist.

  Args:
    data: Holds the top-k values.
    results: Holds the results of the run.
    params: Holds the parameters of the run.
    device: The device to store the variables on.
    tokenizer: The tokenizer to transform the input.
    embedding_map: Holding all token embeddings.
    model: The model that should dream.
  """
  # An embedding for the tokens is obtained
  tokens = tokenization_helper.tokenize_input_sentence(
      tokenizer, FLAGS.sentence, FLAGS.sentence2)
  tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
      tokenizer, tokens, device)
  _, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(
      tokens_tensor, segments_tensor, model)
  # Correct the end of the dream if necessary. NOTE: this mutates the global
  # FLAGS value; 0 means "dream up to the last non-[SEP] token".
  if FLAGS.dream_end == 0:
    FLAGS.dream_end = len(tokens) - 2
  # Write the parameters to a file
  output_helper.get_params(params, FLAGS, tokens,
                           embedding_ana=FLAGS.embedding_analysis)
  # Get the smooth one-hot vector that is to be optimized, split into static and
  # modifiable parts
  before, modify, after = one_hots_helper.get_one_hots(
      tokens_tensor.data.cpu().numpy(), FLAGS.dream_start, FLAGS.dream_end,
      device)
  # Obtain the default attention mask to be able to run the model
  attention_mask = attention_mask_helper.get_attention_mask(tokens_tensor)
  # The optimizer used to modify the input embedding; only `modify` is a
  # trainable leaf.
  optimizer = torch.optim.Adam([modify], lr=FLAGS.learning_rate)
  # Init temperature for Gumbel
  temperature = torch.tensor(FLAGS.start_temp, device=device,
                             requires_grad=False)
  # Obtain the properties of the initial embedding
  one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                 FLAGS.gumbel)
  max_values, tokens_ids = one_hots_helper.get_tokens_from_one_hots(
      torch.cat([before, one_hots_sm, after], dim=1))
  numpy_max_values = max_values.data.cpu().numpy()
  ids = tokens_ids.data.cpu().numpy()[0]
  tokens = tokenizer.convert_ids_to_tokens(ids)
  ids_activation = activation_helper.get_ids_activation(
      ids, pos_embeddings, sentence_embeddings, attention_mask,
      FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,
      FLAGS.layer_id, FLAGS.normalize, embedding_map, model, device,
      average=True)
  output_helper.init_results(results)
  # Optimize the embedding for i iterations and update the properties to
  # evaluate the result in each step
  for i in range(FLAGS.num_iterations):
    max_vals, tokens_ids, activation, emb_tok, emb_act = optimizer_step(
        optimizer, before, modify, after, pos_embeddings, sentence_embeddings,
        attention_mask, temperature, i, data, tokenizer, embedding_map, model,
        device)
    # Write the properties of the last step
    if (i % FLAGS.metrics_frequency) == 0:
      output_helper.get_metrics(
          tokens, i, temperature, numpy_max_values, results,
          activation=activation, ids_activation=ids_activation,
          emb_tokens=emb_tok, emb_activation=emb_act,
          emb_ana=FLAGS.embedding_analysis, iterations=FLAGS.num_iterations)
    # Set the numpy max values
    numpy_max_values = max_vals.data.cpu().numpy()
    # Obtain the activation property for the id-array that would result from the
    # optimization
    ids = tokens_ids.data.cpu().numpy()[0]
    tokens = tokenizer.convert_ids_to_tokens(ids)
    # Calculate the activation using the highest scoring words
    ids_activation = activation_helper.get_ids_activation(
        ids, pos_embeddings, sentence_embeddings, attention_mask,
        FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,
        FLAGS.layer_id, FLAGS.normalize, embedding_map, model, device,
        average=True)
    # Check if the temperature needs to decrease. torch.clamp with a single
    # bound floors the annealed temperature at end_temp.
    if i > FLAGS.warmup:
      temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)
  # Calculate the final activation just as before, but without backprop
  if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:
    with torch.no_grad():
      one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                     FLAGS.gumbel)
      fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
      if FLAGS.write_top_k:
        output_helper.get_top_ks(
            fused_one_hots, FLAGS.k, FLAGS.num_iterations, data,
            FLAGS.dream_start, FLAGS.dream_end, tokenizer,
            activation=activation)
      layer_activations = inference_helper.run_inference(
          before, one_hots_sm, after, pos_embeddings, sentence_embeddings,
          attention_mask, embedding_map, model)
      activation = activation_helper.get_activation(
          layer_activations, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id,
          FLAGS.normalize)
      emb_tok, emb_act = embeddings_helper.analyze_current_embedding(
          fused_one_hots, embedding_map, FLAGS.dream_start, FLAGS.dream_end,
          device, pos_embeddings, sentence_embeddings, attention_mask, model,
          FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, FLAGS.normalize,
          tokenizer)
      output_helper.get_metrics(
          tokens, FLAGS.num_iterations, temperature,
          numpy_max_values, results, activation=activation,
          ids_activation=ids_activation, emb_tokens=emb_tok,
          emb_activation=emb_act, emb_ana=FLAGS.embedding_analysis,
          iterations=FLAGS.num_iterations)
def optimizer_step(optimizer, before, modify, after, pos_embeddings,
                   sentence_embeddings, attention_mask, temperature, iteration,
                   data, tokenizer, embedding_map, model, device):
  """Make a step along the gradient of the optimizer.

  Args:
    optimizer: The optimizer that is used for gradient decent.
    before: Embeddings of everything up to the modifyable content.
    modify: Embeddings of the modifyable content.
    after: Embeddings of everything after the modifyable content.
    pos_embeddings: Positional embeddings of the current sequence.
    sentence_embeddings: Sentence embeddings of the current sequence.
    attention_mask: Attention mask to be used with the current sequence.
    temperature: Current temperature of the softmax function.
    iteration: Current iteration of the optimization.
    data: Top-k data to be written after optimization.
    tokenizer: Converts between tokens and their ids.
    embedding_map: Holding the embeddings for each token.
    model: The model to be used with this optimization.
    device: Where to store the variables.

  Returns:
    max_values: The values of the tokens with the highest softmax value.
    token_ids: The ids of the tokens with the highest softmax value.
    activation: The activation of the current input representation.
    emb_tokens: The tokens of the closest embedding representing real tokens.
    emb_activation: Activation for closest embedding representing real tokens.
  """
  # Reset the gradient
  optimizer.zero_grad()
  # Softmax over the one-hots
  one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                 FLAGS.gumbel)
  fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
  # Check if the embedding analysis is to be done (0 disables it entirely).
  emb_tokens = None
  emb_activation = None
  if FLAGS.embedding_analysis != 0:
    if iteration % FLAGS.embedding_analysis == 0:
      tok, act = embeddings_helper.analyze_current_embedding(
          fused_one_hots, embedding_map, FLAGS.dream_start, FLAGS.dream_end,
          device, pos_embeddings, sentence_embeddings, attention_mask, model,
          FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, FLAGS.normalize,
          tokenizer)
      emb_tokens = tok
      emb_activation = act
  # Get the activation
  layer_activations = inference_helper.run_inference(
      before, one_hots_sm, after, pos_embeddings, sentence_embeddings,
      attention_mask, embedding_map, model)
  activation = activation_helper.get_activation(
      layer_activations, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id,
      FLAGS.normalize)
  # Check if top_k should be written
  if FLAGS.write_top_k:
    output_helper.get_top_ks(
        fused_one_hots, FLAGS.k, iteration, data, FLAGS.dream_start,
        FLAGS.dream_end, tokenizer, activation=activation)
  # Calculate the loss as an inverse activation of the layer to be optimised for
  # (adam wants to minimize this value, we want to maximize it)
  loss = -activation
  # Backpropagate the loss
  loss.backward(retain_graph=True)
  # Optimize the word vector based on that loss
  optimizer.step()
  # Get the actual tokens and distances to the embedding for this modified
  # embedding. The softmax is recomputed so the returned tokens reflect the
  # state *after* the optimizer step.
  one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                 FLAGS.gumbel)
  fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
  max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(
      fused_one_hots)
  return max_values, token_ids, activation, emb_tokens, emb_activation
def get_dream(device, tokenizer, embedding_map, model, base_path):
  """Run one dreaming experiment and persist its outputs.

  Args:
    device: The device to use for training the model.
    tokenizer: Used to convert between sentences, tokens, and ids.
    embedding_map: Map containing all the pretrained embeddings of the model.
    model: BERT model used for the dreaming process.
    base_path: Location of where to write the results.
  """
  data, results, params = [], {}, {}
  # deep_dream fills data/results/params in place.
  deep_dream(data, results, params, device, tokenizer, embedding_map, model)
  # Persist the per-iteration top-k words when requested.
  if FLAGS.write_top_k:
    output_helper.write_top_ks(base_path, data, FLAGS.dream_start, params)
  # Always persist the run's metrics and parameters.
  output_helper.write_results(base_path, results, params, 'dream')
def main(_):
  """Set up BERT, create the run directory, and start one dream run."""
  # Set up everything needed for dreaming
  tokenizer, model, device, embedding_map = setup_helper.setup_uncased(
      FLAGS.model_config)
  # Make a directory for the current run
  folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
  path = folder_helper.make_timestamp_directory(FLAGS.output_dir,
                                                prefix='dream')
  # Start the run
  get_dream(device, tokenizer, embedding_map, model, path)


if __name__ == '__main__':
  flags.mark_flag_as_required('output_dir')
  app.run(main)
| 13,732 | 46.355172 | 80 | py |
interpretability | interpretability-master/text-dream/python/dream/reconstruct_changed_activation.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reconstructs activation of a sentence where one token activation is changed."""
import json
import os
from absl import app
from absl import flags
import numpy as np
import torch
import torch.nn.functional as F
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import attention_mask_helper
import embeddings_helper
import folder_helper
import inference_helper
import one_hots_helper
import optimization_helper
import output_helper
import setup_helper
import tokenization_helper
# Command Line Arguments.
# NOTE: multi-line help strings rely on implicit concatenation; trailing
# spaces are added so adjacent words do not run together.
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
                    'the sentence to start with')
flags.DEFINE_string('sentence2', u'', 'an optional second sentence')
flags.DEFINE_string('output_dir', None,
                    'the output directory where the results will be '
                    'written.')
flags.DEFINE_string('change_activation_dir', None, 'the directory that holds '
                    'the activation that we change a word to')
flags.DEFINE_string('change_activation_file', None, 'the file that holds the '
                    'activation that we change a word to')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'the name of the '
                    'model configuration to load')
flags.DEFINE_string('target', None, 'target of the shifted activation process')
flags.DEFINE_integer('num_iterations', 10, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', None, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', None, 'neuron to optimize activation for')
flags.DEFINE_integer('change_id', 1, 'token activation that is to be changed')
flags.DEFINE_integer('dream_start', 1, 'first token that is to be changed in '
                     'the sentence')
flags.DEFINE_integer('dream_end', 0, 'last token that is to be changed in the '
                     'sentence')
flags.DEFINE_integer('warmup', 200, 'how long before the temperature of the '
                     'softmax gets adjusted')
flags.DEFINE_integer('metrics_frequency', 250, 'frequency in which results '
                     'are saved')
flags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')
flags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')
flags.DEFINE_float('anneal', 0.9995, 'annealing factor for the temperature')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')
flags.DEFINE_bool('write_top_k', False, 'write top words for each iteration')
flags.DEFINE_integer('k', 10, 'number of top ranked words to store for each '
                     'iteration')
def change_target_activation(target_activation, device):
  """Change the target activation to the desired one.

  Loads the activation stored at
  <change_activation_dir>/<layer_id>/<change_activation_file> and writes it
  into position FLAGS.change_id of the target activation.

  Args:
    target_activation: The old target activation to be changed (modified in
      place and also returned).
    device: Device to load variables to.

  Returns:
    target_activation: The new, changed target activation to optimize for.
  """
  change_path = os.path.join(FLAGS.change_activation_dir, str(FLAGS.layer_id),
                             FLAGS.change_activation_file)
  # Context manager closes the file handle (the original leaked it).
  with open(change_path, 'rb') as change_file:
    change_np = np.load(change_file)
  change_tensor = torch.tensor(change_np).to(device)
  target_activation[FLAGS.change_id] = change_tensor
  return target_activation
def deep_dream(data, results, params, device, tokenizer, embedding_map, model):
  """Deep dream to a target activation.

  Unlike the maximization variant, this run starts the dreamed span from
  random noise and minimizes the MSE between its activation and a target
  activation in which one token's activation was swapped out
  (see change_target_activation). `data`, `results` and `params` are filled
  in place for the caller to persist.

  Args:
    data: Holds the top-k values.
    results: Holds the results of the run.
    params: Holds the parameters of the run.
    device: Where to place new variables.
    tokenizer: Used to convert between ids and tokens.
    embedding_map: Holding all BERT token embeddings.
    model: The model used for this dream.
  """
  # An embedding for the tokens is obtained
  tokens = tokenization_helper.tokenize_input_sentence(
      tokenizer, FLAGS.sentence, FLAGS.sentence2)
  tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
      tokenizer, tokens, device)
  _, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(
      tokens_tensor, segments_tensor, model)
  # Correct the end of the dream if necessary. NOTE: mutates the global
  # FLAGS value; 0 means "dream up to the last non-[SEP] token".
  if FLAGS.dream_end == 0:
    FLAGS.dream_end = len(tokens) - 2
  # Write the parameters to a file
  output_helper.get_params(params, FLAGS, tokens)
  # Get the smooth one-hot vector that is to be optimized, split into static and
  # modifiable parts
  before, modify, after = one_hots_helper.get_one_hots(
      tokens_tensor.data.cpu().numpy(), FLAGS.dream_start, FLAGS.dream_end,
      device)
  # Re-seed the modifiable span with random noise so the reconstruction does
  # not start from the original tokens.
  modify = torch.randn(modify.shape, device=device, requires_grad=True)
  # Obtain the default attention mask to be able to run the model
  att_mask = attention_mask_helper.get_attention_mask(tokens_tensor)
  # The optimizer used to modify the input embedding
  optimizer = torch.optim.Adam([modify], lr=FLAGS.learning_rate)
  # Init temperature for Gumbel
  temperature = torch.tensor(FLAGS.start_temp, device=device,
                             requires_grad=False)
  # Obtain the target activation we try to optimize towards.
  target_ids = tokens_tensor.data.cpu().numpy()[0]
  target_activation = activation_helper.get_ids_activation(
      target_ids, pos_embeddings, sentence_embeddings, att_mask,
      FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,
      FLAGS.layer_id, False, embedding_map, model, device)
  target_activation = change_target_activation(target_activation, device)
  # Detach so the target is a constant during optimization.
  target_activation = target_activation.clone().detach().requires_grad_(False)
  # Obtain the properties of the initial embedding
  one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                 FLAGS.gumbel)
  max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(
      torch.cat([before, one_hots_sm, after], dim=1))
  numpy_max_values = max_values.data.cpu().numpy()
  ids = token_ids.data.cpu().numpy()[0]
  tokens = tokenizer.convert_ids_to_tokens(ids)
  ids_activation = activation_helper.get_ids_activation(
      ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
      FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
      embedding_map, model, device)
  # Write the initial stuff for the results file
  output_helper.init_results(results)
  # Optimize the embedding for i iterations and update the properties to
  # evaluate the result in each step
  for i in range(FLAGS.num_iterations):
    # Do an optimization step
    max_vals, token_ids, loss = optimization_helper.step_towards_activation(
        optimizer, before, modify, after, pos_embeddings,
        sentence_embeddings, att_mask, temperature, i, FLAGS.gumbel,
        FLAGS.write_top_k, FLAGS.k, data, FLAGS.word_id, FLAGS.neuron_id,
        FLAGS.layer_id, FLAGS.dream_start, FLAGS.dream_end, tokenizer,
        embedding_map, model, target_activation)
    # Write the properties of the last step
    ids_loss = F.mse_loss(ids_activation, target_activation)
    if (i % FLAGS.metrics_frequency) == 0:
      output_helper.get_metrics(
          tokens, i, temperature, numpy_max_values, results,
          loss=loss, ids_loss=ids_loss)
    # Set the numpy max values
    numpy_max_values = max_vals.data.cpu().numpy()
    # Obtain the activation property for the id-array that would result from the
    # optimization
    ids = token_ids.data.cpu().numpy()[0]
    tokens = tokenizer.convert_ids_to_tokens(ids)
    # Calculate the activation using the highest scoring words
    ids_activation = activation_helper.get_ids_activation(
        ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
        FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
        embedding_map, model, device)
    # Check if the temperature needs to decrease. torch.clamp with a single
    # bound floors the annealed temperature at end_temp.
    if i > FLAGS.warmup:
      temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)
  # Calculate the final activation just as before, but without backprop
  if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:
    with torch.no_grad():
      one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
                                                     FLAGS.gumbel)
      fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
      if FLAGS.write_top_k:
        output_helper.write_top_ks(fused_one_hots, FLAGS.k,
                                   FLAGS.num_iterations, data,
                                   FLAGS.dream_start, FLAGS.dream_end,
                                   tokenizer)
      layers = inference_helper.run_inference(before, one_hots_sm, after,
                                              pos_embeddings,
                                              sentence_embeddings, att_mask,
                                              embedding_map, model)
      activation = activation_helper.get_activations(
          layers, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id)
      loss = F.mse_loss(activation, target_activation)
      ids_loss = F.mse_loss(ids_activation, target_activation)
      output_helper.get_metrics(
          tokens, FLAGS.num_iterations, temperature, numpy_max_values, results,
          loss=loss, ids_loss=ids_loss)
def reconstruct_changed_activation(device, tokenizer, emb_map, model):
  """Reconstruct the activation for a sentence after it has been shifted.

  Args:
    device: The device to use for training the model.
    tokenizer: Used to convert between sentences, tokens, and ids.
    emb_map: Map containing all the pretrained embeddings of the model.
    model: BERT model used for the dreaming process.
  """
  data = []
  results = {}
  params = {}
  # Each layer gets its own sub-folder inside the configured output directory.
  layer_dir = os.path.join(FLAGS.output_dir, str(FLAGS.layer_id))
  folder_helper.make_folder_if_not_exists(layer_dir)
  # Run the actual optimization; it fills data, results, and params in place.
  deep_dream(data, results, params, device, tokenizer, emb_map, model)
  # Optionally dump the accumulated top-k candidates, one JSON file per entry.
  if FLAGS.write_top_k:
    for index, top_k_entry in enumerate(data):
      top_k_path = os.path.join(layer_dir, 'top_k' + str(index) + '.json')
      with open(top_k_path, 'w') as top_k_file:
        json.dump(top_k_entry, top_k_file)
  output_helper.write_results(layer_dir, results, params,
                              'reconstruct_changed')
def main(_):
  """Set up the model and start the reconstruction run."""
  tok, bert_model, dev, embeddings = setup_helper.setup_uncased(
      FLAGS.model_config)
  # Ensure the top-level output directory exists before anything is written.
  folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
  # Kick off the actual experiment.
  reconstruct_changed_activation(dev, tok, embeddings, bert_model)
if __name__ == '__main__':
  # None of these flags have a sensible default; the user must provide them.
  for _required_flag in ('output_dir', 'change_activation_dir',
                         'change_activation_file', 'target'):
    flags.mark_flag_as_required(_required_flag)
  app.run(main)
| 11,710 | 45.472222 | 82 | py |
interpretability | interpretability-master/text-dream/python/dream/dream_mlm.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maximally activate the prediction of a masked token."""
from absl import app
from absl import flags
import torch
import sys
sys.path.insert(1, 'helpers')
import attention_mask_helper
import embeddings_helper
import folder_helper
import inference_helper
import one_hots_helper
import output_helper
import setup_helper
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
# Input sentence(s) the dream is seeded from.
flags.DEFINE_string('sentence', u'i hate kickshaws',
                    'the sentence to start with')
flags.DEFINE_string('sentence2', u'', 'an optional sencond sentence')
# Output location and which pretrained model configuration to load.
flags.DEFINE_string('output_dir', None,
                    'the output directory where the results will be'
                    'written.')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'The name of the model'
                    'configuration to load')
# Optimization target: which word position / token id to maximize.
flags.DEFINE_integer('num_iterations', 1000, 'number of optimization steps')
flags.DEFINE_integer('maximize_word', 2, 'word to maximize the prediction'
                     'probability for')
flags.DEFINE_integer('maximize_id', 3124, 'token id to maximize the prediction'
                     'probability for')
# Token index ranges (before/after the maximized word) that may be changed.
flags.DEFINE_integer('dream_before_start', 1, 'index of the start of all'
                     'changed words before the word to be maximized for')
flags.DEFINE_integer('dream_before_end', 1, 'index of the end of all'
                     'changed words before the word to be maximized for')
flags.DEFINE_integer('dream_after_start', 3, 'index of the start of all'
                     'changed words after the word to be maximized for')
flags.DEFINE_integer('dream_after_end', 5, 'index of the end of all'
                     'changed words after the word to be maximized for')
# Softmax-temperature schedule controlling how "discrete" the one-hots get.
flags.DEFINE_integer('warmup', 200, 'how long before the temperature of the'
                     'softmax gets adjusted')
flags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')
flags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')
flags.DEFINE_float('anneal', 0.9995, 'annealing factor for the temperature')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
# Scoring / reporting behavior.
flags.DEFINE_bool('normalize', True, 'normalize the activation over other'
                  'activations')
flags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')
flags.DEFINE_integer('metrics_frequency', 250, 'frequency in which results are'
                     'saved')
def get_prediction(prediction_scores, maximize_word, maximize_id, normalize):
  """Look up the model's score for one token id at one word position.

  Args:
    prediction_scores: The scores that the model has output.
    maximize_word: The word id for which the activation should be maximized.
    maximize_id: The token id that we wish to be maximized for.
    normalize: Whether to normalize the prediction value to sum to one.

  Returns:
    prediction_score: Current prediction score for said word/token combination.
  """
  word_scores = prediction_scores[0][maximize_word]
  if not normalize:
    # Raw (unnormalized) score for the requested token.
    return word_scores[maximize_id]
  # Softmax over the vocabulary dimension turns the score into a probability.
  return torch.nn.functional.softmax(word_scores, -1)[maximize_id]
def get_ids_prediction(ids, pos_embeddings, sentence_embeddings,
                       attention_mask, maximize_word, maximize_id, normalize,
                       embedding_map, model, device, cbs, cbe, cas, cae):
  """Score a discrete id-sequence without tracking gradients.

  Args:
    ids: The ids to get the activations for.
    pos_embeddings: Positional embeddings to run inference with.
    sentence_embeddings: Sentence embeddings to run inference with.
    attention_mask: Attention mask used during inference.
    maximize_word: Word for the activation to be fetched of.
    maximize_id: Id of the activation to be maximized for.
    normalize: Whether to normalize the activations.
    embedding_map: The embedding map used to get embeddings from one-hots.
    model: Model to run inference on.
    device: Where to place new variables.
    cbs: The index of the start of the changeable part before maximize_word.
    cbe: The index of the end of the changeable part before maximize_word.
    cas: The index of the start of the changeable part after maximize_word.
    cae: The index of the end of the changeable part after maximize_word.

  Returns:
    prediction_score: The requested prediction score for the activation.
  """
  # Convert the ids into the five hard one-hot segments the MLM setup uses.
  segments = one_hots_helper.get_one_hots_mlm(ids, cbs, cbe, cas, cae, device)
  # Inference only: no gradient bookkeeping is needed for this evaluation.
  with torch.no_grad():
    fused = torch.cat(list(segments), dim=1)
    scores = inference_helper.run_inference_mlm(
        fused, pos_embeddings, sentence_embeddings, attention_mask,
        embedding_map, model)
    return get_prediction(scores, maximize_word, maximize_id, normalize)
def deep_dream(results, params, device, tokenizer, embedding_map, model):
  """Deep dream to maximally activate the class probability for a token.

  Optimizes two changeable spans of the input (before and after the masked
  word) so the model's prediction for FLAGS.maximize_id at position
  FLAGS.maximize_word becomes as large as possible.

  Args:
    results: Holds the results of the run.
    params: Holds the parameters of the run.
    device: The device to store the variables on.
    tokenizer: The tokenizer to transform the input.
    embedding_map: Holding all token embeddings.
    model: The model that should dream.
  """
  # Tokenize the input sentence(s), masking the word to be maximized for.
  tokens = tokenization_helper.tokenize_input_sentence(
      tokenizer, FLAGS.sentence, FLAGS.sentence2,
      mask_word=FLAGS.maximize_word)
  tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
      tokenizer, tokens, device)
  # Only positional and sentence embeddings are reused; token embeddings are
  # reconstructed from the (optimizable) one-hots on every step.
  _, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(
      tokens_tensor, segments_tensor, model.bert)
  # Write the parameters to a file
  output_helper.get_params_mlm(params, FLAGS, tokens)
  # Get the smooth one-hot vector that is to be optimized, split into static
  # parts (before, max_part, after) and modifiable parts (change1, change2).
  before, change1, max_part, change2, after = one_hots_helper.get_one_hots_mlm(
      tokens_tensor.data.cpu().numpy(), FLAGS.dream_before_start,
      FLAGS.dream_before_end, FLAGS.dream_after_start, FLAGS.dream_after_end,
      device)
  # Obtain the default attention mask to be able to run the model
  attention_mask = attention_mask_helper.get_attention_mask(tokens_tensor)
  # The optimizer only ever updates the two modifiable spans.
  optimizer = torch.optim.Adam([change1, change2], lr=FLAGS.learning_rate)
  # Init temperature for Gumbel
  temperature = torch.tensor(FLAGS.start_temp, device=device,
                             requires_grad=False)
  # Obtain the properties of the initial embedding
  one_hots_sm_1 = one_hots_helper.softmax_one_hots(change1, temperature,
                                                   FLAGS.gumbel)
  one_hots_sm_2 = one_hots_helper.softmax_one_hots(change2, temperature,
                                                   FLAGS.gumbel)
  max_values, tokens_ids = one_hots_helper.get_tokens_from_one_hots(
      torch.cat([before, one_hots_sm_1, max_part, one_hots_sm_2, after], dim=1))
  numpy_max_values = max_values.data.cpu().numpy()
  ids = tokens_ids.data.cpu().numpy()[0]
  tokens = tokenizer.convert_ids_to_tokens(ids)
  # Score of the discrete (argmax) token sequence, reported alongside the
  # soft one-hot score so both can be compared per metrics step.
  ids_prediction = get_ids_prediction(
      ids, pos_embeddings, sentence_embeddings, attention_mask,
      FLAGS.maximize_word, FLAGS.maximize_id, FLAGS.normalize, embedding_map,
      model, device, FLAGS.dream_before_start, FLAGS.dream_before_end,
      FLAGS.dream_after_start, FLAGS.dream_after_end)
  output_helper.init_results(results)
  # Optimize the embedding for i iterations and update the properties to
  # evaluate the result in each step
  for i in range(FLAGS.num_iterations):
    max_vals, tokens_ids, prediction = optimizer_step(
        optimizer, before, change1, max_part, change2, after, pos_embeddings,
        sentence_embeddings, attention_mask, temperature, embedding_map, model)
    # Write the properties of the last step
    if (i % FLAGS.metrics_frequency) == 0:
      output_helper.get_metrics_mlm(
          tokens, prediction, ids_prediction, i, temperature, numpy_max_values,
          results)
    # Set the numpy max values
    numpy_max_values = max_vals.data.cpu().numpy()
    # Obtain the activation property for the id-array that would result from
    # the optimization
    ids = tokens_ids.data.cpu().numpy()[0]
    tokens = tokenizer.convert_ids_to_tokens(ids)
    # Calculate the activation using the highest scoring words
    ids_prediction = get_ids_prediction(
        ids, pos_embeddings, sentence_embeddings, attention_mask,
        FLAGS.maximize_word, FLAGS.maximize_id, FLAGS.normalize, embedding_map,
        model, device, FLAGS.dream_before_start, FLAGS.dream_before_end,
        FLAGS.dream_after_start, FLAGS.dream_after_end)
    # After warmup, anneal the temperature each step; torch.clamp's min
    # argument keeps it from dropping below FLAGS.end_temp.
    if i > FLAGS.warmup:
      temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)
  # Calculate the final activation just as before, but without backprop
  if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:
    with torch.no_grad():
      one_hots_sm_1 = one_hots_helper.softmax_one_hots(change1, temperature,
                                                       FLAGS.gumbel)
      one_hots_sm_2 = one_hots_helper.softmax_one_hots(change2, temperature,
                                                       FLAGS.gumbel)
      fused = torch.cat([before, one_hots_sm_1, max_part, one_hots_sm_2, after],
                        dim=1)
      prediction_score = inference_helper.run_inference_mlm(
          fused, pos_embeddings, sentence_embeddings, attention_mask,
          embedding_map, model)
      prediction = get_prediction(prediction_score, FLAGS.maximize_word,
                                  FLAGS.maximize_id, FLAGS.normalize)
      output_helper.get_metrics_mlm(
          tokens, prediction, ids_prediction, FLAGS.num_iterations, temperature,
          numpy_max_values, results)
def optimizer_step(optimizer, before, change1, max_part, change2, after,
                   pos_embeddings, sentence_embeddings, attention_mask,
                   temperature, embedding_map, model):
  """Run one optimization step towards a higher prediction score.

  Args:
    optimizer: The optimizer to be used.
    before: The tensor for everything before the modifiable content.
    change1: Modifiable content before the word to be maximized for.
    max_part: The static tensor around the word to be maximized for.
    change2: Modifiable content after the word to be maximized for.
    after: The tensor for everything after the modifiable content.
    pos_embeddings: The positional embeddings used for inference.
    sentence_embeddings: The sentence embeddings for inference.
    attention_mask: The attention mask used for inference.
    temperature: The temperature used for making the softmax spike.
    embedding_map: Holding all the token embeddings for BERT.
    model: Model to run inference on.

  Returns:
    max_values: The maximal values for the current token representations.
    token_ids: The token ids of the current representation.
    prediction: The current prediction score of the word to be maximized.
  """
  # Reset the gradient
  optimizer.zero_grad()
  # Softmax over the one-hots
  one_hots_sm_1 = one_hots_helper.softmax_one_hots(change1, temperature,
                                                   FLAGS.gumbel)
  one_hots_sm_2 = one_hots_helper.softmax_one_hots(change2, temperature,
                                                   FLAGS.gumbel)
  fused_one_hots = torch.cat([before, one_hots_sm_1, max_part, one_hots_sm_2,
                              after], dim=1)
  # Get the prediction
  prediction_score = inference_helper.run_inference_mlm(
      fused_one_hots, pos_embeddings, sentence_embeddings, attention_mask,
      embedding_map, model)
  prediction = get_prediction(prediction_score, FLAGS.maximize_word,
                              FLAGS.maximize_id, FLAGS.normalize)
  # Calculate the loss as an inverse activation of the layer to be optimised
  # for (adam wants to minimize this value, we want to maximize it)
  loss = -prediction
  # Backpropagate the loss
  loss.backward(retain_graph=True)
  # Optimize the word vector based on that loss
  optimizer.step()
  # Re-apply the softmax AFTER the parameter update so the reported tokens
  # and confidences reflect the step that was just taken.
  one_hots_sm_1 = one_hots_helper.softmax_one_hots(change1, temperature,
                                                   FLAGS.gumbel)
  one_hots_sm_2 = one_hots_helper.softmax_one_hots(change2, temperature,
                                                   FLAGS.gumbel)
  fused_one_hots = torch.cat([before, one_hots_sm_1, max_part, one_hots_sm_2,
                              after], dim=1)
  max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(
      fused_one_hots)
  return max_values, token_ids, prediction
def get_dream(device, tokenizer, embedding_map, model, base_path):
  """Run one dreaming experiment and persist its outcome.

  Args:
    device: The device to use for training the model.
    tokenizer: Used to convert between sentences, tokens, and ids.
    embedding_map: Map containing all the pretrained embeddings of the model.
    model: BERT model used for the dreaming process.
    base_path: Location of where to write the results.
  """
  run_results, run_params = {}, {}
  # The optimization fills both dictionaries in place.
  deep_dream(run_results, run_params, device, tokenizer, embedding_map, model)
  output_helper.write_results(base_path, run_results, run_params, 'dream_mlm')
# Main function for setting everything up and starting optimization
def main(_):
  """Load the MLM model, prepare the output folder, and dream."""
  tokenizer, model, device, embedding_map = setup_helper.setup_bert_mlm(
      FLAGS.model_config)
  # Create the base output directory plus a unique timestamped run folder.
  folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
  run_dir = folder_helper.make_timestamp_directory(FLAGS.output_dir,
                                                   prefix='dream_mlm')
  # Start the run
  get_dream(device, tokenizer, embedding_map, model, run_dir)
if __name__ == '__main__':
  # The output directory has no default and must be supplied by the user.
  flags.mark_flag_as_required('output_dir')
  app.run(main)
| 14,960 | 46.646497 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.