code stringlengths 17 6.64M |
|---|
def compute_cls_reg_metrics(predictions, labels):
    """Compute classification and regression metrics for stability predictions.

    Negative values are treated as the positive class (sign-based
    classification): a prediction is "correct" when it has the same sign as
    its label.

    Args:
        predictions: array-like of predicted values.
        labels: array-like of ground-truth values (same length).

    Returns:
        dict with classification metrics (mcc, auroc, acc, recall, precision)
        and regression metrics (mae, rmse, pearson, spearman). auroc/mcc are
        -1 when only one class is present, since they are undefined then.
    """
    predictions = np.array(predictions)
    labels = np.array(labels)
    correct = (predictions * labels) > 0
    positive = labels < 0
    acc = correct.mean()
    # np.spacing(1) guards against division by zero when a class is empty.
    recall = (correct * positive).sum() / (positive.sum() + np.spacing(1))
    # Bug fix: the original divided by (predictions < 0).sum() unguarded,
    # producing a zero-division warning/NaN when nothing is predicted negative.
    precision = (correct * positive).sum() / ((predictions < 0).sum() + np.spacing(1))
    if len(set(positive)) == 1:
        print('only one class, auroc/mcc are not well defined')
        auroc = -1
        mcc = -1
    else:
        # Negate predictions so larger scores rank the "positive" (negative-valued) class higher.
        auroc = roc_auc_score(positive, -predictions)
        mcc = matthews_corrcoef(positive, predictions < 0)
    mae = np.mean(np.abs(labels - predictions))
    rmse = np.sqrt(np.mean((labels - predictions) ** 2))
    pears = stats.pearsonr(labels, predictions)[0]
    spear = stats.spearmanr(labels, predictions)[0]
    return {'n_eval_muts': len(labels), 'cls_mcc': mcc, 'cls_auroc': auroc, 'cls_acc': acc, 'cls_recall': recall, 'cls_precision': precision, 'mae': mae, 'rmse': rmse, 'pears': pears, 'spear': spear}
|
def gather_dict_keys_on_main(d):
    """Gather dict entries from every process onto the main process.

    Each rank holds a dict with distinct keys; the main rank receives the
    union of all of them, every other rank receives None.
    e.g. P1: {'6ij6': ...}   P2: {'4me3': ...}
         -> P1: {'6ij6': ..., '4me3': ...}   P2: None
    """
    world_size = get_world_size()
    if world_size == 1:
        return d
    # gather_object requires a destination list on the target rank only.
    gathered = [None] * world_size if is_main_process() else None
    dist.gather_object(d, gathered)
    if not is_main_process():
        return None
    merged = {}
    for part in gathered:
        merged.update(part)
    return merged
|
def param_groups_weight_decay(model: nn.Module, weight_decay=1e-05, no_weight_decay_list=()):
    """Split trainable parameters into decay / no-decay optimizer groups.

    Biases, 1-D tensors (norm scales, etc.) and explicitly listed parameter
    names get zero weight decay; everything else gets `weight_decay`.
    """
    skip = set(no_weight_decay_list)
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        exempt = param.ndim <= 1 or name.endswith('.bias') or name in skip
        (no_decay if exempt else decay).append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.0},
        {'params': decay, 'weight_decay': weight_decay},
    ]
|
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup."""
    if epoch < args.warmup_epochs:
        # Linear warmup from 0 up to args.lr.
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # Half-cycle cosine from args.lr down to args.min_lr.
        progress = (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1.0 + math.cos(math.pi * progress))
    for group in optimizer.param_groups:
        # Layer-wise LR decay stores a per-group scale factor.
        group['lr'] = lr * group['lr_scale'] if 'lr_scale' in group else lr
    return
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default format shows the windowed median and the global average.
        self.fmt = fmt if fmt is not None else '{median:.4f} ({global_avg:.4f})'
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """Sum count/total across ranks. Warning: does not synchronize the deque!"""
        if not is_dist_avail_and_initialized():
            return
        buf = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(buf)
        count, total = buf.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
|
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints training progress."""

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update of each key.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update one meter per keyword argument; None values are skipped."""
        for (k, v) in kwargs.items():
            if (v is None):
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss`-style attribute access to meters.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        # Reduce every meter's count/total across distributed ranks.
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing progress every `print_freq` steps.

        Tracks per-iteration time and data-loading time, and reports ETA and
        (when CUDA is available) peak GPU memory. Requires `iterable` to
        support len().
        """
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total count.
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # Time spent waiting on the data loader for this item.
            data_time.update((time.time() - end))
            (yield obj)
            # Full iteration time: data loading + whatever the caller did.
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
|
def setup_for_distributed(is_master):
    """Disable printing on non-master processes (overridable via force=True)."""
    builtin_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        # On very large jobs, print everywhere so progress stays visible.
        force = force or get_world_size() > 8
        if is_master or force:
            timestamp = datetime.datetime.now().time()
            builtin_print('[{}] '.format(timestamp), end='')
            builtin_print(*args, **kwargs)

    builtins.print = print
|
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def is_main_process():
    """True on rank 0 (or when not running distributed)."""
    return get_rank() == 0
|
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Initialize torch.distributed from whichever launcher set the environment.

    Checks, in order: ITP/OpenMPI env vars, torchrun-style RANK/WORLD_SIZE,
    then SLURM. Falls back to single-process mode otherwise. Sets args.rank,
    args.world_size, args.gpu, args.distributed, and args.dist_backend.
    """
    if args.dist_on_itp:
        # OpenMPI-style environment (ITP clusters).
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        # Re-export the standard torch.distributed variables for downstream code.
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch environment.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # SLURM: derive the local GPU index from the global rank.
        # NOTE(review): this branch does not set args.world_size — presumably
        # it is supplied elsewhere (e.g. via args); confirm against callers.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        setup_for_distributed(is_master=True)
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on non-master ranks from here on.
    setup_for_distributed((args.rank == 0))
|
class NativeScalerWithGradNormCount():
    """AMP loss scaler that also reports the gradient norm on each real update.

    __call__ returns the (unscaled) gradient norm, or None on accumulation
    steps (update_grad=False).
    """
    state_dict_key = 'amp_scaler'

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if not update_grad:
            # Gradient-accumulation step: no optimizer update, no norm.
            return None
        if clip_grad is not None:
            assert parameters is not None
            self._scaler.unscale_(optimizer)
            norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
        else:
            # Unscale first so the reported norm is in true units.
            self._scaler.unscale_(optimizer)
            norm = get_grad_norm_(parameters)
        self._scaler.step(optimizer)
        self._scaler.update()
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
|
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total norm of the gradients of `parameters` (those that have a .grad).

    Supports a single tensor or an iterable; norm_type may be math.inf.
    Returns a 0-d tensor (0.0 when no parameter has a gradient).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not grads:
        return torch.tensor(0.0)
    device = grads[0].device
    if norm_type == inf:
        # Infinity norm: largest absolute entry across all gradients.
        return max(g.abs().max().to(device) for g in grads)
    per_param = torch.stack([torch.norm(g, norm_type).to(device) for g in grads])
    return torch.norm(per_param, norm_type)
|
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
    """Write a checkpoint for `epoch` under args.output_dir (main process only).

    Only the (unwrapped) model weights, the epoch, and args are saved;
    `model`, `optimizer` and `loss_scaler` are accepted for interface
    compatibility but not serialized here.

    Returns:
        The list of checkpoint paths written.
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    # Removed a dead `if True:` wrapper around the body (no behavior change).
    checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
    for checkpoint_path in checkpoint_paths:
        to_save = {'model': model_without_ddp.state_dict(), 'epoch': epoch, 'args': args}
        save_on_master(to_save, checkpoint_path)
    return checkpoint_paths
|
def load_model(args, model_without_ddp, optimizer, loss_scaler):
    """Load weights for finetuning (args.finetune) or resume training (args.resume).

    Finetune checkpoints are loaded non-strictly (heads may differ). Resuming
    also restores optimizer/scaler state and start_epoch when present in the
    checkpoint, unless args.eval is set.
    """
    if (args.finetune and (not args.resume)):
        print(f'Loading finetune checkpoint from {args.finetune}')
        checkpoint = torch.load(args.finetune, map_location='cpu')
        # Strip a DistributedDataParallel 'module.' prefix if present.
        if ('module.' in list(checkpoint['model'].keys())[0]):
            checkpoint['model'] = {k[len('module.'):]: v for (k, v) in checkpoint['model'].items()}
        msg = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
        print(msg)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        # Strip a DistributedDataParallel 'module.' prefix if present.
        if ('module.' in list(checkpoint['model'].keys())[0]):
            checkpoint['model'] = {k[len('module.'):]: v for (k, v) in checkpoint['model'].items()}
        model_without_ddp.load_state_dict(checkpoint['model'])
        print(('Resume checkpoint %s' % args.resume))
        # Restore training state only when resuming an actual training run.
        if (('optimizer' in checkpoint) and ('epoch' in checkpoint) and (not (hasattr(args, 'eval') and args.eval))):
            optimizer.load_state_dict(checkpoint['optimizer'])
            args.start_epoch = (checkpoint['epoch'] + 1)
            if ('scaler' in checkpoint):
                loss_scaler.load_state_dict(checkpoint['scaler'])
            print('With optim & sched!')
|
def all_reduce_mean(x):
    """Average a scalar across all distributed processes (no-op on 1 process)."""
    world_size = get_world_size()
    if world_size == 1:
        return x
    reduced = torch.tensor(x).cuda()
    dist.all_reduce(reduced)
    reduced /= world_size
    return reduced.item()
|
class MutateEverything(nn.Module):
    """Backbone + amino-acid expansion + single/multi mutation decoders."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.backbone = create_backbone(args)
        self.aa_expansion = create_aa_expander(args, self.backbone)
        self.single_decoder = create_single_decoder(args)
        self.multi_decoder = create_multi_decoder(args)

    def forward(self, x, batch):
        # The backbone seeds the prediction dict; each subsequent stage reads
        # the predictions accumulated so far and adds its own.
        pred = dict(self.backbone(x, batch))
        for stage in (self.aa_expansion, self.single_decoder, self.multi_decoder):
            pred.update(stage(x, batch, pred))
        return pred
|
def mem_inputs_to_device(batch, device, args):
    """Move model inputs from `batch` to `device` (non-blocking).

    Returns the list of AlphaFold input dicts for the 'af' backbone, or the
    token tensor for ESM-style backbones.

    Raises:
        ValueError: if args.backbone is not recognized (the original fell
            through and crashed with UnboundLocalError instead).
    """
    if args.backbone == 'af':
        return [{k: v.to(device, non_blocking=True) for k, v in inputs.items()}
                for inputs in batch['af_inputs']]
    if 'esm' in args.backbone:
        return batch['tokens'].to(device, non_blocking=True)
    raise ValueError(f'unsupported backbone: {args.backbone}')
|
class FFNLayer(nn.Module):
    """Transformer feed-forward block with residual connection and LayerNorm.

    `normalize_before` selects pre-norm vs post-norm ordering.
    """

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, activation='relu', normalize_before=False):
        super().__init__()
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm = nn.LayerNorm(d_model)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for weight matrices; biases keep their default init.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def _ffn(self, x):
        # linear -> activation -> dropout -> linear
        return self.linear2(self.dropout(self.activation(self.linear1(x))))

    def forward_post(self, tgt):
        return self.norm(tgt + self.dropout(self._ffn(tgt)))

    def forward_pre(self, tgt):
        return tgt + self.dropout(self._ffn(self.norm(tgt)))

    def forward(self, tgt):
        return self.forward_pre(tgt) if self.normalize_before else self.forward_post(tgt)
|
def _get_activation_fn(activation):
'Return an activation function given a string'
if (activation == 'relu'):
return F.relu
if (activation == 'gelu'):
return F.gelu
if (activation == 'glu'):
return F.glu
raise RuntimeError(f'activation should be relu/gelu, not {activation}.')
|
def set_inf(c, inf):
    """Recursively replace every 'inf' entry of a nested ConfigDict with `inf`."""
    for key, value in c.items():
        if isinstance(value, mlc.ConfigDict):
            set_inf(value, inf)
        elif key == 'inf':
            c[key] = inf
|
def enforce_config_constraints(config):
    """Validate mutually exclusive settings and fix up dependent ones."""

    def resolve(dotted):
        # Walk a dotted path like 'model.template.average_templates'.
        node = config
        for part in dotted.split('.'):
            node = node[part]
        return node

    mutually_exclusive_bools = [
        ('model.template.average_templates', 'model.template.offload_templates'),
        ('globals.use_lma', 'globals.use_flash'),
    ]
    for s1, s2 in mutually_exclusive_bools:
        if resolve(s1) and resolve(s2):
            raise ValueError(f'Only one of {s1} and {s2} may be set at a time')

    fa_is_installed = importlib.util.find_spec('flash_attn') is not None
    if config.globals.use_flash and not fa_is_installed:
        raise ValueError('use_flash requires that FlashAttention is installed')

    # Offloaded inference implies template offloading unless templates are averaged.
    if config.globals.offload_inference and not config.model.template.average_templates:
        config.model.template.offload_templates = True
|
def model_config(name, train=False, low_prec=False, long_sequence_inference=False):
    """Return a deep-copied config preset selected by `name`.

    Presets cover initial training, finetuning (with/without templates), and
    AlphaFold models 1-5, each with an optional '_ptm' variant that enables
    the TM-score head. `train`, `low_prec`, and `long_sequence_inference`
    apply further mode-specific overrides.

    Raises:
        ValueError: for an unrecognized preset name.
    """
    c = copy.deepcopy(config)

    def apply_finetuning():
        # Settings shared by all finetuning presets.
        c.data.train.crop_size = 384
        c.data.train.max_extra_msa = 5120
        c.data.train.max_msa_clusters = 512
        c.loss.violation.weight = 1.0
        c.loss.experimentally_resolved.weight = 0.01

    def apply_large_extra_msa():
        c.data.train.max_extra_msa = 5120
        c.data.predict.max_extra_msa = 5120

    def apply_templates():
        c.data.common.reduce_max_clusters_by_max_templates = True
        c.data.common.use_templates = True
        c.data.common.use_template_torsion_angles = True
        c.model.template.enabled = True

    def apply_ptm():
        c.model.heads.tm.enabled = True
        c.loss.tm.weight = 0.1

    with_ptm = name.endswith('_ptm')
    base = name[:-len('_ptm')] if with_ptm else name

    if base == 'initial_training' and not with_ptm:
        # Defaults are the initial-training settings; no '_ptm' variant exists.
        pass
    elif base == 'finetuning':
        apply_finetuning()
    elif base == 'finetuning_no_templ':
        apply_finetuning()
        c.model.template.enabled = False
    elif base == 'model_1':
        apply_large_extra_msa()
        apply_templates()
    elif base == 'model_2':
        apply_templates()
    elif base in ('model_3', 'model_4'):
        apply_large_extra_msa()
        c.model.template.enabled = False
    elif base == 'model_5':
        c.model.template.enabled = False
    else:
        raise ValueError('Invalid model name')

    if with_ptm:
        apply_ptm()

    if long_sequence_inference:
        assert not train
        # Memory-saving settings for very long sequences (inference only).
        c.globals.offload_inference = True
        c.globals.use_lma = True
        c.globals.use_flash = False
        c.model.template.offload_inference = True
        c.model.template.template_pair_stack.tune_chunk_size = False
        c.model.extra_msa.extra_msa_stack.tune_chunk_size = False
        c.model.evoformer_stack.tune_chunk_size = False

    if train:
        c.globals.blocks_per_ckpt = 1
        c.globals.chunk_size = None
        c.globals.use_lma = False
        c.globals.offload_inference = False
        c.model.template.average_templates = False
        c.model.template.offload_templates = False

    if low_prec:
        c.globals.eps = 0.0001
        # fp16 cannot represent the default large "inf" sentinels.
        set_inf(c, 10000.0)

    enforce_config_constraints(c)
    return c
|
class Dropout(nn.Module):
    """Dropout that can share its mask along particular dimension(s).

    If not in training mode, this module computes the identity function.
    """

    def __init__(self, r: float, batch_dim: Union[int, List[int]]):
        """
        Args:
            r: Dropout rate
            batch_dim: Dimension(s) along which the dropout mask is shared
        """
        super(Dropout, self).__init__()
        self.r = r
        self.batch_dim = [batch_dim] if type(batch_dim) == int else batch_dim
        self.dropout = nn.Dropout(self.r)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply dropout to `x` with the mask broadcast over self.batch_dim.

        Note: multiplies `x` in place and returns it.
        """
        mask_shape = list(x.shape)
        if self.batch_dim is not None:
            # Collapse shared dims to 1 so the mask broadcasts across them.
            for dim in self.batch_dim:
                mask_shape[dim] = 1
        mask = self.dropout(x.new_ones(mask_shape))
        x *= mask
        return x
|
class DropoutRowwise(Dropout):
    """Convenience class for rowwise dropout (mask shared along dim -3),
    as described in subsection 1.11.6.
    """
    __init__ = partialmethod(Dropout.__init__, batch_dim=(-3))
|
class DropoutColumnwise(Dropout):
    """Convenience class for columnwise dropout (mask shared along dim -2),
    as described in subsection 1.11.6.
    """
    __init__ = partialmethod(Dropout.__init__, batch_dim=(-2))
|
class AuxiliaryHeads(nn.Module):
    """Bundle of auxiliary prediction heads (pLDDT, distogram, masked MSA,
    experimentally-resolved, and optionally TM-score)."""

    def __init__(self, config):
        super(AuxiliaryHeads, self).__init__()
        self.plddt = PerResidueLDDTCaPredictor(**config['lddt'])
        self.distogram = DistogramHead(**config['distogram'])
        self.masked_msa = MaskedMSAHead(**config['masked_msa'])
        self.experimentally_resolved = ExperimentallyResolvedHead(**config['experimentally_resolved'])
        # The TM head is optional; only built when enabled in the config.
        if config.tm.enabled:
            self.tm = TMScoreHead(**config.tm)
        self.config = config

    def forward(self, outputs):
        aux = {}
        aux['lddt_logits'] = self.plddt(outputs['sm']['single'])
        aux['plddt'] = compute_plddt(aux['lddt_logits'])
        aux['distogram_logits'] = self.distogram(outputs['pair'])
        aux['masked_msa_logits'] = self.masked_msa(outputs['msa'])
        aux['experimentally_resolved_logits'] = self.experimentally_resolved(outputs['single'])
        if self.config.tm.enabled:
            tm_logits = self.tm(outputs['pair'])
            aux['tm_logits'] = tm_logits
            aux['predicted_tm_score'] = compute_tm(tm_logits, **self.config.tm)
            aux.update(compute_predicted_aligned_error(tm_logits, **self.config.tm))
        return aux
|
class PerResidueLDDTCaPredictor(nn.Module):
    """MLP that predicts per-residue lDDT-Ca bin logits from the single repr."""

    def __init__(self, no_bins, c_in, c_hidden):
        super(PerResidueLDDTCaPredictor, self).__init__()
        self.no_bins = no_bins
        self.c_in = c_in
        self.c_hidden = c_hidden
        self.layer_norm = LayerNorm(self.c_in)
        self.linear_1 = Linear(self.c_in, self.c_hidden, init='relu')
        self.linear_2 = Linear(self.c_hidden, self.c_hidden, init='relu')
        self.linear_3 = Linear(self.c_hidden, self.no_bins, init='final')
        self.relu = nn.ReLU()

    def forward(self, s):
        # LayerNorm -> two ReLU-activated hidden layers -> bin logits.
        s = self.layer_norm(s)
        for linear in (self.linear_1, self.linear_2):
            s = self.relu(linear(s))
        return self.linear_3(s)
|
class DistogramHead(nn.Module):
    """Computes a distogram probability distribution.

    For use in computation of distogram loss, subsection 1.9.8.
    """

    def __init__(self, c_z, no_bins, **kwargs):
        """
        Args:
            c_z: Input channel dimension
            no_bins: Number of distogram bins
        """
        super(DistogramHead, self).__init__()
        self.c_z = c_z
        self.no_bins = no_bins
        self.linear = Linear(self.c_z, self.no_bins, init='final')

    def _forward(self, z):
        """[*, N_res, N_res, C_z] pair embedding -> [*, N, N, no_bins] logits."""
        logits = self.linear(z)
        # Symmetrize over the two residue dimensions.
        return logits + logits.transpose(-2, -3)

    def forward(self, z):
        # Force fp32 under autocast for numerical stability.
        if is_fp16_enabled():
            with torch.cuda.amp.autocast(enabled=False):
                return self._forward(z.float())
        return self._forward(z)
|
class TMScoreHead(nn.Module):
    """For use in computation of TM-score, subsection 1.9.7."""

    def __init__(self, c_z, no_bins, **kwargs):
        """
        Args:
            c_z: Input channel dimension
            no_bins: Number of bins
        """
        super(TMScoreHead, self).__init__()
        self.c_z = c_z
        self.no_bins = no_bins
        self.linear = Linear(self.c_z, self.no_bins, init='final')

    def forward(self, z):
        """[*, N_res, N_res, C_z] pairwise embedding -> [*, N_res, N_res, no_bins] logits."""
        return self.linear(z)
|
class MaskedMSAHead(nn.Module):
    """For use in computation of masked MSA loss, subsection 1.9.9."""

    def __init__(self, c_m, c_out, **kwargs):
        """
        Args:
            c_m: MSA channel dimension
            c_out: Output channel dimension
        """
        super(MaskedMSAHead, self).__init__()
        self.c_m = c_m
        self.c_out = c_out
        self.linear = Linear(self.c_m, self.c_out, init='final')

    def forward(self, m):
        """[*, N_seq, N_res, C_m] MSA embedding -> [*, N_seq, N_res, C_out] reconstruction."""
        return self.linear(m)
|
class ExperimentallyResolvedHead(nn.Module):
    """For use in computation of the "experimentally resolved" loss,
    subsection 1.9.10."""

    def __init__(self, c_s, c_out, **kwargs):
        """
        Args:
            c_s: Input channel dimension
            c_out: Number of distogram bins
        """
        super(ExperimentallyResolvedHead, self).__init__()
        self.c_s = c_s
        self.c_out = c_out
        self.linear = Linear(self.c_s, self.c_out, init='final')

    def forward(self, s):
        """[*, N_res, C_s] single embedding -> [*, N, C_out] logits."""
        return self.linear(s)
|
class OuterProductMean(nn.Module):
    """
    Implements Algorithm 10 (outer product mean: MSA -> pair update).
    """

    def __init__(self, c_m, c_z, c_hidden, eps=0.001):
        """
        Args:
            c_m: MSA embedding channel dimension
            c_z: Pair embedding channel dimension
            c_hidden: Hidden channel dimension
            eps: Small constant added to the normalizer to avoid division by zero
        """
        super(OuterProductMean, self).__init__()
        self.c_m = c_m
        self.c_z = c_z
        self.c_hidden = c_hidden
        self.eps = eps
        self.layer_norm = nn.LayerNorm(c_m)
        self.linear_1 = Linear(c_m, c_hidden)
        self.linear_2 = Linear(c_m, c_hidden)
        self.linear_out = Linear((c_hidden ** 2), c_z, init='final')

    def _opm(self, a, b):
        # Outer product summed over the sequence dimension, then the two
        # hidden channel dims are flattened and projected to C_z.
        outer = torch.einsum('...bac,...dae->...bdce', a, b)
        outer = outer.reshape((outer.shape[:(- 2)] + ((- 1),)))
        outer = self.linear_out(outer)
        return outer

    @torch.jit.ignore
    def _chunk(self, a: torch.Tensor, b: torch.Tensor, chunk_size: int) -> torch.Tensor:
        # Flatten leading batch dims so each batch element is chunked separately.
        a_reshape = a.reshape((((- 1),) + a.shape[(- 3):]))
        b_reshape = b.reshape((((- 1),) + b.shape[(- 3):]))
        out = []
        for (a_prime, b_prime) in zip(a_reshape, b_reshape):
            # Chunk only over a's residue dimension; b is passed whole.
            outer = chunk_layer(partial(self._opm, b=b_prime), {'a': a_prime}, chunk_size=chunk_size, no_batch_dims=1)
            out.append(outer)
        if (len(out) == 1):
            # Avoid a stack for the common single-batch case.
            outer = out[0].unsqueeze(0)
        else:
            outer = torch.stack(out, dim=0)
        # Restore the original leading batch dims.
        outer = outer.reshape((a.shape[:(- 3)] + outer.shape[1:]))
        return outer

    def _forward(self, m: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, inplace_safe: bool=False) -> torch.Tensor:
        """
        Args:
            m: [*, N_seq, N_res, C_m] MSA embedding
            mask: [*, N_seq, N_res] MSA mask
            chunk_size: optional chunking over residues to bound peak memory
            inplace_safe: if True, divide in place to save memory
        Returns:
            [*, N_res, N_res, C_z] pair embedding update
        """
        if (mask is None):
            mask = m.new_ones(m.shape[:(- 1)])
        ln = self.layer_norm(m)
        mask = mask.unsqueeze((- 1))
        # Two independent projections, both zeroed at masked positions.
        a = self.linear_1(ln)
        a = (a * mask)
        b = self.linear_2(ln)
        b = (b * mask)
        del ln
        # Move N_seq next to the channel dim for the einsum in _opm.
        a = a.transpose((- 2), (- 3))
        b = b.transpose((- 2), (- 3))
        if (chunk_size is not None):
            outer = self._chunk(a, b, chunk_size)
        else:
            outer = self._opm(a, b)
        # Number of unmasked sequence pairs per residue pair: turns the
        # summed outer product into a mean.
        norm = torch.einsum('...abc,...adc->...bdc', mask, mask)
        norm = (norm + self.eps)
        if inplace_safe:
            outer /= norm
        else:
            outer = (outer / norm)
        return outer

    def forward(self, m: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, inplace_safe: bool=False) -> torch.Tensor:
        # Force fp32 under autocast for numerical stability.
        if is_fp16_enabled():
            with torch.cuda.amp.autocast(enabled=False):
                return self._forward(m.float(), mask, chunk_size, inplace_safe)
        else:
            return self._forward(m, mask, chunk_size, inplace_safe)
|
class PairTransition(nn.Module):
    """Implements Algorithm 15 (pair transition)."""

    def __init__(self, c_z, n):
        """
        Args:
            c_z: Pair transition channel dimension
            n: Hidden-channel multiplier (hidden dim = n * c_z)
        """
        super(PairTransition, self).__init__()
        self.c_z = c_z
        self.n = n
        self.layer_norm = LayerNorm(self.c_z)
        self.linear_1 = Linear(self.c_z, self.n * self.c_z, init='relu')
        self.relu = nn.ReLU()
        self.linear_2 = Linear(self.n * self.c_z, c_z, init='final')

    def _transition(self, z, mask):
        # norm -> expand -> relu -> project, then zero masked positions.
        z = self.linear_2(self.relu(self.linear_1(self.layer_norm(z))))
        return z * mask

    @torch.jit.ignore
    def _chunk(self, z: torch.Tensor, mask: torch.Tensor, chunk_size: int) -> torch.Tensor:
        return chunk_layer(
            self._transition,
            {'z': z, 'mask': mask},
            chunk_size=chunk_size,
            no_batch_dims=len(z.shape[:-2]),
        )

    def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor] = None, chunk_size: Optional[int] = None) -> torch.Tensor:
        """
        Args:
            z: [*, N_res, N_res, C_z] pair embedding
        Returns:
            [*, N_res, N_res, C_z] pair embedding update
        """
        if mask is None:
            mask = z.new_ones(z.shape[:-1])
        mask = mask.unsqueeze(-1)
        if chunk_size is not None:
            return self._chunk(z, mask, chunk_size)
        return self._transition(z=z, mask=mask)
|
def script_preset_(model: torch.nn.Module):
    """TorchScript, in place, the frequently used submodule types known to be
    scriptable.

    Args:
        model: A torch.nn.Module. It should contain at least some modules from
            this repository, or this function won't do anything.
    """
    scriptable_types = [nn.Dropout, Attention, GlobalAttention, EvoformerBlock]
    script_submodules_(model, scriptable_types, attempt_trace=False, batch_dims=None)
|
def _get_module_device(module: torch.nn.Module) -> torch.device:
"\n Fetches the device of a module, assuming that all of the module's\n parameters reside on a single device\n\n Args:\n module: A torch.nn.Module\n Returns:\n The module's device\n "
return next(module.parameters()).device
|
def _trace_module(module, batch_dims=None):
    """torch.jit.trace a supported module type using random dummy inputs.

    Raises:
        TypeError: for module types without a known dummy-input recipe.
    """
    if batch_dims is None:
        batch_dims = ()
    # Dummy problem sizes for the trace inputs.
    n_seq = 10
    n_res = 10
    device = _get_module_device(module)

    def msa(channel_dim):
        return torch.rand((*batch_dims, n_seq, n_res, channel_dim), device=device)

    def pair(channel_dim):
        return torch.rand((*batch_dims, n_res, n_res, channel_dim), device=device)

    if isinstance(module, MSARowAttentionWithPairBias):
        inputs = {'forward': (msa(module.c_in), pair(module.c_z), torch.randint(0, 2, (*batch_dims, n_seq, n_res)))}
    elif isinstance(module, MSAColumnAttention):
        inputs = {'forward': (msa(module.c_in), torch.randint(0, 2, (*batch_dims, n_seq, n_res)))}
    elif isinstance(module, OuterProductMean):
        inputs = {'forward': (msa(module.c_m), torch.randint(0, 2, (*batch_dims, n_seq, n_res)))}
    else:
        raise TypeError(f'tracing is not supported for modules of type {type(module)}')
    return torch.jit.trace_module(module, inputs)
|
def _script_submodules_helper_(model, types, attempt_trace, to_trace):
    """Recursively script children matching `types`; collect unscriptable
    child types in `to_trace` when tracing is allowed."""
    for name, child in model.named_children():
        if types is None or any(isinstance(child, t) for t in types):
            try:
                setattr(model, name, torch.jit.script(child))
                # Scripted successfully: no need to descend into this child.
                continue
            except (RuntimeError, torch.jit.frontend.NotSupportedError) as e:
                if not attempt_trace:
                    raise e
                # Remember the type so it can be traced in a second pass.
                to_trace.add(type(child))
        _script_submodules_helper_(child, types, attempt_trace, to_trace)
|
def _trace_submodules_(model, types, batch_dims=None):
    """Recursively replace children whose type is in `types` with traced versions."""
    for name, child in model.named_children():
        if any(isinstance(child, t) for t in types):
            setattr(model, name, _trace_module(child, batch_dims=batch_dims))
        else:
            _trace_submodules_(child, types, batch_dims=batch_dims)
|
def script_submodules_(model: nn.Module, types: Optional[Sequence[type]]=None, attempt_trace: Optional[bool]=True, batch_dims: Optional[Tuple[int]]=None):
    """
    Convert all submodules whose types match one of those in the input
    list to recursively scripted equivalents in place. To script the entire
    model, just call torch.jit.script on it directly.

    When types is None, all submodules are scripted.

    Args:
        model: A torch.nn.Module
        types: A list of types of submodules to script
        attempt_trace: Whether to attempt to trace specified modules if
            scripting fails. Recall that tracing eliminates all conditional
            logic---with great tracing comes the mild responsibility of
            having to remember to ensure that the modules in question
            perform the same computations no matter what.
    """
    to_trace = set()
    _script_submodules_helper_(model, types, attempt_trace, to_trace)
    # Second pass: trace whatever could not be scripted.
    if attempt_trace and to_trace:
        _trace_submodules_(model, to_trace, batch_dims=batch_dims)
|
def fix_pdb(pdbfile, alterations_info):
    """Apply pdbfixer to the contents of a PDB file; return a PDB string result.

    1) Replaces nonstandard residues.
    2) Removes heterogens (non protein residues) including water.
    3) Adds missing residues and missing atoms within existing residues.
    4) Adds hydrogens assuming pH=7.0.
    5) KeepIds is currently true, so the fixer must keep the existing chain and
       residue identifiers. This will fail for some files in wider PDB that
       have invalid IDs.

    Args:
        pdbfile: Input PDB file handle.
        alterations_info: A dict that will store details of changes made.

    Returns:
        A PDB string representing the fixed structure.
    """
    fixer = pdbfixer.PDBFixer(pdbfile=pdbfile)
    # Step 1: nonstandard residues (record them before replacement).
    fixer.findNonstandardResidues()
    alterations_info['nonstandard_residues'] = fixer.nonstandardResidues
    fixer.replaceNonstandardResidues()
    # Step 2: heterogens, including water.
    _remove_heterogens(fixer, alterations_info, keep_water=False)
    # Step 3: missing residues and atoms (seed for deterministic placement).
    fixer.findMissingResidues()
    alterations_info['missing_residues'] = fixer.missingResidues
    fixer.findMissingAtoms()
    alterations_info['missing_heavy_atoms'] = fixer.missingAtoms
    alterations_info['missing_terminals'] = fixer.missingTerminals
    fixer.addMissingAtoms(seed=0)
    # Step 4: hydrogens (pdbfixer default pH).
    fixer.addMissingHydrogens()
    # Step 5: serialize back to a PDB string, keeping original IDs.
    out_handle = io.StringIO()
    app.PDBFile.writeFile(fixer.topology, fixer.positions, out_handle, keepIds=True)
    return out_handle.getvalue()
|
def clean_structure(pdb_structure, alterations_info):
    """Applies additional fixes to an OpenMM structure, to handle edge cases.

    Args:
        pdb_structure: An OpenMM structure to modify and fix (in place).
        alterations_info: A dict that will store details of changes made.
    """
    # Selenomethionine whose Se was not flagged as a modification -> plain MET.
    _replace_met_se(pdb_structure, alterations_info)
    # Single-residue chains have no force-field template; drop them.
    _remove_chains_of_length_one(pdb_structure, alterations_info)
|
def _remove_heterogens(fixer, alterations_info, keep_water):
    """Removes the residues that Pdbfixer considers to be heterogens.

    Args:
        fixer: A Pdbfixer instance.
        alterations_info: A dict that will store details of changes made.
        keep_water: If True, water (HOH) is not considered to be a heterogen.
    """
    def _resnames(topology):
        # Set of residue names currently present anywhere in the topology.
        return {residue.name for chain in topology.chains() for residue in chain.residues()}

    before = _resnames(fixer.topology)
    fixer.removeHeterogens(keepWater=keep_water)
    after = _resnames(fixer.topology)
    # Whatever disappeared from the topology was removed as a heterogen.
    alterations_info['removed_heterogens'] = before.difference(after)
|
def _replace_met_se(pdb_structure, alterations_info):
    """Replace the Se in any MET residues that were not marked as modified."""
    converted_residue_numbers = []
    for residue in pdb_structure.iter_residues():
        if residue.get_name_with_spaces().strip() != 'MET':
            continue
        sulfur = residue.get_atom('SD')
        if sulfur.element_symbol == 'Se':
            # Selenomethionine masquerading as plain MET: swap Se back to S.
            sulfur.element_symbol = 'S'
            sulfur.element = element.get_by_symbol('S')
            converted_residue_numbers.append(sulfur.residue_number)
    alterations_info['Se_in_MET'] = converted_residue_numbers
|
def _remove_chains_of_length_one(pdb_structure, alterations_info):
    """Removes chains that correspond to a single amino acid.

    A single amino acid in a chain is both N and C terminus. There is no force
    template for this case.

    Args:
        pdb_structure: An OpenMM pdb_structure to modify and fix (in place).
        alterations_info: A dict that will store details of changes made.
    """
    removed_by_model = {}
    for model in pdb_structure.iter_models():
        keep, drop_ids = [], []
        for chain in model.iter_chains():
            if len(chain) > 1:
                keep.append(chain)
            else:
                drop_ids.append(chain.chain_id)
        model.chains = keep
        # Keep the by-id index consistent with the filtered chain list.
        for chain_id in drop_ids:
            model.chains_by_id.pop(chain_id)
        removed_by_model[model.number] = drop_ids
    alterations_info['removed_chains'] = removed_by_model
|
class AmberRelaxation(object):
    """Amber relaxation: restrained energy minimization of a predicted structure."""
    def __init__(self, *, max_iterations: int, tolerance: float, stiffness: float, exclude_residues: Sequence[int], max_outer_iterations: int, use_gpu: bool):
        """Initialize Amber Relaxer.

        Args:
            max_iterations: Maximum number of L-BFGS iterations. 0 means no max.
            tolerance: kcal/mol, the energy tolerance of L-BFGS.
            stiffness: kcal/mol A**2, spring constant of heavy atom restraining
                potential.
            exclude_residues: Residues to exclude from per-atom restraining.
                Zero-indexed.
            max_outer_iterations: Maximum number of violation-informed relax
                iterations. A value of 1 will run the non-iterative procedure used
                in CASP14. Use 20 so that >95% of the bad cases are relaxed. Relax
                finishes as soon as there are no violations, hence in most cases
                this causes no slowdown. In the worst case we do 20 outer
                iterations.
            use_gpu: Whether to run on GPU
        """
        self._max_iterations = max_iterations
        self._tolerance = tolerance
        self._stiffness = stiffness
        self._exclude_residues = exclude_residues
        self._max_outer_iterations = max_outer_iterations
        self._use_gpu = use_gpu
    def process(self, *, prot: protein.Protein, cif_output: bool) -> Tuple[(str, Dict[(str, Any)], np.ndarray)]:
        """Runs Amber relax on a prediction, adds hydrogens, returns PDB string.

        Args:
            prot: The predicted protein to relax.
            cif_output: If True, the returned structure string is ModelCIF
                instead of PDB.

        Returns:
            A (structure string, debug-data dict, per-residue violation mask)
            tuple.
        """
        out = amber_minimize.run_pipeline(prot=prot, max_iterations=self._max_iterations, tolerance=self._tolerance, stiffness=self._stiffness, exclude_residues=self._exclude_residues, max_outer_iterations=self._max_outer_iterations, use_gpu=self._use_gpu)
        min_pos = out['pos']
        start_pos = out['posinit']
        # RMSD between initial and minimized coordinates.
        rmsd = np.sqrt((np.sum(((start_pos - min_pos) ** 2)) / start_pos.shape[0]))
        debug_data = {'initial_energy': out['einit'], 'final_energy': out['efinal'], 'attempts': out['min_attempts'], 'rmsd': rmsd}
        pdb_str = amber_minimize.clean_protein(prot)
        min_pdb = utils.overwrite_pdb_coordinates(pdb_str, min_pos)
        min_pdb = utils.overwrite_b_factors(min_pdb, prot.b_factors)
        # Sanity check: relaxation must not add/remove atoms (OXT excepted).
        utils.assert_equal_nonterminal_atom_types(protein.from_pdb_string(min_pdb).atom_mask, prot.atom_mask)
        violations = out['structural_violations']['total_per_residue_violations_mask']
        min_pdb = protein.add_pdb_headers(prot, min_pdb)
        output_str = min_pdb
        if cif_output:
            # Round-trip through Protein to emit ModelCIF instead of PDB.
            final_prot = protein.from_pdb_string(min_pdb)
            output_str = protein.to_modelcif(final_prot)
        return (output_str, debug_data, violations)
|
def overwrite_pdb_coordinates(pdb_str: str, pos) -> str:
    """Re-emit `pdb_str` as a PDB string whose atom coordinates are `pos`."""
    # Parse the original PDB only to recover its topology.
    structure = PdbStructure(io.StringIO(pdb_str))
    topology = openmm_app.PDBFile(structure).getTopology()
    with io.StringIO() as out_handle:
        # Write the same topology back out with the new positions.
        openmm_app.PDBFile.writeFile(topology, pos, out_handle)
        return out_handle.getvalue()
|
def overwrite_b_factors(pdb_str: str, bfactors: np.ndarray) -> str:
    """Overwrites the B-factors in pdb_str with contents of bfactors array.

    Args:
        pdb_str: An input PDB string.
        bfactors: A numpy array whose last dimension has size 37 (atom types)
            and whose first dimension is indexed per residue. B-factors are
            assumed to be per residue, so the CA column is used for every atom.
            (NOTE(review): the original docstring claimed shape
            [1, n_residues, 37], but the code indexes the first axis once per
            residue -- confirm the intended shape against callers.)

    Returns:
        A new PDB string with the B-factors replaced.

    Raises:
        ValueError: If the last dimension is not atom_type_num, or there are
            more residues in the PDB than rows in `bfactors`.
    """
    if bfactors.shape[-1] != residue_constants.atom_type_num:
        raise ValueError(f'Invalid final dimension size for bfactors: {bfactors.shape[-1]}.')
    parser = PDB.PDBParser(QUIET=True)
    handle = io.StringIO(pdb_str)
    structure = parser.get_structure('', handle)
    curr_resid = ('', '', '')
    idx = -1
    for atom in structure.get_atoms():
        atom_resid = atom.parent.get_id()
        if atom_resid != curr_resid:
            # New residue encountered: advance the per-residue index.
            idx += 1
            if idx >= bfactors.shape[0]:
                # BUG FIX: this message was previously a plain (non-f) string,
                # so '{shape}' and '{idx}' were emitted literally.
                raise ValueError(f'Index into bfactors exceeds number of residues. B-factors shape: {bfactors.shape}, idx: {idx}.')
            curr_resid = atom_resid
        # Per-residue B-factor: every atom of the residue gets the CA value.
        atom.bfactor = bfactors[idx, residue_constants.atom_order['CA']]
    new_pdb = io.StringIO()
    pdb_io = PDB.PDBIO()
    pdb_io.set_structure(structure)
    pdb_io.save(new_pdb)
    return new_pdb.getvalue()
|
def assert_equal_nonterminal_atom_types(atom_mask: np.ndarray, ref_atom_mask: np.ndarray):
    """Checks that pre- and post-minimized proteins have same atom set.

    The terminal OXT column is excluded from the comparison.

    Raises:
        AssertionError: If the masks differ anywhere outside the OXT column.
    """
    oxt = residue_constants.atom_order['OXT']
    # BUG FIX: `np.bool` was removed in NumPy 1.24; use the builtin `bool`
    # (equivalent dtype) so this works on modern NumPy.
    no_oxt_mask = np.ones(shape=atom_mask.shape, dtype=bool)
    no_oxt_mask[..., oxt] = False
    np.testing.assert_almost_equal(ref_atom_mask[no_oxt_mask], atom_mask[no_oxt_mask])
|
class ArgparseAlphabetizer(HelpFormatter):
    """Help formatter that lists an argparse parser's options alphabetically."""

    @staticmethod
    def sort_actions(actions):
        # Order actions by their option strings (e.g. ['-f', '--foo']).
        return sorted(actions, key=attrgetter('option_strings'))

    def add_arguments(self, actions):
        super().add_arguments(self.sort_actions(actions))

    def add_usage(self, usage, actions, groups, prefix=None):
        super().add_usage(usage, self.sort_actions(actions), groups, prefix)
|
def remove_arguments(parser, args):
    """Strip the given option strings (e.g. '--foo') from an argparse parser."""
    for option in args:
        for action in parser._actions:
            # _handle_conflict_resolve unregisters the option string and, once
            # an action has no option strings left, removes the action itself.
            if option in action.option_strings:
                parser._handle_conflict_resolve(None, [(option, action)])
|
class EarlyStoppingVerbose(EarlyStopping):
    """
    The default EarlyStopping callback's verbose mode is too verbose.
    This class outputs a message only when it's getting ready to stop.
    """
    # NOTE(review): PyTorch Lightning spells this hook
    # `_evaluate_stopping_criteria` (with an extra "a"). If the installed
    # Lightning version uses that spelling, this override is never called and
    # the `super()` call below would raise AttributeError -- confirm against
    # the pinned pytorch-lightning version.
    def _evalute_stopping_criteria(self, *args, **kwargs):
        (should_stop, reason) = super()._evalute_stopping_criteria(*args, **kwargs)
        if should_stop:
            # Only surface the reason at the moment training is about to stop.
            rank_zero_info(f'''{reason}
''')
        return (should_stop, reason)
|
def get_checkpoint_fn():
    """Return the activation-checkpointing function to use.

    Prefers DeepSpeed's implementation when DeepSpeed is installed and its
    checkpointing has been configured; otherwise falls back to PyTorch's
    `torch.utils.checkpoint.checkpoint`.
    """
    if deepspeed_is_installed and deepspeed.checkpointing.is_configured():
        return deepspeed.checkpointing.checkpoint
    return torch.utils.checkpoint.checkpoint
|
@torch.jit.ignore
def checkpoint_blocks(blocks: List[Callable], args: BLOCK_ARGS, blocks_per_ckpt: Optional[int]) -> BLOCK_ARGS:
    """
    Chunk a list of blocks and run each chunk with activation
    checkpointing. We define a "block" as a callable whose only inputs are
    the outputs of the previous block.

    Implements Subsection 1.11.8

    Args:
        blocks:
            List of blocks
        args:
            Tuple of arguments for the first block.
        blocks_per_ckpt:
            Size of each chunk. A higher value corresponds to fewer
            checkpoints, and trades memory for speed. If None, no
            checkpointing is performed.
    Returns:
        The output of the final block
    """
    def _as_tuple(x):
        # Normalize a block's output to a tuple so it can be splatted.
        return x if type(x) is tuple else (x,)

    def _run(block_slice, block_args):
        # Run a sequence of blocks, feeding each the previous block's output.
        for blk in block_slice:
            block_args = _as_tuple(blk(*block_args))
        return block_args

    def _make_runner(lo, hi):
        # Bind a half-open range of blocks into a single checkpointable call.
        def _runner(*block_args):
            return _run(blocks[lo:hi], block_args)
        return _runner

    args = _as_tuple(args)
    if (blocks_per_ckpt is None) or (not torch.is_grad_enabled()):
        # Checkpointing disabled (or pointless without grad): run straight through.
        return _run(blocks, args)
    if (blocks_per_ckpt < 1) or (blocks_per_ckpt > len(blocks)):
        raise ValueError('blocks_per_ckpt must be between 1 and len(blocks)')
    checkpoint = get_checkpoint_fn()
    for lo in range(0, len(blocks), blocks_per_ckpt):
        args = _as_tuple(checkpoint(_make_runner(lo, lo + blocks_per_ckpt), *args))
    return args
|
def _fetch_dims(tree):
    """Return a flat list of the shapes of all tensor leaves in a pytree.

    Supported containers are (exactly) dict, list, and tuple; any other
    non-tensor node raises ValueError. Exact type checks are intentional --
    subclasses are not treated as containers.
    """
    node_type = type(tree)
    if node_type is torch.Tensor:
        return [tree.shape]
    if node_type is dict:
        children = tree.values()
    elif node_type in (list, tuple):
        children = tree
    else:
        raise ValueError('Not supported')
    shapes = []
    for child in children:
        shapes.extend(_fetch_dims(child))
    return shapes
|
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int]) -> Tuple[int]:
    """Convert a flat (row-major) index into a per-dimension index tuple.

    Args:
        flat_idx: Index into the flattened view of a tensor with shape `dims`.
        dims: The dimensions being indexed.
    Returns:
        The equivalent multi-dimensional index.
    """
    reversed_idx = []
    remainder = flat_idx
    # Peel off dimensions from the innermost (fastest-varying) outward.
    for dim_size in reversed(dims):
        reversed_idx.append(remainder % dim_size)
        remainder //= dim_size
    return tuple(reversed(reversed_idx))
|
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]]=None, end_edges: Optional[Sequence[bool]]=None) -> Sequence[Tuple[int]]:
    """
    Produces an ordered sequence of tensor slices that, when used in
    sequence on a tensor with shape dims, yields tensors that contain every
    leaf in the contiguous range [start, end]. Care is taken to yield a
    short sequence of slices, and perhaps even the shortest possible (I'm
    pretty sure it's the latter).

    end is INCLUSIVE.
    """
    # start_edges[i] / end_edges[i] mark whether start/end sit on the boundary
    # of dimension i AND of every dimension after it (enforced by the
    # suffix-product reduction below).
    def reduce_edge_list(l):
        # In-place suffix-AND: l[i] stays truthy only if l[i:] were all truthy.
        tally = 1
        for i in range(len(l)):
            reversed_idx = ((- 1) * (i + 1))
            l[reversed_idx] *= tally
            tally = l[reversed_idx]
    if (start_edges is None):
        start_edges = [(s == 0) for s in start]
        reduce_edge_list(start_edges)
    if (end_edges is None):
        end_edges = [(e == (d - 1)) for (e, d) in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases: zero dims, or a single dim coverable by one slice.
    if (len(start) == 0):
        return [tuple()]
    elif (len(start) == 1):
        return [(slice(start[0], (end[0] + 1)),)]
    slices = []
    path = []
    # Common prefix where start and end agree; select it with width-1 slices.
    for (s, e) in zip(start, end):
        if (s == e):
            path.append(slice(s, (s + 1)))
        else:
            break
    path = tuple(path)
    divergence_idx = len(path)
    # start == end everywhere: the prefix selects the single requested element.
    if (divergence_idx == len(dims)):
        return [tuple(path)]
    def upper():
        # Slices covering everything from `start` to the end of the subtree
        # rooted at start[divergence_idx].
        sdi = start[divergence_idx]
        return [((path + (slice(sdi, (sdi + 1)),)) + s) for s in _get_minimal_slice_set(start[(divergence_idx + 1):], [(d - 1) for d in dims[(divergence_idx + 1):]], dims[(divergence_idx + 1):], start_edges=start_edges[(divergence_idx + 1):], end_edges=[1 for _ in end_edges[(divergence_idx + 1):]])]
    def lower():
        # Slices covering everything from the beginning of the subtree rooted
        # at end[divergence_idx] up to `end`.
        edi = end[divergence_idx]
        return [((path + (slice(edi, (edi + 1)),)) + s) for s in _get_minimal_slice_set([0 for _ in start[(divergence_idx + 1):]], end[(divergence_idx + 1):], dims[(divergence_idx + 1):], start_edges=[1 for _ in start_edges[(divergence_idx + 1):]], end_edges=end_edges[(divergence_idx + 1):])]
    # If both endpoints are aligned to subtree boundaries at the divergence
    # point, the whole range is one contiguous block.
    if (start_edges[divergence_idx] and end_edges[divergence_idx]):
        slices.append((path + (slice(start[divergence_idx], (end[divergence_idx] + 1)),)))
    elif start_edges[divergence_idx]:
        # start aligned: take whole subtrees up to end's subtree, then recurse
        # into end's subtree.
        slices.append((path + (slice(start[divergence_idx], end[divergence_idx]),)))
        slices.extend(lower())
    elif end_edges[divergence_idx]:
        # end aligned: recurse into start's subtree, then take whole subtrees
        # through end.
        slices.extend(upper())
        slices.append((path + (slice((start[divergence_idx] + 1), (end[divergence_idx] + 1)),)))
    else:
        # Neither aligned: recurse into both end subtrees, with whole subtrees
        # in between (if any).
        slices.extend(upper())
        middle_ground = (end[divergence_idx] - start[divergence_idx])
        if (middle_ground > 1):
            slices.append((path + (slice((start[divergence_idx] + 1), end[divergence_idx]),)))
        slices.extend(lower())
    return [tuple(s) for s in slices]
|
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to

        t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]

    but without the need for the initial reshape call, which can be
    memory-intensive in certain situations. The only reshape operations
    in this function are performed on sub-tensors that scale with
    (flat_end - flat_start), the chunk size.
    """
    batch_shape = t.shape[:no_batch_dims]
    # Translate the flat [flat_start, flat_end) range into inclusive
    # multi-dimensional endpoints.
    first = list(_flat_idx_to_idx(flat_start, batch_shape))
    last = list(_flat_idx_to_idx(flat_end - 1, batch_shape))
    # Minimal set of slices that exactly covers that contiguous range.
    pieces = [t[s] for s in _get_minimal_slice_set(first, last, batch_shape)]
    flat_tail = t.shape[no_batch_dims:]
    return torch.cat([piece.view((-1,) + flat_tail) for piece in pieces])
|
def chunk_layer(layer: Callable, inputs: Dict[(str, Any)], chunk_size: int, no_batch_dims: int, low_mem: bool=False, _out: Any=None, _add_into_out: bool=False) -> Any:
    """
    Implements the "chunking" procedure described in section 1.11.8.

    Layer outputs and inputs are assumed to be simple "pytrees,"
    consisting only of (arbitrarily nested) lists, tuples, and dicts with
    torch.Tensor leaves.

    Args:
        layer:
            The layer to be applied chunk-wise
        inputs:
            A (non-nested) dictionary of keyworded inputs. All leaves must
            be tensors and must share the same batch dimensions.
        chunk_size:
            The number of sub-batches per chunk. If multiple batch
            dimensions are specified, a "sub-batch" is defined as a single
            indexing of all batch dimensions simultaneously (s.t. the
            number of sub-batches is the product of the batch dimensions).
        no_batch_dims:
            How many of the initial dimensions of each input tensor can
            be considered batch dimensions.
        low_mem:
            Avoids flattening potentially large input tensors. Unnecessary
            in most cases, and is ever so slightly slower than the default
            setting.
        _out:
            Optional pre-allocated output pytree that chunk results are
            written into.
        _add_into_out:
            If True, chunk results are accumulated (+=) into the output
            instead of overwriting it.
    Returns:
        The reassembled output of the layer on the inputs.
    """
    if (not (len(inputs) > 0)):
        raise ValueError('Must provide at least one input')
    # Broadcast target: the elementwise max over all inputs' batch shapes.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t):
        if (not low_mem):
            # sum(shape) == ndims iff every batch dim is exactly 1.
            if (not (sum(t.shape[:no_batch_dims]) == no_batch_dims)):
                t = t.expand((orig_batch_dims + t.shape[no_batch_dims:]))
            # Flatten all batch dims into one for simple slicing below.
            t = t.reshape((- 1), *t.shape[no_batch_dims:])
        else:
            # Low-memory path: expand lazily; chunks are later cut out with
            # _chunk_slice instead of flattening the whole tensor.
            t = t.expand((orig_batch_dims + t.shape[no_batch_dims:]))
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if (_out is not None):
        # Flatten the provided output's batch dims to match the inputs.
        reshape_fn = (lambda t: t.view(([(- 1)] + list(t.shape[no_batch_dims:]))))
        prepped_outputs = tensor_tree_map(reshape_fn, _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: number of chunks needed to cover the flat batch.
    no_chunks = ((flat_batch_dim // chunk_size) + ((flat_batch_dim % chunk_size) != 0))
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        if (not low_mem):
            # Size-1 leading dims were not expanded above; pass them through
            # unchanged so they broadcast against the chunk.
            select_chunk = (lambda t: (t[i:(i + chunk_size)] if (t.shape[0] != 1) else t))
        else:
            select_chunk = partial(_chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, (i + chunk_size)), no_batch_dims=len(orig_batch_dims))
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        output_chunk = layer(**chunks)
        if (out is None):
            # Allocate the flat output lazily, mirroring the first chunk's
            # structure and trailing shape.
            allocate = (lambda t: t.new_zeros(((flat_batch_dim,) + t.shape[1:])))
            out = tensor_tree_map(allocate, output_chunk)
        out_type = type(output_chunk)
        if (out_type is dict):
            def assign(d1, d2):
                # Write (or accumulate) the chunk into the flat output dict.
                for (k, v) in d1.items():
                    if (type(v) is dict):
                        assign(v, d2[k])
                    elif _add_into_out:
                        v[i:(i + chunk_size)] += d2[k]
                    else:
                        v[i:(i + chunk_size)] = d2[k]
            assign(out, output_chunk)
        elif (out_type is tuple):
            for (x1, x2) in zip(out, output_chunk):
                if _add_into_out:
                    x1[i:(i + chunk_size)] += x2
                else:
                    x1[i:(i + chunk_size)] = x2
        elif (out_type is torch.Tensor):
            if _add_into_out:
                out[i:(i + chunk_size)] += output_chunk
            else:
                out[i:(i + chunk_size)] = output_chunk
        else:
            raise ValueError('Not supported')
        i += chunk_size
    # Restore the original (un-flattened) batch dimensions.
    reshape = (lambda t: t.view((orig_batch_dims + t.shape[1:])))
    out = tensor_tree_map(reshape, out)
    return out
|
class ChunkSizeTuner():
    """Empirically selects the largest chunk size that runs without error.

    A representative function is executed at doubling chunk sizes; the largest
    size that does not raise RuntimeError (e.g. CUDA OOM) is cached and reused
    until the (shape-level) signature of the arguments changes.
    """
    def __init__(self, max_chunk_size=512):
        """
        Args:
            max_chunk_size: Upper bound on the chunk sizes attempted.
        """
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        """Binary-search the largest viable chunk size in [min_chunk_size, max]."""
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Powers of two up to max_chunk_size, preceded by min_chunk_size.
        candidates = [(2 ** l) for l in range((int(math.log(self.max_chunk_size, 2)) + 1))]
        candidates = [c for c in candidates if (c > min_chunk_size)]
        candidates = ([min_chunk_size] + candidates)
        # Nudge the largest candidate so max_chunk_size itself can win.
        candidates[(- 1)] += 4
        def test_chunk_size(chunk_size):
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                # Treat runtime failures (typically CUDA OOM) as "too large".
                return False
        min_viable_chunk_size_index = 0
        i = (len(candidates) - 1)
        while (i > min_viable_chunk_size_index):
            viable = test_chunk_size(candidates[i])
            if (not viable):
                i = ((min_viable_chunk_size_index + i) // 2)
            else:
                min_viable_chunk_size_index = i
                i = (((i + len(candidates)) - 1) // 2)
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        """Return True iff two argument signatures are element-wise identical."""
        consistent = True
        for (a1, a2) in zip(ac1, ac2):
            # BUG FIX: the original inspected type(ac1)/type(ac2) -- the parent
            # containers -- instead of the elements being compared. That
            # crashed on tuples of scalars (zip over ints) and compared only
            # the keys of dicts nested inside lists.
            if (type(a1) != type(a2)):
                # Mismatched node types can never be consistent.
                return False
            if ((type(a1) is list) or (type(a1) is tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif (type(a1) is dict):
                # Compare values in a deterministic (key-sorted) order.
                a1_items = [v for (_, v) in sorted(a1.items(), key=(lambda x: x[0]))]
                a2_items = [v for (_, v) in sorted(a2.items(), key=(lambda x: x[0]))]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= (a1 == a2)
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: Tuple[Any], min_chunk_size: int) -> int:
        """Return a chunk size for `args`, re-tuning only when shapes change.

        Args:
            representative_fn: Callable accepting *args plus a `chunk_size`
                keyword; should mirror the real workload's memory profile.
            args: The workload's arguments; tensors are reduced to their
                shapes to form the cache signature.
            min_chunk_size: Smallest chunk size worth considering.
        Returns:
            The tuned (and possibly cached) chunk size.
        """
        consistent = True
        remove_tensors = (lambda a: (a.shape if (type(a) is torch.Tensor) else a))
        arg_data = tree_map(remove_tensors, args, object)
        if (self.cached_arg_data is not None):
            assert (len(self.cached_arg_data) == len(arg_data))
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: nothing cached yet, force a tuning run.
            consistent = False
        if (not consistent):
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data
        return self.cached_chunk_size
|
class ExponentialMovingAverage():
    """
    Maintains moving averages of parameters with exponential decay

    At each step, the stored copy `copy` of each parameter `param` is
    updated as follows:

        `copy = decay * copy + (1 - decay) * param`

    where `decay` is an attribute of the ExponentialMovingAverage object.
    """
    def __init__(self, model: nn.Module, decay: float):
        """
        Args:
            model:
                A torch.nn.Module whose parameters are to be tracked
            decay:
                A value (usually close to 1.) by which updates are
                weighted as part of the above formula
        """
        super(ExponentialMovingAverage, self).__init__()
        # Detached clones so the shadow weights never receive gradients.
        self.params = tensor_tree_map(lambda t: t.clone().detach(), model.state_dict())
        self.decay = decay
        self.device = next(model.parameters()).device

    def to(self, device):
        # Move only the shadow copy; the tracked model is the caller's concern.
        self.params = tensor_tree_map(lambda t: t.to(device), self.params)
        self.device = device

    def _update_state_dict_(self, update, state_dict):
        # Recursive in-place EMA step:
        #   stored <- stored - (1 - decay) * (stored - new)
        # which is algebraically decay * stored + (1 - decay) * new.
        with torch.no_grad():
            for key, new_value in update.items():
                stored = state_dict[key]
                if isinstance(new_value, torch.Tensor):
                    stored -= ((stored - new_value) * (1 - self.decay))
                else:
                    # Nested sub-dict: recurse into the matching stored dict.
                    self._update_state_dict_(new_value, stored)

    def update(self, model: torch.nn.Module) -> None:
        """
        Updates the stored parameters using the state dict of the provided
        module. The module should have the same structure as that used to
        initialize the ExponentialMovingAverage object.
        """
        self._update_state_dict_(model.state_dict(), self.params)

    def load_state_dict(self, state_dict: OrderedDict) -> None:
        # Clone so later in-place EMA updates don't mutate the caller's dict.
        for key in state_dict['params'].keys():
            self.params[key] = state_dict['params'][key].clone()
        self.decay = state_dict['decay']

    def state_dict(self) -> OrderedDict:
        return OrderedDict({'params': self.params, 'decay': self.decay})
|
class ParamType(Enum):
    """Tags a weight's source layout with the transform that converts it to
    the destination tensor's layout.

    Each member's value is a `partial` wrapping the conversion callable
    (wrapping in `partial` stops the Enum machinery from treating the bare
    function as a method); `__init__` exposes it as `self.transformation`.
    """
    LinearWeight = partial(lambda w: w.transpose(-1, -2))
    LinearWeightMHA = partial(lambda w: w.reshape(*w.shape[:-2], -1).transpose(-1, -2))
    LinearMHAOutputWeight = partial(lambda w: w.reshape(*w.shape[:-3], -1, w.shape[-1]).transpose(-1, -2))
    LinearBiasMHA = partial(lambda w: w.reshape(*w.shape[:-2], -1))
    LinearWeightOPM = partial(lambda w: w.reshape(*w.shape[:-3], -1, w.shape[-1]).transpose(-1, -2))
    Other = partial(lambda w: w)

    def __init__(self, fn):
        # Expose the wrapped callable under a readable name.
        self.transformation = fn
|
@dataclass
class Param():
    # Destination tensor(s) to be written into: a single tensor, or -- when
    # `stacked` is True -- a list of per-layer tensors sharing one layout.
    param: Union[(torch.Tensor, List[torch.Tensor])]
    # Layout tag selecting the source->PyTorch transformation (see ParamType).
    param_type: ParamType = ParamType.Other
    # True when `param` is a list to be filled from a stacked (layer-major)
    # weight array.
    stacked: bool = False
|
def process_translation_dict(d, top_layer=True):
    """Flatten a nested translation dict into {'slash/joined/key': value} form.

    Keys of nested sub-dicts are joined with '/'. Sub-dicts nested directly
    under the top level are prefixed with the module-level `_NPZ_KEY_PREFIX`;
    non-top-level leaves receive a leading '/'.
    """
    flat = {}
    for key, value in d.items():
        if type(value) is dict:
            prefix = _NPZ_KEY_PREFIX if top_layer else ''
            nested = process_translation_dict(value, top_layer=False)
            for sub_key, sub_value in nested.items():
                flat[prefix + '/'.join([key, sub_key])] = sub_value
        else:
            flat[(('/' + key) if not top_layer else key)] = value
    return flat
|
def stacked(param_dict_list, out=None):
    """
    Args:
        param_dict_list:
            A list of (nested) Param dicts to stack. The structure of
            each dict must be the identical (down to the ParamTypes of
            "parallel" Params). There must be at least one dict
            in the list.
        out:
            Optional dict populated in place (used for recursion).
    Returns:
        A dict with the same nesting, where each leaf Param bundles the
        corresponding tensors from every input dict, marked `stacked`.
    """
    if out is None:
        out = {}
    reference = param_dict_list[0]
    for key in reference:
        parallel = [d[key] for d in param_dict_list]
        head = parallel[0]
        if type(head) is dict:
            # Recurse into sub-dicts, filling a fresh nested output dict.
            out[key] = {}
            stacked(parallel, out=out[key])
        elif type(head) is Param:
            # Merge the parallel Params into one stacked Param.
            out[key] = Param(param=[p.param for p in parallel], param_type=head.param_type, stacked=True)
    return out
|
def assign(translation_dict, orig_weights):
    """Copy raw weights into the model tensors referenced by a translation dict.

    Args:
        translation_dict: Flat dict mapping weight-file keys to Param entries
            whose `.param` tensors are overwritten in place.
        orig_weights: Mapping (e.g. a loaded .npz) from the same keys to the
            raw arrays.

    Raises:
        Whatever the underlying transform/copy raises (typically a shape
        mismatch), after printing the offending key and shapes for debugging.
    """
    for key, param in translation_dict.items():
        with torch.no_grad():
            weights = torch.as_tensor(orig_weights[key])
            ref, param_type = param.param, param.param_type
            if param.stacked:
                # Stacked params: dim 0 indexes the per-layer destinations.
                weights = torch.unbind(weights, 0)
            else:
                weights = [weights]
                ref = [ref]
            try:
                weights = list(map(param_type.transformation, weights))
                for p, w in zip(ref, weights):
                    p.copy_(w)
            except Exception:
                # BUG FIX: was a bare `except:`, which also intercepts
                # KeyboardInterrupt/SystemExit. Print context, then re-raise.
                print(key)
                print(ref[0].shape)
                print(weights[0].shape)
                raise
|
def generate_translation_dict(model, version):
    """Build the nested mapping from AlphaFold-JAX parameter names to `Param`
    wrappers around the corresponding tensors of the PyTorch `model`.

    Args:
        model: An OpenFold-style model whose tensors will receive the weights.
        version: AlphaFold parameter-set name (e.g. 'model_1', 'model_1_ptm').
            Template entries are omitted for the template-free models listed
            in `no_templ`; the pTM head is added for '_ptm' versions.

    Returns:
        A nested dict of `Param` objects suitable for
        `process_translation_dict` followed by `assign`.
    """
    # --- Leaf constructors: wrap a single tensor with its layout tag.
    LinearWeight = (lambda l: Param(l, param_type=ParamType.LinearWeight))
    LinearBias = (lambda l: Param(l))
    LinearWeightMHA = (lambda l: Param(l, param_type=ParamType.LinearWeightMHA))
    LinearBiasMHA = (lambda b: Param(b, param_type=ParamType.LinearBiasMHA))
    LinearWeightOPM = (lambda l: Param(l, param_type=ParamType.LinearWeightOPM))
    # --- Module-level constructors: map a torch module to its JAX param dict.
    LinearParams = (lambda l: {'weights': LinearWeight(l.weight), 'bias': LinearBias(l.bias)})
    LayerNormParams = (lambda l: {'scale': Param(l.weight), 'offset': Param(l.bias)})
    AttentionParams = (lambda att: {'query_w': LinearWeightMHA(att.linear_q.weight), 'key_w': LinearWeightMHA(att.linear_k.weight), 'value_w': LinearWeightMHA(att.linear_v.weight), 'output_w': Param(att.linear_o.weight, param_type=ParamType.LinearMHAOutputWeight), 'output_b': LinearBias(att.linear_o.bias)})
    AttentionGatedParams = (lambda att: dict(**AttentionParams(att), **{'gating_w': LinearWeightMHA(att.linear_g.weight), 'gating_b': LinearBiasMHA(att.linear_g.bias)}))
    # Global attention overrides key/value with non-MHA (shared-head) layouts.
    GlobalAttentionParams = (lambda att: dict(AttentionGatedParams(att), key_w=LinearWeight(att.linear_k.weight), value_w=LinearWeight(att.linear_v.weight)))
    TriAttParams = (lambda tri_att: {'query_norm': LayerNormParams(tri_att.layer_norm), 'feat_2d_weights': LinearWeight(tri_att.linear.weight), 'attention': AttentionGatedParams(tri_att.mha)})
    TriMulOutParams = (lambda tri_mul: {'layer_norm_input': LayerNormParams(tri_mul.layer_norm_in), 'left_projection': LinearParams(tri_mul.linear_a_p), 'right_projection': LinearParams(tri_mul.linear_b_p), 'left_gate': LinearParams(tri_mul.linear_a_g), 'right_gate': LinearParams(tri_mul.linear_b_g), 'center_layer_norm': LayerNormParams(tri_mul.layer_norm_out), 'output_projection': LinearParams(tri_mul.linear_z), 'gating_linear': LinearParams(tri_mul.linear_g)})
    # Incoming triangle multiplication swaps the a/b (left/right) projections.
    TriMulInParams = (lambda tri_mul: {'layer_norm_input': LayerNormParams(tri_mul.layer_norm_in), 'left_projection': LinearParams(tri_mul.linear_b_p), 'right_projection': LinearParams(tri_mul.linear_a_p), 'left_gate': LinearParams(tri_mul.linear_b_g), 'right_gate': LinearParams(tri_mul.linear_a_g), 'center_layer_norm': LayerNormParams(tri_mul.layer_norm_out), 'output_projection': LinearParams(tri_mul.linear_z), 'gating_linear': LinearParams(tri_mul.linear_g)})
    PairTransitionParams = (lambda pt: {'input_layer_norm': LayerNormParams(pt.layer_norm), 'transition1': LinearParams(pt.linear_1), 'transition2': LinearParams(pt.linear_2)})
    MSAAttParams = (lambda matt: {'query_norm': LayerNormParams(matt.layer_norm_m), 'attention': AttentionGatedParams(matt.mha)})
    MSAColAttParams = (lambda matt: {'query_norm': LayerNormParams(matt._msa_att.layer_norm_m), 'attention': AttentionGatedParams(matt._msa_att.mha)})
    MSAGlobalAttParams = (lambda matt: {'query_norm': LayerNormParams(matt.layer_norm_m), 'attention': GlobalAttentionParams(matt.global_attention)})
    MSAAttPairBiasParams = (lambda matt: dict(**MSAAttParams(matt), **{'feat_2d_norm': LayerNormParams(matt.layer_norm_z), 'feat_2d_weights': LinearWeight(matt.linear_z.weight)}))
    IPAParams = (lambda ipa: {'q_scalar': LinearParams(ipa.linear_q), 'kv_scalar': LinearParams(ipa.linear_kv), 'q_point_local': LinearParams(ipa.linear_q_points), 'kv_point_local': LinearParams(ipa.linear_kv_points), 'trainable_point_weights': Param(param=ipa.head_weights, param_type=ParamType.Other), 'attention_2d': LinearParams(ipa.linear_b), 'output_projection': LinearParams(ipa.linear_out)})
    # --- Block-level constructors: one dict per stack block.
    TemplatePairBlockParams = (lambda b: {'triangle_attention_starting_node': TriAttParams(b.tri_att_start), 'triangle_attention_ending_node': TriAttParams(b.tri_att_end), 'triangle_multiplication_outgoing': TriMulOutParams(b.tri_mul_out), 'triangle_multiplication_incoming': TriMulInParams(b.tri_mul_in), 'pair_transition': PairTransitionParams(b.pair_transition)})
    MSATransitionParams = (lambda m: {'input_layer_norm': LayerNormParams(m.layer_norm), 'transition1': LinearParams(m.linear_1), 'transition2': LinearParams(m.linear_2)})
    OuterProductMeanParams = (lambda o: {'layer_norm_input': LayerNormParams(o.layer_norm), 'left_projection': LinearParams(o.linear_1), 'right_projection': LinearParams(o.linear_2), 'output_w': LinearWeightOPM(o.linear_out.weight), 'output_b': LinearBias(o.linear_out.bias)})
    def EvoformerBlockParams(b, is_extra_msa=False):
        # Extra-MSA blocks use global column attention under a different key.
        if is_extra_msa:
            col_att_name = 'msa_column_global_attention'
            msa_col_att_params = MSAGlobalAttParams(b.msa_att_col)
        else:
            col_att_name = 'msa_column_attention'
            msa_col_att_params = MSAColAttParams(b.msa_att_col)
        d = {'msa_row_attention_with_pair_bias': MSAAttPairBiasParams(b.msa_att_row), col_att_name: msa_col_att_params, 'msa_transition': MSATransitionParams(b.core.msa_transition), 'outer_product_mean': OuterProductMeanParams(b.core.outer_product_mean), 'triangle_multiplication_outgoing': TriMulOutParams(b.core.tri_mul_out), 'triangle_multiplication_incoming': TriMulInParams(b.core.tri_mul_in), 'triangle_attention_starting_node': TriAttParams(b.core.tri_att_start), 'triangle_attention_ending_node': TriAttParams(b.core.tri_att_end), 'pair_transition': PairTransitionParams(b.core.pair_transition)}
        return d
    ExtraMSABlockParams = partial(EvoformerBlockParams, is_extra_msa=True)
    FoldIterationParams = (lambda sm: {'invariant_point_attention': IPAParams(sm.ipa), 'attention_layer_norm': LayerNormParams(sm.layer_norm_ipa), 'transition': LinearParams(sm.transition.layers[0].linear_1), 'transition_1': LinearParams(sm.transition.layers[0].linear_2), 'transition_2': LinearParams(sm.transition.layers[0].linear_3), 'transition_layer_norm': LayerNormParams(sm.transition.layer_norm), 'affine_update': LinearParams(sm.bb_update.linear), 'rigid_sidechain': {'input_projection': LinearParams(sm.angle_resnet.linear_in), 'input_projection_1': LinearParams(sm.angle_resnet.linear_initial), 'resblock1': LinearParams(sm.angle_resnet.layers[0].linear_1), 'resblock2': LinearParams(sm.angle_resnet.layers[0].linear_2), 'resblock1_1': LinearParams(sm.angle_resnet.layers[1].linear_1), 'resblock2_1': LinearParams(sm.angle_resnet.layers[1].linear_2), 'unnormalized_angles': LinearParams(sm.angle_resnet.linear_out)}})
    # Stack per-block dicts so each leaf Param holds one tensor per layer.
    ems_blocks = model.extra_msa_stack.blocks
    ems_blocks_params = stacked([ExtraMSABlockParams(b) for b in ems_blocks])
    evo_blocks = model.evoformer.blocks
    evo_blocks_params = stacked([EvoformerBlockParams(b) for b in evo_blocks])
    # NOTE: 'pair_activiations' reproduces the (misspelled) key used by the
    # source weight files; do not "fix" it.
    translations = {'evoformer': {'preprocess_1d': LinearParams(model.input_embedder.linear_tf_m), 'preprocess_msa': LinearParams(model.input_embedder.linear_msa_m), 'left_single': LinearParams(model.input_embedder.linear_tf_z_i), 'right_single': LinearParams(model.input_embedder.linear_tf_z_j), 'prev_pos_linear': LinearParams(model.recycling_embedder.linear), 'prev_msa_first_row_norm': LayerNormParams(model.recycling_embedder.layer_norm_m), 'prev_pair_norm': LayerNormParams(model.recycling_embedder.layer_norm_z), 'pair_activiations': LinearParams(model.input_embedder.linear_relpos), 'extra_msa_activations': LinearParams(model.extra_msa_embedder.linear), 'extra_msa_stack': ems_blocks_params, 'evoformer_iteration': evo_blocks_params, 'single_activations': LinearParams(model.evoformer.linear)}, 'structure_module': {'single_layer_norm': LayerNormParams(model.structure_module.layer_norm_s), 'initial_projection': LinearParams(model.structure_module.linear_in), 'pair_layer_norm': LayerNormParams(model.structure_module.layer_norm_z), 'fold_iteration': FoldIterationParams(model.structure_module)}, 'predicted_lddt_head': {'input_layer_norm': LayerNormParams(model.aux_heads.plddt.layer_norm), 'act_0': LinearParams(model.aux_heads.plddt.linear_1), 'act_1': LinearParams(model.aux_heads.plddt.linear_2), 'logits': LinearParams(model.aux_heads.plddt.linear_3)}, 'distogram_head': {'half_logits': LinearParams(model.aux_heads.distogram.linear)}, 'experimentally_resolved_head': {'logits': LinearParams(model.aux_heads.experimentally_resolved.linear)}, 'masked_msa_head': {'logits': LinearParams(model.aux_heads.masked_msa.linear)}}
    # Template stack only exists for the template-using parameter sets.
    no_templ = ['model_3', 'model_4', 'model_5', 'model_3_ptm', 'model_4_ptm', 'model_5_ptm']
    if (version not in no_templ):
        tps_blocks = model.template_pair_stack.blocks
        tps_blocks_params = stacked([TemplatePairBlockParams(b) for b in tps_blocks])
        template_param_dict = {'template_embedding': {'single_template_embedding': {'embedding2d': LinearParams(model.template_pair_embedder.linear), 'template_pair_stack': {'__layer_stack_no_state': tps_blocks_params}, 'output_layer_norm': LayerNormParams(model.template_pair_stack.layer_norm)}, 'attention': AttentionParams(model.template_pointwise_att.mha)}, 'template_single_embedding': LinearParams(model.template_angle_embedder.linear_1), 'template_projection': LinearParams(model.template_angle_embedder.linear_2)}
        translations['evoformer'].update(template_param_dict)
    # pTM parameter sets additionally carry the predicted-aligned-error head.
    if ('_ptm' in version):
        translations['predicted_aligned_error_head'] = {'logits': LinearParams(model.aux_heads.tm.linear)}
    return translations
|
def import_jax_weights_(model, npz_path, version='model_1'):
    """Load AlphaFold-JAX weights from an .npz file into `model` in place.

    Args:
        model: The PyTorch model to populate.
        npz_path: Path to the official AlphaFold .npz parameter file.
        version: Parameter-set name, forwarded to generate_translation_dict.

    Raises:
        ValueError: If the translation references keys absent from the file.
    """
    data = np.load(npz_path)
    translations = generate_translation_dict(model, version)
    flat = process_translation_dict(translations)
    keys = list(data.keys())
    flat_keys = list(flat.keys())
    incorrect = [k for k in flat_keys if (k not in keys)]
    # NOTE: keys present in the file but unused by the model are deliberately
    # ignored (the original computed them into an unused `missing` list).
    if len(incorrect) != 0:
        # BUG FIX: was a bare `assert`, which is stripped under `python -O`
        # and reported no context on failure.
        raise ValueError(f'Model parameter keys missing from the weight file: {incorrect}')
    assign(flat, data)
|
class AttentionCoreFunction(torch.autograd.Function):
    """Custom autograd wrapper around the fused in-place CUDA attention core.

    Computes matmul(softmax-like(q @ k^T + bias_1 + bias_2), v) using the
    `attn_core_inplace_cuda` kernels, reusing the logits buffer in place
    instead of materializing separate softmax/gradient tensors.
    """
    @staticmethod
    def forward(ctx, q, k, v, bias_1=None, bias_2=None):
        if ((bias_1 is None) and (bias_2 is not None)):
            raise ValueError('bias_1 must be specified before bias_2')
        if (q.dtype not in SUPPORTED_DTYPES):
            raise ValueError('Unsupported datatype')
        # The CUDA kernels require contiguous inputs.
        q = q.contiguous()
        k = k.contiguous()
        attention_logits = torch.matmul(q, k.transpose((- 1), (- 2)))
        if (bias_1 is not None):
            attention_logits += bias_1
        if (bias_2 is not None):
            attention_logits += bias_2
        # In-place kernel over the last dim; presumably a softmax, so that
        # `attention_logits` holds the attention weights afterwards -- confirm
        # against the attn_core_inplace_cuda source.
        attn_core_inplace_cuda.forward_(attention_logits, reduce(mul, attention_logits.shape[:(- 1)]), attention_logits.shape[(- 1)])
        o = torch.matmul(attention_logits, v)
        # Bias shapes are saved so backward can sum gradients back over the
        # broadcast (size-1) dimensions.
        ctx.bias_1_shape = (bias_1.shape if (bias_1 is not None) else None)
        ctx.bias_2_shape = (bias_2.shape if (bias_2 is not None) else None)
        ctx.save_for_backward(q, k, v, attention_logits)
        return o
    @staticmethod
    def backward(ctx, grad_output):
        (q, k, v, attention_logits) = ctx.saved_tensors
        grad_q = grad_k = grad_v = grad_bias_1 = grad_bias_2 = None
        # grad wrt v uses the saved attention weights directly.
        grad_v = torch.matmul(attention_logits.transpose((- 1), (- 2)), grad_output)
        # In-place kernel: appears to overwrite `attention_logits` with the
        # gradient of the pre-softmax logits, which the remaining matmuls and
        # bias reductions below then consume -- TODO confirm against the CUDA
        # source.
        attn_core_inplace_cuda.backward_(attention_logits, grad_output.contiguous(), v.contiguous(), reduce(mul, attention_logits.shape[:(- 1)]), attention_logits.shape[(- 1)], grad_output.shape[(- 1)])
        if (ctx.bias_1_shape is not None):
            # Sum over the dims that were broadcast (size 1) in the bias.
            grad_bias_1 = torch.sum(attention_logits, dim=tuple((i for (i, d) in enumerate(ctx.bias_1_shape) if (d == 1))), keepdim=True)
        if (ctx.bias_2_shape is not None):
            grad_bias_2 = torch.sum(attention_logits, dim=tuple((i for (i, d) in enumerate(ctx.bias_2_shape) if (d == 1))), keepdim=True)
        grad_q = torch.matmul(attention_logits, k)
        grad_k = torch.matmul(q.transpose((- 1), (- 2)), attention_logits).transpose((- 1), (- 2))
        return (grad_q, grad_k, grad_v, grad_bias_1, grad_bias_2)
|
def is_main_process():
    """Return True iff this process has LOCAL_RANK 0 (default when unset)."""
    local_rank = int(os.environ.get('LOCAL_RANK', '0'))
    return local_rank == 0
|
class PerformanceLoggingCallback(Callback):
    """Callback that records a timestamp per batch once past a warmup period
    and logs throughput plus latency percentiles through the configured
    logger backends (JSON stream + stdout)."""

    def __init__(self, log_file, global_batch_size, warmup_steps: int=0, profile: bool=False):
        logger.init(backends=[JSONStreamBackend(Verbosity.VERBOSE, log_file), StdOutBackend(Verbosity.VERBOSE)])
        self.warmup_steps = warmup_steps
        self.global_batch_size = global_batch_size
        self.step = 0
        self.profile = profile
        self.timestamps = []

    def do_step(self):
        """Advance the step counter; start profiling/timing once warmed up."""
        self.step += 1
        if self.profile and self.step == self.warmup_steps:
            profiler.start()
        if self.step > self.warmup_steps:
            self.timestamps.append(time.time())

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        self.do_step()

    def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        self.do_step()

    def process_performance_stats(self, deltas):
        """Summarize an array of per-step durations (seconds) into stats."""
        def _round3(val):
            return round(val, 3)
        latencies_ms = 1000 * deltas
        stats = {
            'throughput': _round3(self.global_batch_size / np.mean(deltas)),
            'latency_mean': _round3(latencies_ms.mean()),
        }
        stats.update({f'latency_{level}': _round3(np.percentile(latencies_ms, level)) for level in (90, 95, 99)})
        return stats

    def _log(self):
        # Only the main (rank-0) process emits the summary.
        if is_main_process():
            deltas = np.diff(np.asarray(self.timestamps))
            stats = self.process_performance_stats(deltas)
            logger.log(step=(), data=stats)
            logger.flush()

    def on_train_end(self, trainer, pl_module):
        if self.profile:
            profiler.stop()
        self._log()

    def on_epoch_end(self, trainer, pl_module):
        self._log()
|
class AlphaFoldLRScheduler(torch.optim.lr_scheduler._LRScheduler):
    """Learning rate schedule from the AlphaFold 2 supplement: linear warmup
    from base_lr to max_lr, a plateau at max_lr, then stepwise exponential
    decay.

    The optimizer's own initial learning rate is ignored; warmup always
    starts from this class' base_lr.
    """
    def __init__(self, optimizer, last_epoch: int=(- 1), verbose: bool=False, base_lr: float=0.0, max_lr: float=0.001, warmup_no_steps: int=1000, start_decay_after_n_steps: int=50000, decay_every_n_steps: int=50000, decay_factor: float=0.95):
        # Validate step counts before anything is stored.
        for name, count in (('warmup_no_steps', warmup_no_steps), ('start_decay_after_n_steps', start_decay_after_n_steps)):
            if count < 0:
                raise ValueError(f'{name} must be nonnegative')
        if warmup_no_steps > start_decay_after_n_steps:
            raise ValueError('warmup_no_steps must not exceed start_decay_after_n_steps')
        self.optimizer = optimizer
        self.last_epoch = last_epoch
        self.verbose = verbose
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.warmup_no_steps = warmup_no_steps
        self.start_decay_after_n_steps = start_decay_after_n_steps
        self.decay_every_n_steps = decay_every_n_steps
        self.decay_factor = decay_factor
        super().__init__(optimizer, last_epoch=last_epoch, verbose=verbose)

    def state_dict(self):
        # The optimizer is checkpointed separately; exclude it here.
        return {k: v for k, v in self.__dict__.items() if k != 'optimizer'}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            raise RuntimeError('To get the last learning rate computed by the scheduler, use get_last_lr()')
        step_no = self.last_epoch
        if step_no <= self.warmup_no_steps:
            # Linear ramp from base_lr towards base_lr + max_lr.
            lr = self.base_lr + (step_no / self.warmup_no_steps) * self.max_lr
        elif step_no > self.start_decay_after_n_steps:
            # Exponential decay, stepped every decay_every_n_steps.
            steps_since_decay = step_no - self.start_decay_after_n_steps
            decay_count = (steps_since_decay // self.decay_every_n_steps) + 1
            lr = self.max_lr * (self.decay_factor ** decay_count)
        else:
            # Plateau between warmup and decay.
            lr = self.max_lr
        return [lr] * len(self.optimizer.param_groups)
|
def is_fp16_enabled():
    """Return True iff GPU autocast is currently active with float16."""
    autocast_active = torch.is_autocast_enabled()
    return autocast_active and (torch.get_autocast_gpu_dtype() == torch.float16)
|
def count_models_to_evaluate(openfold_checkpoint_path, jax_param_path):
    """Count the models named across the two comma-separated path arguments.

    Either argument may be falsy (None or empty) and contributes zero.
    """
    total = 0
    for paths in (openfold_checkpoint_path, jax_param_path):
        if paths:
            total += len(paths.split(','))
    return total
|
def get_model_basename(model_path):
    """Strip directory and extension, e.g. '/x/params_model_1.npz' -> 'params_model_1'.

    normpath first, so trailing slashes on directory paths are tolerated.
    """
    filename = os.path.basename(os.path.normpath(model_path))
    stem, _ext = os.path.splitext(filename)
    return stem
|
def make_output_directory(output_dir, model_name, multiple_model_mode):
    """Create (if needed) and return the predictions directory.

    In multiple-model mode each model gets its own subdirectory under
    'predictions'; otherwise all output shares 'predictions' directly.
    """
    parts = [output_dir, 'predictions'] + ([model_name] if multiple_model_mode else [])
    prediction_dir = os.path.join(*parts)
    os.makedirs(prediction_dir, exist_ok=True)
    return prediction_dir
|
def load_models_from_command_line(config, model_device, openfold_checkpoint_path, jax_param_path, output_dir):
    """Generator yielding one (model, prediction_output_directory) pair per
    model named on the command line.

    Both path arguments may be comma-separated lists; each entry produces a
    fresh AlphaFold(config) in eval mode, loaded and moved to model_device.

    Raises:
        ValueError: if neither parameter source is supplied.
            NOTE(review): being after the yields, this is raised only when
            the generator is iterated past all yielded models, not at call time.
    """
    multiple_model_mode = (count_models_to_evaluate(openfold_checkpoint_path, jax_param_path) > 1)
    if multiple_model_mode:
        logger.info(f'evaluating multiple models')
    if jax_param_path:
        for path in jax_param_path.split(','):
            model_basename = get_model_basename(path)
            # e.g. 'params_model_1_ptm' -> version 'model_1_ptm'
            model_version = '_'.join(model_basename.split('_')[1:])
            model = AlphaFold(config)
            model = model.eval()
            import_jax_weights_(model, path, version=model_version)
            model = model.to(model_device)
            logger.info(f'Successfully loaded JAX parameters at {path}...')
            output_directory = make_output_directory(output_dir, model_basename, multiple_model_mode)
            (yield (model, output_directory))
    if openfold_checkpoint_path:
        for path in openfold_checkpoint_path.split(','):
            model = AlphaFold(config)
            model = model.eval()
            checkpoint_basename = get_model_basename(path)
            if os.path.isdir(path):
                # A directory is treated as a DeepSpeed ZeRO checkpoint and is
                # first consolidated into a single fp32 .pt (cached in output_dir).
                ckpt_path = os.path.join(output_dir, (checkpoint_basename + '.pt'))
                if (not os.path.isfile(ckpt_path)):
                    convert_zero_checkpoint_to_fp32_state_dict(path, ckpt_path)
                d = torch.load(ckpt_path)
                model.load_state_dict(d['ema']['params'])
            else:
                ckpt_path = path
                d = torch.load(ckpt_path)
                # Prefer the EMA weights when the checkpoint carries them.
                if ('ema' in d):
                    d = d['ema']['params']
                model.load_state_dict(d)
            model = model.to(model_device)
            logger.info(f'Loaded OpenFold parameters at {path}...')
            output_directory = make_output_directory(output_dir, checkpoint_basename, multiple_model_mode)
            (yield (model, output_directory))
    if ((not jax_param_path) and (not openfold_checkpoint_path)):
        raise ValueError('At least one of jax_param_path or openfold_checkpoint_path must be specified.')
|
def parse_fasta(data):
    """Parse a FASTA string into parallel lists of tags and sequences.

    Only the first whitespace-delimited token of each header is kept as the
    tag; multi-line sequences are joined into a single string.
    """
    # Drop bare '>' markers sitting at the end of a line.
    cleaned = re.sub('>$', '', data, flags=re.M)
    pieces = []
    for record in cleaned.split('>'):
        # First element is the header line, second (if any) the sequence body.
        pieces.extend(record.strip().split('\n', 1))
    # pieces[0] is whatever preceded the first '>' (normally empty) -- drop it.
    entries = [piece.replace('\n', '') for piece in pieces[1:]]
    headers, seqs = entries[::2], entries[1::2]
    tags = [header.split()[0] for header in headers]
    return (tags, seqs)
|
def update_timings(timing_dict, output_file=os.path.join(os.getcwd(), 'timings.json')):
    """Merge one or more run-step timings into a JSON file and return its path.

    Existing entries are kept unless overwritten by timing_dict; an unreadable
    (non-JSON) file is replaced from scratch.
    """
    timings = {}
    if os.path.exists(output_file):
        with open(output_file, 'r') as f:
            try:
                timings = json.load(f)
            except json.JSONDecodeError:
                logger.info(f'Overwriting non-standard JSON in {output_file}.')
                timings = {}
    timings.update(timing_dict)
    with open(output_file, 'w') as f:
        json.dump(timings, f)
    return output_file
|
def run_model(model, batch, tag, output_dir):
    """Run one inference pass under no_grad and return the model output.

    Template usage is temporarily disabled when the batch carries no
    'template_*' features; the original setting is restored afterwards.
    The inference wall time is logged and recorded in timings.json.
    """
    with torch.no_grad():
        template_enabled = model.config.template.enabled
        has_template_feats = any('template_' in key for key in batch)
        model.config.template.enabled = template_enabled and has_template_feats
        logger.info(f'Running inference for {tag}...')
        start = time.perf_counter()
        out = model(batch)
        inference_time = time.perf_counter() - start
        logger.info(f'Inference time: {inference_time}')
        update_timings({'inference': inference_time}, os.path.join(output_dir, 'timings.json'))
        model.config.template.enabled = template_enabled
    return out
|
def prep_output(out, batch, feature_dict, feature_processor, config_preset, multimer_ri_gap, subtract_plddt):
    """Convert raw model output into an unrelaxed `protein` object.

    Attaches per-residue pLDDT as atom B-factors, collects template metadata
    for the output remark, and renumbers residue indices of multimer chains
    (which arrive offset by multiples of multimer_ri_gap) back to per-chain
    numbering.

    NOTE(review): mutates batch['residue_index'] in place during renumbering.
    """
    plddt = out['plddt']
    # Broadcast per-residue pLDDT to every atom so it can serve as B-factors.
    plddt_b_factors = numpy.repeat(plddt[(..., None)], residue_constants.atom_type_num, axis=(- 1))
    if subtract_plddt:
        # Store (100 - pLDDT) instead, inverting the confidence scale.
        plddt_b_factors = (100 - plddt_b_factors)
    template_domain_names = []
    template_chain_index = None
    if (feature_processor.config.common.use_templates and ('template_domain_names' in feature_dict)):
        template_domain_names = [t.decode('utf-8') for t in feature_dict['template_domain_names']]
        # Keep only as many templates as the model was allowed to use.
        template_domain_names = template_domain_names[:feature_processor.config.predict.max_templates]
        if ('template_chain_index' in feature_dict):
            template_chain_index = feature_dict['template_chain_index']
            template_chain_index = template_chain_index[:feature_processor.config.predict.max_templates]
    no_recycling = feature_processor.config.common.max_recycling_iters
    remark = ', '.join([f'no_recycling={no_recycling}', f'max_templates={feature_processor.config.predict.max_templates}', f'config_preset={config_preset}'])
    ri = feature_dict['residue_index']
    # Each chain's residue indices were shifted by a multiple of
    # multimer_ri_gap; recover the chain id from that shift.
    chain_index = ((ri - numpy.arange(ri.shape[0])) / multimer_ri_gap)
    chain_index = chain_index.astype(numpy.int64)
    cur_chain = 0
    prev_chain_max = 0
    for (i, c) in enumerate(chain_index):
        if (c != cur_chain):
            # Entered a new chain: record the offset to subtract from here on.
            cur_chain = c
            prev_chain_max = (i + (cur_chain * multimer_ri_gap))
        batch['residue_index'][i] -= prev_chain_max
    unrelaxed_protein = protein.from_prediction(features=batch, result=out, b_factors=plddt_b_factors, chain_index=chain_index, remark=remark, parents=template_domain_names, parents_chain_index=template_chain_index)
    return unrelaxed_protein
|
def relax_protein(config, model_device, unrelaxed_protein, output_directory, output_name, cif_output):
    """Run Amber relaxation on `unrelaxed_protein` and write the result to
    `<output_directory>/<output_name>_relaxed.pdb` (or `.cif`).

    Args:
        config: config object whose `relax` section parameterizes AmberRelaxation.
        model_device: device string; any 'cuda:N' value pins relaxation to GPU N
            by temporarily overriding CUDA_VISIBLE_DEVICES.
        unrelaxed_protein: protein object to relax.
        output_directory: directory receiving the relaxed file and timings.json.
        output_name: basename (without suffix) for the output file.
        cif_output: write mmCIF instead of PDB when True.
    """
    amber_relaxer = relax.AmberRelaxation(use_gpu=(model_device != 'cpu'), **config.relax)
    t = time.perf_counter()
    visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', default='')
    if ('cuda' in model_device):
        device_no = model_device.split(':')[(- 1)]
        os.environ['CUDA_VISIBLE_DEVICES'] = device_no
    try:
        (struct_str, _, _) = amber_relaxer.process(prot=unrelaxed_protein, cif_output=cif_output)
    finally:
        # Restore the previous visibility even when relaxation raises;
        # the original code leaked the override on exceptions.
        os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices
    relaxation_time = (time.perf_counter() - t)
    logger.info(f'Relaxation time: {relaxation_time}')
    update_timings({'relaxation': relaxation_time}, os.path.join(output_directory, 'timings.json'))
    suffix = ('_relaxed.cif' if cif_output else '_relaxed.pdb')
    relaxed_output_path = os.path.join(output_directory, f'{output_name}{suffix}')
    with open(relaxed_output_path, 'w') as fp:
        fp.write(struct_str)
    logger.info(f'Relaxed output written to {relaxed_output_path}...')
|
def seed_globally(seed=None):
    """Seed all RNGs via Lightning's seed_everything.

    On first use a seed (given or random) is cached in the PL_GLOBAL_SEED
    environment variable; seed_everything(None) presumably reads it back from
    there on every call.
    """
    if 'PL_GLOBAL_SEED' not in os.environ:
        chosen = seed if seed is not None else random.randint(0, np.iinfo(np.uint32).max)
        os.environ['PL_GLOBAL_SEED'] = str(chosen)
        logging.info(f'os.environ["PL_GLOBAL_SEED"] set to {chosen}')
    # Silence seed_everything's own INFO chatter.
    with SuppressLogging(logging.INFO):
        seed_everything(seed=None)
|
def _superimpose_np(reference, coords):
    """
    Superimposes coordinates onto a reference by minimizing RMSD using SVD.

    Args:
        reference:
            [N, 3] reference array
        coords:
            [N, 3] array
    Returns:
        A tuple of [N, 3] superimposed coords and the final RMSD.
    """
    # Biopython's SVDSuperimposer finds the optimal rigid-body transform.
    sup = SVDSuperimposer()
    sup.set(reference, coords)
    sup.run()
    return (sup.get_transformed(), sup.get_rms())
|
def _superimpose_single(reference, coords):
    """Torch wrapper around _superimpose_np: converts both tensors to numpy,
    superimposes, and returns results as tensors matching `coords`."""
    superimposed_np, rmsd_np = _superimpose_np(
        reference.detach().cpu().numpy(),
        coords.detach().cpu().numpy(),
    )
    return (coords.new_tensor(superimposed_np), coords.new_tensor(rmsd_np))
|
def superimpose(reference, coords, mask):
    """
    Superimposes coordinates onto a reference by minimizing RMSD using SVD.

    Args:
        reference:
            [*, N, 3] reference tensor
        coords:
            [*, N, 3] tensor
        mask:
            [*, N] tensor
    Returns:
        A tuple of [*, N, 3] superimposed coords and [*] final RMSDs.
    """
    def select_unmasked_coords(coords, mask):
        # Keep only coordinates whose mask entry is positive; [M, 3].
        return torch.masked_select(coords, (mask > 0.0)[(..., None)]).reshape((- 1), 3)
    # Flatten all leading batch dims so structures can be processed one by one.
    batch_dims = reference.shape[:(- 2)]
    flat_reference = reference.reshape((((- 1),) + reference.shape[(- 2):]))
    flat_coords = coords.reshape((((- 1),) + reference.shape[(- 2):]))
    flat_mask = mask.reshape((((- 1),) + mask.shape[(- 1):]))
    superimposed_list = []
    rmsds = []
    for (r, c, m) in zip(flat_reference, flat_coords, flat_mask):
        # Superimpose using only the unmasked atoms.
        r_unmasked_coords = select_unmasked_coords(r, m)
        c_unmasked_coords = select_unmasked_coords(c, m)
        (superimposed, rmsd) = _superimpose_single(r_unmasked_coords, c_unmasked_coords)
        # Scatter superimposed coords back into their original positions;
        # masked positions stay zero.
        count = 0
        superimposed_full_size = torch.zeros_like(r)
        for (i, unmasked) in enumerate(m):
            if unmasked:
                superimposed_full_size[i] = superimposed[count]
                count += 1
        superimposed_list.append(superimposed_full_size)
        rmsds.append(rmsd)
    superimposed_stacked = torch.stack(superimposed_list, dim=0)
    rmsds_stacked = torch.stack(rmsds, dim=0)
    # Restore the original batch dimensions.
    superimposed_reshaped = superimposed_stacked.reshape((batch_dims + coords.shape[(- 2):]))
    rmsds_reshaped = rmsds_stacked.reshape(batch_dims)
    return (superimposed_reshaped, rmsds_reshaped)
|
class SuppressStdout():
    """Context manager that redirects sys.stdout to the null device while
    active, restoring the previous stdout (and closing the null handle)
    on exit."""

    def __enter__(self):
        # Remember the current stdout so it can be restored on exit.
        self.stdout = sys.stdout
        # os.devnull is portable ('/dev/null' on POSIX, 'nul' on Windows);
        # the original hard-coded '/dev/null' broke on Windows.
        dev_null = open(os.devnull, 'w')
        sys.stdout = dev_null

    def __exit__(self, typ, value, traceback):
        fp = sys.stdout
        sys.stdout = self.stdout
        fp.close()
|
class SuppressLogging():
    """Context manager that disables logging at or below `level` while active."""

    def __init__(self, level):
        # Threshold handed to logging.disable on entry.
        self.level = level

    def __enter__(self):
        # Raise the global disable threshold for the duration of the block.
        logging.disable(self.level)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Re-enable all logging.
        logging.disable(logging.NOTSET)
|
def add(m1, m2, inplace):
    """Add m2 to m1; mutates and returns m1 when inplace, else returns a new value."""
    if inplace:
        m1 += m2
        return m1
    return m1 + m2
|
def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
    """Permute the last len(inds) dimensions of `tensor` according to `inds`
    (indices relative to the final block of dims); leading dims are untouched."""
    n_final = len(inds)
    leading = list(range(tensor.ndim - n_final))
    trailing = [tensor.ndim - n_final + i for i in inds]
    return tensor.permute(leading + trailing)
|
def flatten_final_dims(t: torch.Tensor, no_dims: int):
    """Collapse the last `no_dims` dimensions of `t` into a single dimension."""
    new_shape = t.shape[:-no_dims] + (-1,)
    return t.reshape(new_shape)
|
def masked_mean(mask, value, dim, eps=0.0001):
    """Mean of `value` over `dim`, weighted by `mask`; eps guards against
    an all-zero mask producing a division by zero."""
    broadcast_mask = mask.expand(*value.shape)
    weighted_sum = torch.sum(broadcast_mask * value, dim=dim)
    normalizer = eps + torch.sum(broadcast_mask, dim=dim)
    return weighted_sum / normalizer
|
def pts_to_distogram(pts, min_bin=2.3125, max_bin=21.6875, no_bins=64):
    """Bucketize pairwise distances between points into distogram bin indices.

    Uses no_bins - 1 evenly spaced edges between min_bin and max_bin, so
    indices range over [0, no_bins - 1].
    """
    edges = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device)
    deltas = pts.unsqueeze(-2) - pts.unsqueeze(-3)
    pairwise_dists = torch.sqrt(torch.sum(deltas ** 2, dim=-1))
    return torch.bucketize(pairwise_dists, edges)
|
def dict_multimap(fn, dicts):
    """Apply `fn` to the per-key value lists of a sequence of structurally
    identical (possibly nested) dicts, recursing into nested dicts."""
    template = dicts[0]
    out = {}
    for key, value in template.items():
        stacked_values = [d[key] for d in dicts]
        if type(value) is dict:
            out[key] = dict_multimap(fn, stacked_values)
        else:
            out[key] = fn(stacked_values)
    return out
|
def one_hot(x, v_bins):
    """One-hot encode each element of `x` against its nearest bin in `v_bins`."""
    bins = v_bins.view((1,) * x.ndim + (len(v_bins),))
    nearest = torch.argmin((x[..., None] - bins).abs(), dim=-1)
    return nn.functional.one_hot(nearest, num_classes=len(v_bins)).float()
|
def batched_gather(data, inds, dim=0, no_batch_dims=0):
    """Gather along `dim` of `data` using `inds`, treating the first
    `no_batch_dims` dimensions as batch dimensions that are indexed
    elementwise."""
    batch_ranges = []
    for axis, size in enumerate(data.shape[:no_batch_dims]):
        # arange over this batch axis, shaped to broadcast against inds.
        view_shape = [1] * len(inds.shape)
        view_shape[axis] = -1
        batch_ranges.append(torch.arange(size).view(*view_shape))
    trailing = [slice(None) for _ in range(len(data.shape) - no_batch_dims)]
    trailing[dim - no_batch_dims if dim >= 0 else dim] = inds
    return data[batch_ranges + trailing]
|
def dict_map(fn, dic, leaf_type):
    """Recursively map `fn` over a nested dict, delegating non-dict values
    to tree_map (which applies `fn` to leaves of `leaf_type`)."""
    return {
        key: (dict_map(fn, val, leaf_type) if type(val) is dict else tree_map(fn, val, leaf_type))
        for key, val in dic.items()
    }
|
def tree_map(fn, tree, leaf_type):
    """Recursively apply `fn` to every `leaf_type` leaf of a nested
    dict/list/tuple structure, preserving the container shapes.

    Raises ValueError for any node that is neither a supported container
    nor a leaf of `leaf_type`.
    """
    if isinstance(tree, dict):
        return dict_map(fn, tree, leaf_type)
    if isinstance(tree, list):
        return [tree_map(fn, elem, leaf_type) for elem in tree]
    if isinstance(tree, tuple):
        return tuple(tree_map(fn, elem, leaf_type) for elem in tree)
    if isinstance(tree, leaf_type):
        return fn(tree)
    print(type(tree))
    raise ValueError('Not supported')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.