code stringlengths 17 6.64M |
|---|
def precompute_alignments(tags, seqs, alignment_dir, args):
    """Generate alignments for each (tag, sequence) pair, unless the user
    supplied precomputed alignments or the target's directory already exists."""
    for tag, seq in zip(tags, seqs):
        # Each aligner run reads its query from a temporary FASTA file.
        tmp_fasta_path = os.path.join(args.output_dir, f'tmp_{os.getpid()}.fasta')
        with open(tmp_fasta_path, 'w') as fp:
            fp.write(f'>{tag}\n{seq}')
        local_alignment_dir = os.path.join(alignment_dir, tag)
        must_align = (
            args.use_precomputed_alignments is None
            and not os.path.isdir(local_alignment_dir)
        )
        if must_align:
            logger.info(f'Generating alignments for {tag}...')
            os.makedirs(local_alignment_dir)
            alignment_runner = data_pipeline.AlignmentRunner(
                jackhmmer_binary_path=args.jackhmmer_binary_path,
                hhblits_binary_path=args.hhblits_binary_path,
                hhsearch_binary_path=args.hhsearch_binary_path,
                uniref90_database_path=args.uniref90_database_path,
                mgnify_database_path=args.mgnify_database_path,
                bfd_database_path=args.bfd_database_path,
                uniclust30_database_path=args.uniclust30_database_path,
                pdb70_database_path=args.pdb70_database_path,
                no_cpus=args.cpus,
            )
            alignment_runner.run(tmp_fasta_path, local_alignment_dir)
        else:
            logger.info(f'Using precomputed alignments for {tag} at {alignment_dir}...')
        os.remove(tmp_fasta_path)
|
def round_up_seqlen(seqlen):
    """Round *seqlen* up to the next multiple of TRACING_INTERVAL."""
    # Integer ceiling division: -(-a // b) == ceil(a / b) for ints, so no
    # float round-trip is needed.
    return -(-seqlen // TRACING_INTERVAL) * TRACING_INTERVAL
|
def generate_feature_dict(tags, seqs, alignment_dir, data_processor, args):
    """Build the model input feature dict for one target.

    Single-sequence targets use a per-tag alignment subdirectory; multi-sequence
    targets are processed against the super alignment directory.
    """
    tmp_fasta_path = os.path.join(args.output_dir, f'tmp_{os.getpid()}.fasta')
    if len(seqs) == 1:
        tag, seq = tags[0], seqs[0]
        with open(tmp_fasta_path, 'w') as fp:
            fp.write(f'>{tag}\n{seq}')
        feature_dict = data_processor.process_fasta(
            fasta_path=tmp_fasta_path,
            alignment_dir=os.path.join(alignment_dir, tag),
        )
    else:
        entries = [f'>{tag}\n{seq}' for tag, seq in zip(tags, seqs)]
        with open(tmp_fasta_path, 'w') as fp:
            fp.write('\n'.join(entries))
        feature_dict = data_processor.process_multiseq_fasta(
            fasta_path=tmp_fasta_path,
            super_alignment_dir=alignment_dir,
        )
    os.remove(tmp_fasta_path)
    return feature_dict
|
def list_files_with_extensions(dir, extensions):
    """Return the names of files in *dir* whose names end with any of *extensions*.

    *extensions* may be a single suffix or a tuple of suffixes (str.endswith
    accepts both).
    """
    matches = []
    for entry in os.listdir(dir):
        if entry.endswith(extensions):
            matches.append(entry)
    return matches
|
def main(args):
    """Run inference over every FASTA file in args.fasta_dir.

    For each target: compute (or reuse) alignments, build features, run every
    requested model, and write unrelaxed structures, optional relaxed
    structures, and optional raw output pickles.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    config = model_config(args.config_preset, long_sequence_inference=args.long_sequence_inference)
    if args.trace_model:
        # Tracing requires fixed input shapes so traces can be reused.
        if (not config.data.predict.fixed_size):
            raise ValueError('Tracing requires that fixed_size mode be enabled in the config')
    template_featurizer = templates.TemplateHitFeaturizer(mmcif_dir=args.template_mmcif_dir, max_template_date=args.max_template_date, max_hits=config.data.predict.max_templates, kalign_binary_path=args.kalign_binary_path, release_dates_path=args.release_dates_path, obsolete_pdbs_path=args.obsolete_pdbs_path)
    data_processor = data_pipeline.DataPipeline(template_featurizer=template_featurizer)
    output_dir_base = args.output_dir
    random_seed = args.data_random_seed
    if (random_seed is None):
        random_seed = random.randrange((2 ** 32))
    # Seed numpy and torch separately (offset by 1 so the streams differ).
    np.random.seed(random_seed)
    torch.manual_seed((random_seed + 1))
    feature_processor = feature_pipeline.FeaturePipeline(config.data)
    if (not os.path.exists(output_dir_base)):
        os.makedirs(output_dir_base)
    if (args.use_precomputed_alignments is None):
        alignment_dir = os.path.join(output_dir_base, 'alignments')
    else:
        alignment_dir = args.use_precomputed_alignments
    # Gather all targets: one entry per FASTA file; multi-chain files get a
    # combined tag joined with '-'.
    tag_list = []
    seq_list = []
    for fasta_file in list_files_with_extensions(args.fasta_dir, ('.fasta', '.fa')):
        with open(os.path.join(args.fasta_dir, fasta_file), 'r') as fp:
            data = fp.read()
        (tags, seqs) = parse_fasta(data)
        tag = '-'.join(tags)
        tag_list.append((tag, tags))
        seq_list.append(seqs)
    # Process shortest targets first (helps trace reuse / memory growth).
    seq_sort_fn = (lambda target: sum([len(s) for s in target[1]]))
    sorted_targets = sorted(zip(tag_list, seq_list), key=seq_sort_fn)
    feature_dicts = {}
    model_generator = load_models_from_command_line(config, args.model_device, args.openfold_checkpoint_path, args.jax_param_path, args.output_dir)
    for (model, output_directory) in model_generator:
        cur_tracing_interval = 0
        for ((tag, tags), seqs) in sorted_targets:
            output_name = f'{tag}_{args.config_preset}'
            if (args.output_postfix is not None):
                output_name = f'{output_name}_{args.output_postfix}'
            precompute_alignments(tags, seqs, alignment_dir, args)
            # Feature dicts are cached per tag so later models reuse them.
            feature_dict = feature_dicts.get(tag, None)
            if (feature_dict is None):
                feature_dict = generate_feature_dict(tags, seqs, alignment_dir, data_processor, args)
                if args.trace_model:
                    # Pad to the next tracing interval so one trace covers a
                    # range of sequence lengths.
                    n = feature_dict['aatype'].shape[(- 2)]
                    rounded_seqlen = round_up_seqlen(n)
                    feature_dict = pad_feature_dict_seq(feature_dict, rounded_seqlen)
                feature_dicts[tag] = feature_dict
            processed_feature_dict = feature_processor.process_features(feature_dict, mode='predict')
            processed_feature_dict = {k: torch.as_tensor(v, device=args.model_device) for (k, v) in processed_feature_dict.items()}
            if args.trace_model:
                # Re-trace only when the padded length exceeds the length the
                # current trace was built for.
                if (rounded_seqlen > cur_tracing_interval):
                    logger.info(f'Tracing model at {rounded_seqlen} residues...')
                    t = time.perf_counter()
                    trace_model_(model, processed_feature_dict)
                    tracing_time = (time.perf_counter() - t)
                    logger.info(f'Tracing time: {tracing_time}')
                    cur_tracing_interval = rounded_seqlen
            out = run_model(model, processed_feature_dict, tag, args.output_dir)
            # Strip the recycling dimension (keep the final iteration) and
            # move everything back to numpy on CPU.
            processed_feature_dict = tensor_tree_map((lambda x: np.array(x[(..., (- 1))].cpu())), processed_feature_dict)
            out = tensor_tree_map((lambda x: np.array(x.cpu())), out)
            unrelaxed_protein = prep_output(out, processed_feature_dict, feature_dict, feature_processor, args.config_preset, args.multimer_ri_gap, args.subtract_plddt)
            unrelaxed_file_suffix = '_unrelaxed.pdb'
            if args.cif_output:
                unrelaxed_file_suffix = '_unrelaxed.cif'
            unrelaxed_output_path = os.path.join(output_directory, f'{output_name}{unrelaxed_file_suffix}')
            with open(unrelaxed_output_path, 'w') as fp:
                if args.cif_output:
                    fp.write(protein.to_modelcif(unrelaxed_protein))
                else:
                    fp.write(protein.to_pdb(unrelaxed_protein))
            logger.info(f'Output written to {unrelaxed_output_path}...')
            if (not args.skip_relaxation):
                logger.info(f'Running relaxation on {unrelaxed_output_path}...')
                relax_protein(config, args.model_device, unrelaxed_protein, output_directory, output_name, args.cif_output)
            if args.save_outputs:
                output_dict_path = os.path.join(output_directory, f'{output_name}_output_dict.pkl')
                with open(output_dict_path, 'wb') as fp:
                    pickle.dump(out, fp, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(f'Model output written to {output_dict_path}...')
|
def main(args):
    """Pack every alignment file under args.alignment_dir into one flat .db
    file plus a JSON .index mapping chain -> [(filename, offset, length), ...].

    Expects args.alignment_dir to contain one subdirectory per chain.
    """
    db_path = os.path.join(args.output_db_path, f'{args.output_db_name}.db')
    index_path = os.path.join(args.output_db_path, f'{args.output_db_name}.index')
    index = {}
    db_offset = 0
    # Context manager guarantees the database file is closed (and flushed)
    # even if reading one of the inputs raises partway through; the original
    # leaked the handle on error.
    with open(db_path, 'wb') as db_fp:
        for chain_alignment_dir in os.listdir(args.alignment_dir):
            cad_path = os.path.join(args.alignment_dir, chain_alignment_dir)
            for f in os.listdir(cad_path):
                f_path = os.path.join(cad_path, f)
                with open(f_path, 'rb') as fp:
                    file_bytes = fp.read()
                l = len(file_bytes)
                file_list = index.setdefault(chain_alignment_dir, [])
                file_list.append((f, db_offset, l))
                db_fp.write(file_bytes)
                db_offset += l
    with open(index_path, 'w') as fp:
        json.dump(index, fp)
|
def main(args):
    """Merge every per-database .index file in args.alignment_db_dir into a
    single super.index that also records which .db each entry lives in."""
    super_index = {}
    for fname in os.listdir(args.alignment_db_dir):
        # Only .index files participate; everything else is ignored.
        stem, ext = os.path.splitext(fname)
        if ext != '.index':
            continue
        with open(os.path.join(args.alignment_db_dir, fname), 'r') as fp:
            index = json.load(fp)
        db_name = f'{stem}.db'
        for chain, files in index.items():
            super_index[chain] = {'db': db_name, 'files': files}
    with open(os.path.join(args.output_dir, 'super.index'), 'w') as fp:
        json.dump(super_index, fp)
|
def reshape_fn(of_param, af_weight):
    """Convert an OpenFold parameter into the layout of the matching
    AlphaFold weight (transpose/reshape depending on the parameter type)."""
    # Stacked params are lists of per-layer arrays; materialize one tensor.
    if of_param.stacked:
        of_weight = torch.stack([torch.Tensor(p) for p in of_param.param])
    else:
        of_weight = torch.Tensor(of_param.param)
    param_type = of_param.param_type
    if param_type == ParamType.LinearWeight:
        return of_weight.transpose(-1, -2)
    if param_type in (
        ParamType.LinearWeightMHA,
        ParamType.LinearMHAOutputWeight,
        ParamType.LinearWeightOPM,
    ):
        return of_weight.transpose(-1, -2).reshape(af_weight.shape)
    if param_type == ParamType.LinearBiasMHA:
        return of_weight.reshape(af_weight.shape)
    if param_type == ParamType.Other:
        return of_weight
    # Mirrors the original dict-lookup failure mode for unknown types.
    raise KeyError(param_type)
|
def transfer(of_dict, af_weight_template):
    """Recursively copy reshaped OpenFold params into the AlphaFold weight
    template, in place.

    of_dict mirrors the nesting of af_weight_template; leaves are OpenFold
    param records consumed by reshape_fn.
    """
    for k in of_dict:
        # isinstance is the idiomatic (and subclass-safe) check; the original
        # used `type(...) == dict`.
        if isinstance(of_dict[k], dict):
            transfer(of_dict[k], af_weight_template[k])
        else:
            reshaped = reshape_fn(of_dict[k], af_weight_template[k])
            reshaped = reshaped.detach().numpy()
            # Copy into the template's existing buffer rather than rebinding.
            np.copyto(af_weight_template[k], reshaped)
|
def main(args):
    """Convert an OpenFold .pt checkpoint into an AlphaFold-style .npz file."""
    d = torch.load(args.of_pt_path)
    config = model_config(args.config_preset)
    model = AlphaFold(config)
    # Loading into a real model validates that the checkpoint matches the config.
    model.load_state_dict(d)
    translation = generate_translation_dict(model, args.config_preset)
    translation = process_translation_dict(translation)
    # The official AF weights serve only as a shape template; zero them so any
    # parameter the transfer misses is conspicuous in the output.
    af_weight_template = np.load(args.template_npz_path)
    af_weight_template = {k: v for (k, v) in af_weight_template.items() if (k in translation)}
    zero = (lambda n: (n * 0))
    af_weight_template = tree_map(zero, af_weight_template, np.ndarray)
    transfer(translation, af_weight_template)
    np.savez(args.out_path, **af_weight_template)
|
def main(args):
    """Write one FASTA file covering every chain found in a directory of
    .cif and .core files."""
    fasta = []
    for fname in os.listdir(args.data_dir):
        (basename, ext) = os.path.splitext(fname)
        basename = basename.upper()
        fpath = os.path.join(args.data_dir, fname)
        if (ext == '.cif'):
            with open(fpath, 'r') as fp:
                mmcif_str = fp.read()
            mmcif = mmcif_parsing.parse(file_id=basename, mmcif_string=mmcif_str)
            if (mmcif.mmcif_object is None):
                logging.warning(f'Failed to parse {fname}...')
                if args.raise_errors:
                    # Surface the first parser error to the caller.
                    raise list(mmcif.errors.values())[0]
                else:
                    continue
            mmcif = mmcif.mmcif_object
            # One FASTA entry per chain, named <PDBID>_<chain>.
            for (chain, seq) in mmcif.chain_to_seqres.items():
                chain_id = '_'.join([basename, chain])
                fasta.append(f'>{chain_id}')
                fasta.append(seq)
        elif (ext == '.core'):
            with open(fpath, 'r') as fp:
                core_str = fp.read()
            core_protein = protein.from_proteinnet_string(core_str)
            aatype = core_protein.aatype
            # Decode residue-type indices back into a one-letter sequence.
            seq = ''.join([residue_constants.restypes_with_x[aatype[i]] for i in range(len(aatype))])
            fasta.append(f'>{basename}')
            fasta.append(seq)
    with open(args.output_path, 'w') as fp:
        fp.write('\n'.join(fasta))
|
def generate_url(period, end_date):
    """Build the CAMEO target-list AJAX URL for *period* ending at *end_date*.

    NOTE: the host component ends in '/', so the join yields '.org//modeling'
    — reproduced exactly as the original endpoint string.
    """
    return f'https://www.cameo3d.org//modeling/targets/{period}/ajax/?to_date={end_date}'
|
def main(args):
    """Download CAMEO targets for a period: one FASTA per target chain plus
    the corresponding mmCIF structure file."""
    data_dir_path = os.path.join(args.output_dir, 'data_dir')
    fasta_dir_path = os.path.join(args.output_dir, 'fasta_dir')
    os.makedirs(data_dir_path, exist_ok=True)
    os.makedirs(fasta_dir_path, exist_ok=True)
    url = generate_url(args.period, args.end_date)
    raw_data = requests.get(url).text
    parsed_data = json.loads(raw_data)
    # 'aaData' is the table payload of the CAMEO AJAX response.
    chain_data = parsed_data['aaData']
    for chain in chain_data:
        pdb_id = chain['pdbid']
        chain_id = chain['pdbid_chain']
        # Fetch the full structure from RCSB.
        pdb_url = f'https://files.rcsb.org/view/{pdb_id.upper()}.cif'
        pdb_file = requests.get(pdb_url).text
        parsed_cif = mmcif_parsing.parse(file_id=pdb_id, mmcif_string=pdb_file)
        mmcif_object = parsed_cif.mmcif_object
        if (mmcif_object is None):
            # Surface the first parser error.
            raise list(parsed_cif.errors.values())[0]
        seq = mmcif_object.chain_to_seqres[chain_id]
        # Skip chains longer than max_seqlen (a value <= 0 disables the cap).
        if ((args.max_seqlen > 0) and (len(seq) > args.max_seqlen)):
            continue
        fasta_file = '\n'.join([f'>{pdb_id}_{chain_id}', seq])
        fasta_filename = f'{pdb_id}_{chain_id}.fasta'
        with open(os.path.join(fasta_dir_path, fasta_filename), 'w') as fp:
            fp.write(fasta_file)
        cif_filename = f'{pdb_id}.cif'
        with open(os.path.join(data_dir_path, cif_filename), 'w') as fp:
            fp.write(pdb_file)
|
def main(args):
    """Run the AlphaFold data pipeline on one FASTA and pickle the features."""
    template_featurizer = templates.TemplateHitFeaturizer(
        mmcif_dir=args.mmcif_dir,
        max_template_date=args.max_template_date,
        max_hits=20,
        kalign_binary_path=args.kalign_binary_path,
        release_dates_path=None,
        obsolete_pdbs_path=args.obsolete_pdbs_path,
    )
    data_pipeline = pipeline.DataPipeline(
        jackhmmer_binary_path=args.jackhmmer_binary_path,
        hhblits_binary_path=args.hhblits_binary_path,
        hhsearch_binary_path=args.hhsearch_binary_path,
        uniref90_database_path=args.uniref90_database_path,
        mgnify_database_path=args.mgnify_database_path,
        bfd_database_path=args.bfd_database_path,
        uniclust30_database_path=args.uniclust30_database_path,
        pdb70_database_path=args.pdb70_database_path,
        small_bfd_database_path=None,
        template_featurizer=template_featurizer,
        use_small_bfd=False,
    )
    feature_dict = data_pipeline.process(
        input_fasta_path=args.fasta_path,
        msa_output_dir=args.output_dir,
    )
    pickle_path = os.path.join(args.output_dir, 'feature_dict.pickle')
    with open(pickle_path, 'wb') as fp:
        pickle.dump(feature_dict, fp, protocol=pickle.HIGHEST_PROTOCOL)
|
def parse_file(f, args, chain_cluster_size_dict):
    """Extract per-chain metadata from one .cif or .pdb file.

    Returns a dict mapping chain (or file) name to metadata, or {} when the
    file cannot be parsed or has an unsupported extension.
    """
    (file_id, ext) = os.path.splitext(f)
    # Initialize up front: the original left `out` unbound (NameError) for
    # extensions other than .cif/.pdb.
    out = {}
    if (ext == '.cif'):
        with open(os.path.join(args.data_dir, f), 'r') as fp:
            mmcif_string = fp.read()
        mmcif = parse(file_id=file_id, mmcif_string=mmcif_string)
        if (mmcif.mmcif_object is None):
            logging.info(f'Could not parse {f}. Skipping...')
            return {}
        else:
            mmcif = mmcif.mmcif_object
        for (chain_id, seq) in mmcif.chain_to_seqres.items():
            full_name = '_'.join([file_id, chain_id])
            out[full_name] = {}
            local_data = out[full_name]
            local_data['release_date'] = mmcif.header['release_date']
            local_data['seq'] = seq
            local_data['resolution'] = mmcif.header['resolution']
            if (chain_cluster_size_dict is not None):
                # -1 marks chains missing from the clustering.
                cluster_size = chain_cluster_size_dict.get(full_name.upper(), (- 1))
                local_data['cluster_size'] = cluster_size
    elif (ext == '.pdb'):
        with open(os.path.join(args.data_dir, f), 'r') as fp:
            pdb_string = fp.read()
        protein_object = protein.from_pdb_string(pdb_string, None)
        chain_dict = {}
        chain_dict['seq'] = residue_constants.aatype_to_str_sequence(protein_object.aatype)
        # Placeholder: no resolution is parsed from the PDB input here.
        chain_dict['resolution'] = 0.0
        if (chain_cluster_size_dict is not None):
            # BUG FIX: the original looked up `full_name`, which is defined
            # only in the .cif branch and raised NameError on this path.
            cluster_size = chain_cluster_size_dict.get(file_id.upper(), (- 1))
            chain_dict['cluster_size'] = cluster_size
        out = {file_id: chain_dict}
    return out
|
def main(args):
    """Build a JSON cache of per-chain metadata for every .cif/.pdb file in
    args.data_dir, parsing in parallel."""
    chain_cluster_size_dict = None
    if args.cluster_file is not None:
        # Each line of the cluster file lists one cluster's chain IDs; every
        # member maps to its cluster's size.
        chain_cluster_size_dict = {}
        with open(args.cluster_file, 'r') as fp:
            for line in fp:
                chain_ids = line.strip().split()
                for chain_id in chain_ids:
                    chain_cluster_size_dict[chain_id.upper()] = len(chain_ids)
    accepted_exts = ['.cif', '.pdb']
    files = [
        f for f in os.listdir(args.data_dir)
        if os.path.splitext(f)[-1] in accepted_exts
    ]
    fn = partial(parse_file, args=args, chain_cluster_size_dict=chain_cluster_size_dict)
    data = {}
    # Each worker returns a (possibly empty) dict of chain metadata.
    with Pool(processes=args.no_workers) as p, tqdm(total=len(files)) as pbar:
        for parsed in p.imap_unordered(fn, files, chunksize=args.chunksize):
            data.update(parsed)
            pbar.update()
    with open(args.output_path, 'w') as fp:
        fp.write(json.dumps(data, indent=4))
|
def parse_file(f, args):
    """Parse one mmCIF file and return {file_id: metadata}, or {} on failure."""
    file_id = os.path.splitext(f)[0]
    with open(os.path.join(args.mmcif_dir, f), 'r') as fp:
        mmcif_string = fp.read()
    parsed = parse(file_id=file_id, mmcif_string=mmcif_string)
    if parsed.mmcif_object is None:
        logging.info(f'Could not parse {f}. Skipping...')
        return {}
    mmcif = parsed.mmcif_object
    # Unzip the chain -> sequence mapping into parallel tuples.
    chain_ids, seqs = zip(*mmcif.chain_to_seqres.items())
    local_data = {
        'release_date': mmcif.header['release_date'],
        'chain_ids': chain_ids,
        'seqs': seqs,
        'no_chains': len(chain_ids),
        'resolution': mmcif.header['resolution'],
    }
    return {file_id: local_data}
|
def main(args):
    """Build a JSON cache of chain/sequence metadata for every mmCIF file in
    args.mmcif_dir, parsing in parallel."""
    files = [fname for fname in os.listdir(args.mmcif_dir) if '.cif' in fname]
    fn = partial(parse_file, args=args)
    data = {}
    # Each worker yields a one-entry dict ({} on parse failure).
    with Pool(processes=args.no_workers) as p, tqdm(total=len(files)) as pbar:
        for parsed in p.imap_unordered(fn, files, chunksize=args.chunksize):
            data.update(parsed)
            pbar.update()
    with open(args.output_path, 'w') as fp:
        fp.write(json.dumps(data, indent=4))
|
def run_seq_group_alignments(seq_groups, alignment_runner, args):
    """Run alignments once per unique sequence, then copy the results to every
    other chain sharing that sequence.

    seq_groups: list of (sequence, [chain_names]) pairs. Alignments are
    computed under the first name; the rest receive copies.
    """
    dirs = set(os.listdir(args.output_dir))
    for (seq, names) in seq_groups:
        first_name = names[0]
        alignment_dir = os.path.join(args.output_dir, first_name)
        try:
            os.makedirs(alignment_dir)
        except Exception as e:
            # Most likely the directory already exists (claimed by another
            # worker); skip the whole group either way.
            logging.warning(f'Failed to create directory for {first_name} with exception {e}...')
            continue
        (fd, fasta_path) = tempfile.mkstemp(suffix='.fasta')
        with os.fdopen(fd, 'w') as fp:
            fp.write(f'>query\n{seq}')
        try:
            alignment_runner.run(fasta_path, alignment_dir)
        except Exception as e:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit still propagate, and the cause is logged instead of
            # being silently discarded.
            logging.warning(f'Failed to run alignments for {first_name}. Skipping...')
            logging.warning(f'Cause: {e}')
            os.remove(fasta_path)
            os.rmdir(alignment_dir)
            continue
        os.remove(fasta_path)
        # Duplicate the alignments for every other chain with this sequence.
        for name in names[1:]:
            if (name in dirs):
                logging.warning(f'{name} has already been processed. Skipping...')
                continue
            cp_dir = os.path.join(args.output_dir, name)
            os.makedirs(cp_dir, exist_ok=True)
            for f in os.listdir(alignment_dir):
                copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f))
|
def parse_and_align(files, alignment_runner, args):
    """Group chains by sequence for each input file, then run alignments per
    unique sequence.

    Supported inputs: .cif (all chains), .fasta/.fa (single sequence) and
    .core (ProteinNet) files; anything else is skipped.
    """
    for f in files:
        path = os.path.join(args.input_dir, f)
        file_id = os.path.splitext(f)[0]
        # Maps sequence -> list of chain names sharing that sequence.
        seq_group_dict = {}
        if f.endswith('.cif'):
            with open(path, 'r') as fp:
                mmcif_str = fp.read()
            mmcif = mmcif_parsing.parse(file_id=file_id, mmcif_string=mmcif_str)
            if (mmcif.mmcif_object is None):
                logging.warning(f'Failed to parse {f}...')
                if args.raise_errors:
                    raise list(mmcif.errors.values())[0]
                else:
                    continue
            mmcif = mmcif.mmcif_object
            for (chain_letter, seq) in mmcif.chain_to_seqres.items():
                chain_id = '_'.join([file_id, chain_letter])
                l = seq_group_dict.setdefault(seq, [])
                l.append(chain_id)
        elif (f.endswith('.fasta') or f.endswith('.fa')):
            with open(path, 'r') as fp:
                fasta_str = fp.read()
            (input_seqs, _) = parse_fasta(fasta_str)
            if (len(input_seqs) != 1):
                msg = f'More than one input_sequence found in {f}'
                if args.raise_errors:
                    raise ValueError(msg)
                else:
                    logging.warning(msg)
            # NOTE: on a multi-sequence FASTA (without raise_errors) only the
            # first sequence is processed.
            input_sequence = input_seqs[0]
            seq_group_dict[input_sequence] = [file_id]
        elif f.endswith('.core'):
            with open(path, 'r') as fp:
                core_str = fp.read()
            core_prot = protein.from_proteinnet_string(core_str)
            aatype = core_prot.aatype
            # Decode residue-type indices back into a one-letter sequence.
            seq = ''.join([residue_constants.restypes_with_x[aatype[i]] for i in range(len(aatype))])
            seq_group_dict[seq] = [file_id]
        else:
            continue
        seq_group_tuples = [(k, v) for (k, v) in seq_group_dict.items()]
        run_seq_group_alignments(seq_group_tuples, alignment_runner, args)
|
def main(args):
    """Precompute alignments for a directory of mmCIF/FASTA/core inputs,
    optionally sharding the work across SLURM nodes and local threads."""
    alignment_runner = AlignmentRunner(jackhmmer_binary_path=args.jackhmmer_binary_path, hhblits_binary_path=args.hhblits_binary_path, hhsearch_binary_path=args.hhsearch_binary_path, uniref90_database_path=args.uniref90_database_path, mgnify_database_path=args.mgnify_database_path, bfd_database_path=args.bfd_database_path, uniclust30_database_path=args.uniclust30_database_path, pdb70_database_path=args.pdb70_database_path, use_small_bfd=(args.bfd_database_path is None), no_cpus=args.cpus_per_task)
    files = list(os.listdir(args.input_dir))
    # Optional metadata cache (chain IDs / sequences per protein) enables
    # skipping finished work and grouping identical sequences.
    if (args.mmcif_cache is not None):
        with open(args.mmcif_cache, 'r') as fp:
            cache = json.load(fp)
    else:
        cache = None
    dirs = []
    if ((cache is not None) and args.filter):
        dirs = set(os.listdir(args.output_dir))
        def prot_is_done(f):
            # A protein counts as done iff every cached chain already has an
            # output directory; unknown proteins are always (re)processed.
            prot_id = os.path.splitext(f)[0]
            if (prot_id in cache):
                chain_ids = cache[prot_id]['chain_ids']
                for c in chain_ids:
                    full_name = ((prot_id + '_') + c)
                    if (not (full_name in dirs)):
                        return False
            else:
                return False
            return True
        files = [f for f in files if (not prot_is_done(f))]
    def split_up_arglist(arglist):
        # Shard the work list: first round-robin across SLURM nodes (if any),
        # then round-robin across args.no_tasks local threads.
        if os.environ.get('SLURM_JOB_NUM_NODES', 0):
            num_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
            if (num_nodes > 1):
                node_id = int(os.environ['SLURM_NODEID'])
                logging.warning(f'Num nodes: {num_nodes}')
                logging.warning(f'Node ID: {node_id}')
                arglist = arglist[node_id::num_nodes]
        t_arglist = []
        for i in range(args.no_tasks):
            t_arglist.append(arglist[i::args.no_tasks])
        return t_arglist
    # If the cache carries sequences, group identical sequences so each unique
    # sequence is aligned only once; otherwise parse files on the fly.
    if ((cache is not None) and ('seqs' in next(iter(cache.values())))):
        seq_group_dict = {}
        for f in files:
            prot_id = os.path.splitext(f)[0]
            if (prot_id in cache):
                prot_cache = cache[prot_id]
                chains_seqs = zip(prot_cache['chain_ids'], prot_cache['seqs'])
                for (chain, seq) in chains_seqs:
                    chain_name = ((prot_id + '_') + chain)
                    if (chain_name not in dirs):
                        l = seq_group_dict.setdefault(seq, [])
                        l.append(chain_name)
        func = partial(run_seq_group_alignments, alignment_runner=alignment_runner, args=args)
        seq_groups = [(k, v) for (k, v) in seq_group_dict.items()]
        # Sort ascending by group size — presumably to balance the round-robin
        # shards; confirm before relying on this ordering.
        seq_groups = sorted(seq_groups, key=(lambda x: len(x[1])))
        task_arglist = [[a] for a in split_up_arglist(seq_groups)]
    else:
        func = partial(parse_and_align, alignment_runner=alignment_runner, args=args)
        task_arglist = [[a] for a in split_up_arglist(files)]
    threads = []
    for (i, task_args) in enumerate(task_arglist):
        print(f'Started thread {i}...')
        t = threading.Thread(target=func, args=task_args)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
|
def _split_a3ms(output_dir):
for fname in os.listdir(output_dir):
if (not (os.path.splitext(fname)[(- 1)] == '.a3m')):
continue
fpath = os.path.join(output_dir, fname)
with open(fpath, 'r') as fp:
a3ms = fp.read()
a3ms = a3ms.split('\x00')[:(- 1)]
for a3m in a3ms:
name = a3m.split('\n', 1)[0][1:]
prot_dir = os.path.join(output_dir, name)
Path(prot_dir).mkdir(parents=True, exist_ok=True)
with open(os.path.join(prot_dir, fname), 'w') as fp:
fp.write(a3m)
os.remove(fpath)
os.remove((fpath + '.dbtype'))
os.remove((fpath + '.index'))
|
def main(args):
    """Run the ColabFold MMseqs search over chunks of an input FASTA, split
    results per protein, then add pdb70 template hits via HHSearch."""
    with open(args.input_fasta, 'r') as f:
        lines = [l.strip() for l in f.readlines()]
    # Assumes a strictly alternating two-line FASTA: header, sequence, ...
    names = lines[::2]
    seqs = lines[1::2]
    if (args.fasta_chunk_size is None):
        chunk_size = len(seqs)
    else:
        chunk_size = args.fasta_chunk_size
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    s = 0
    while (s < len(seqs)):
        e = (s + chunk_size)
        # Re-interleave headers and sequences for this chunk.
        chunk_fasta = [el for tup in zip(names[s:e], seqs[s:e]) for el in tup]
        s = e
        # Skip chunks whose first protein already has an output directory.
        prot_dir = os.path.join(args.output_dir, chunk_fasta[0][1:].upper())
        if os.path.exists(prot_dir):
            continue
        chunk_fasta_path = os.path.join(args.output_dir, 'tmp.fasta')
        with open(chunk_fasta_path, 'w') as f:
            f.write(('\n'.join(chunk_fasta) + '\n'))
        cmd = ['scripts/colabfold_search.sh', args.mmseqs_binary_path, chunk_fasta_path, args.mmseqs_db_dir, args.output_dir, args.uniref_db, '""', ('""' if (args.env_db is None) else args.env_db), ('0' if (args.env_db is None) else '1'), '0', '1', '1', '0']
        logging.info('Launching subprocess "%s"', ' '.join(cmd))
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = process.communicate()
        retcode = process.wait()
        if retcode:
            raise RuntimeError(('MMseqs failed\nstdout:\n%s\n\nstderr:\n%s\n' % (stdout.decode('utf-8'), stderr.decode('utf-8'))))
        # Fan the combined results out into one directory per protein.
        _split_a3ms(args.output_dir)
        os.remove(chunk_fasta_path)
    # Second pass: query pdb70 templates from each uniref .a3m alignment.
    hhsearch_pdb70_runner = hhsearch.HHSearch(binary_path=args.hhsearch_binary_path, databases=[args.pdb70])
    for d in os.listdir(args.output_dir):
        dpath = os.path.join(args.output_dir, d)
        if (not os.path.isdir(dpath)):
            continue
        for fname in os.listdir(dpath):
            fpath = os.path.join(dpath, fname)
            if ((not ('uniref' in fname)) or (not (os.path.splitext(fname)[(- 1)] == '.a3m'))):
                continue
            with open(fpath, 'r') as fp:
                a3m = fp.read()
            hhsearch_result = hhsearch_pdb70_runner.query(a3m)
            pdb70_out_path = os.path.join(dpath, 'pdb70_hits.hhr')
            with open(pdb70_out_path, 'w') as f:
                f.write(hhsearch_result)
|
def main(args):
    """Move (or copy) MSA files into per-chain directories for entries whose
    PDB ID has a matching mmCIF file, via a sorted merge over both listings."""
    count = 0
    # max_count of None means unlimited (-1 never equals count).
    max_count = (args.max_count if (args.max_count is not None) else (- 1))
    msas = sorted((f for f in os.listdir(args.msa_dir)))
    mmcifs = sorted((f for f in os.listdir(args.mmcif_dir)))
    mmcif_idx = 0
    for f in msas:
        if (count == max_count):
            break
        path = os.path.join(args.msa_dir, f)
        name = os.path.splitext(f)[0]
        # Expect MSA filenames shaped like <pdb_id>_<...>_<chain_id>.<ext>.
        spl = name.upper().split('_')
        if (len(spl) != 3):
            continue
        (pdb_id, _, chain_id) = spl
        # Advance the mmCIF cursor until it catches up with this PDB ID.
        # NOTE(review): if an MSA's pdb_id sorts after every mmCIF filename,
        # this walks off the end of `mmcifs` (IndexError) — confirm inputs.
        while (pdb_id > os.path.splitext(mmcifs[mmcif_idx])[0].upper()):
            mmcif_idx += 1
        if (pdb_id == os.path.splitext(mmcifs[mmcif_idx])[0].upper()):
            dirname = os.path.join(args.out_dir, '_'.join([pdb_id, chain_id]))
            os.makedirs(dirname, exist_ok=True)
            dest = os.path.join(dirname, f)
            if args.copy:
                shutil.copyfile(path, dest)
            else:
                os.rename(path, dest)
            count += 1
|
def _write_file(args, file_in_progress):
file_id = file_in_progress[1]
fname = (file_id.upper() + '.core')
fpath = os.path.join(args.output_dir, fname)
with open(fpath, 'w') as fp:
fp.write('\n'.join(file_in_progress))
|
def main(args):
    """Split a monolithic ProteinNet text file into one .core file per record."""
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    with open(args.proteinnet_file, 'r') as fp:
        proteinnet_lines = fp.readlines()
    record = []
    for line in proteinnet_lines:
        # Each '[ID]' header starts a new record; flush the previous one.
        if line == '[ID]\n' and record:
            _write_file(args, record)
            record = []
        record.append(line.strip())
    # Flush the final record, if any.
    if record:
        _write_file(args, record)
|
def add_data_args(parser: argparse.ArgumentParser):
    """Register the standard database-path and binary-path arguments on *parser*."""
    # Genetic database paths — all optional. Order preserved for --help output.
    for db in ('uniref90', 'mgnify', 'pdb70', 'uniclust30', 'bfd'):
        parser.add_argument(f'--{db}_database_path', type=str, default=None)
    # Alignment tool binaries, defaulting to common install locations.
    for tool in ('jackhmmer', 'hhblits', 'hhsearch', 'kalign'):
        parser.add_argument(f'--{tool}_binary_path', type=str, default=f'/usr/bin/{tool}')
    parser.add_argument('--max_template_date', type=str, default=date.today().strftime('%Y-%m-%d'))
    parser.add_argument('--obsolete_pdbs_path', type=str, default=None)
    parser.add_argument('--release_dates_path', type=str, default=None)
|
def get_nvidia_cc():
    """
    Returns a tuple containing the Compute Capability of the first GPU
    installed in the system (as a (major, minor) tuple of ints) and an error
    message. When the former is provided, the latter is None, and vice versa.

    Adapted from a script by Jan Schlüter at
    https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
    """
    CUDA_SUCCESS = 0
    # Candidate driver library names for Linux/macOS/Windows, plus the CUDA
    # compat path used in containerized environments.
    libnames = ['libcuda.so', 'libcuda.dylib', 'cuda.dll', '/usr/local/cuda/compat/libcuda.so']
    for libname in libnames:
        try:
            cuda = ctypes.CDLL(libname)
        except OSError:
            continue
        else:
            break
    else:
        # for/else: no candidate library could be loaded.
        return (None, ('Could not load any of: ' + ' '.join(libnames)))
    # C out-parameters for the driver API calls below.
    nGpus = ctypes.c_int()
    cc_major = ctypes.c_int()
    cc_minor = ctypes.c_int()
    result = ctypes.c_int()
    device = ctypes.c_int()
    error_str = ctypes.c_char_p()
    result = cuda.cuInit(0)
    if (result != CUDA_SUCCESS):
        cuda.cuGetErrorString(result, ctypes.byref(error_str))
        if error_str.value:
            return (None, error_str.value.decode())
        else:
            return (None, ('Unknown error: cuInit returned %d' % result))
    result = cuda.cuDeviceGetCount(ctypes.byref(nGpus))
    if (result != CUDA_SUCCESS):
        cuda.cuGetErrorString(result, ctypes.byref(error_str))
        return (None, error_str.value.decode())
    if (nGpus.value < 1):
        return (None, 'No GPUs detected')
    # Only device 0 is queried.
    result = cuda.cuDeviceGet(ctypes.byref(device), 0)
    if (result != CUDA_SUCCESS):
        cuda.cuGetErrorString(result, ctypes.byref(error_str))
        return (None, error_str.value.decode())
    if (cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) != CUDA_SUCCESS):
        return (None, 'Compute Capability not found')
    major = cc_major.value
    minor = cc_minor.value
    return ((major, minor), None)
|
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path to the DeepSpeed model-states file for a ZeRO stage.

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: if zero_stage is not 2 or 3.
    """
    if (not os.path.isdir(checkpoint_dir)):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
    if (zero_stage == 2):
        file = os.path.join(checkpoint_dir, 'mp_rank_00_model_states.pt')
    elif (zero_stage == 3):
        file = os.path.join(checkpoint_dir, 'zero_pp_rank_0_mp_rank_00_model_states.pt')
    else:
        # BUG FIX: the original fell through with `file` unbound, raising an
        # opaque NameError; fail with a clear message instead.
        raise ValueError(f'unknown zero stage {zero_stage}')
    if (not os.path.exists(file)):
        raise FileNotFoundError(f"can't find model states file at '{file}'")
    return file
|
def get_optim_files(checkpoint_dir):
    """Return the sorted '*_optim_states.pt' files in *checkpoint_dir*.

    Raises FileNotFoundError when no such files exist.
    """
    pattern = os.path.join(checkpoint_dir, '*_optim_states.pt')
    optim_files = glob.glob(pattern)
    if not optim_files:
        raise FileNotFoundError(f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
    optim_files.sort()
    return optim_files
|
def parse_model_state(file):
    """Load a DeepSpeed model-states file and return its buffers as fp32.

    Relies on the module-level `device` and `debug` globals.
    """
    state_dict = torch.load(file, map_location=device)
    if 'buffer_names' not in state_dict:
        raise ValueError(f'{file} is not a model state checkpoint')
    buffer_names = state_dict['buffer_names']
    if debug:
        print('Found buffers:', buffer_names)
    # Keep only the named buffers from the module state, upcast to float32.
    module_state = state_dict['module']
    buffers = {
        name: tensor.float()
        for name, tensor in module_state.items()
        if name in buffer_names
    }
    return buffers
|
def parse_optim_states(files, ds_checkpoint_dir):
    """Load every '*_optim_states.pt' shard and return
    (zero_stage, world_size, param_shapes, fp32_flat_groups).

    Relies on the module-level `device` global for map_location.
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dicts.append(torch.load(f, map_location=device))
    if (not ('zero_stage' in state_dicts[0]['optimizer_state_dict'])):
        raise ValueError(f'{files[0]} is not a zero checkpoint')
    zero_stage = state_dicts[0]['optimizer_state_dict']['zero_stage']
    world_size = state_dicts[0]['optimizer_state_dict']['partition_count']
    param_shapes = state_dicts[0]['param_shapes']
    # partition_count may be stored as a per-group list; use its max as the
    # effective world size.
    if (type(world_size) is list):
        world_size = max(world_size)
    # One shard per rank is required for a complete reconstruction.
    if (world_size != total_files):
        raise ValueError(f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes.")
    # The key holding the flat fp32 partitions differs between ZeRO stages.
    if (zero_stage == 2):
        fp32_groups_key = 'single_partition_of_fp32_groups'
    elif (zero_stage == 3):
        fp32_groups_key = 'fp32_flat_groups'
    else:
        raise ValueError(f'unknown zero stage {zero_stage}')
    if (zero_stage == 2):
        # Stage 2: each rank holds a list of per-param-group partitions.
        fp32_flat_groups = [state_dicts[i]['optimizer_state_dict'][fp32_groups_key] for i in range(len(state_dicts))]
    elif (zero_stage == 3):
        # Stage 3: concatenate each rank's sub-groups into one flat tensor.
        fp32_flat_groups = [torch.cat(state_dicts[i]['optimizer_state_dict'][fp32_groups_key], 0) for i in range(len(state_dicts))]
    return (zero_stage, world_size, param_shapes, fp32_flat_groups)
|
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """Reconstruct the fp32 state_dict from a DeepSpeed checkpoint folder.

    Args:
        ds_checkpoint_dir: path to the deepspeed checkpoint folder (where the
            optimizer state files live).
    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, param_shapes, fp32_flat_groups = parse_optim_states(
        optim_files, ds_checkpoint_dir
    )
    print(f'Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}')
    # Model buffers come from the model-states file, not the optimizer shards.
    model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
    buffers = parse_model_state(model_file)
    # Dispatch to the stage-specific reconstruction.
    if zero_stage == 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, param_shapes, fp32_flat_groups, buffers)
    if zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, param_shapes, fp32_flat_groups, buffers)
|
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, param_shapes, fp32_flat_groups, buffers):
    """Rebuild a full fp32 state_dict from ZeRO-2 partitioned optimizer shards.

    Relies on the module-level `debug` global.
    """
    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f'fp32_flat_groups[{i}][{j}].shape={fp32_flat_groups[i][j].shape}')
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    # Concatenate each param group's per-rank partitions into one flat vector.
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum([full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum((shape.numel() for shape in shapes.values())) for shapes in param_shapes])
        print(f'Have {avail_numel} numels to process.')
        print(f'Need {wanted_numel} numels in {wanted_params} params.')
    state_dict = OrderedDict()
    # Buffers are inserted before the reconstructed parameters.
    state_dict.update(buffers)
    if debug:
        print(f'added {len(buffers)} buffers')
    total_numel = 0
    total_params = 0
    # Slice each merged flat vector back into individual parameter tensors.
    for (shapes, full_single_fp32_vector) in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for (name, shape) in shapes.items():
            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1
            if debug:
                print(f'{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ')
            # narrow() gives a view into the flat vector; view() restores shape.
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel
        # ZeRO-2 pads each group to a multiple of 2*world_size; align both
        # counters before the consistency check below.
        align_to = (2 * world_size)
        def zero2_align(x):
            return (align_to * math.ceil((x / align_to)))
        if debug:
            print(f'original offset={offset}, avail_numel={avail_numel}')
        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)
        if debug:
            print(f'aligned offset={offset}, avail_numel={avail_numel}')
        if (offset != avail_numel):
            raise ValueError(f'consumed {offset} numels out of {avail_numel} - something is wrong')
    print(f'Reconstructed fp32 state dict with {total_params} params {total_numel} elements')
    return state_dict
|
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(per_rank_numel, padding_numel)`` for a ZeRO-3 partitioned param.

    Each rank stores ``ceil(numel / world_size)`` elements; when the parameter
    does not divide evenly, the final partition is padded with
    ``world_size - (numel % world_size)`` filler elements.
    """
    leftover = unpartitioned_numel % world_size
    if leftover:
        pad = world_size - leftover
    else:
        pad = 0
    per_rank = math.ceil(unpartitioned_numel / world_size)
    return per_rank, pad
|
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, param_shapes, fp32_flat_groups, buffers):
    # Reconstruct a consolidated fp32 state_dict from ZeRO stage-3 shards.
    # In ZeRO-3 every rank holds a single flat partition containing a
    # 1/world_size slice of every parameter, so each parameter is rebuilt by
    # gathering its slice from all ranks at the same offset.
    avail_numel = (fp32_flat_groups[0].numel() * world_size)
    # Flatten the per-group list of {name: shape} dicts into one mapping.
    param_shapes = {k: v for d in param_shapes for (k, v) in d.items()}
    if debug:
        for i in range(world_size):
            print(f'fp32_flat_groups[{i}].shape={fp32_flat_groups[i].shape}')
        wanted_params = len(param_shapes)
        wanted_numel = sum((shape.numel() for shape in param_shapes.values()))
        print(f'Have {avail_numel} numels to process.')
        print(f'Need {wanted_numel} numels in {wanted_params} params.')
    state_dict = OrderedDict()
    # Non-parameter buffers are copied in unchanged.
    state_dict.update(buffers)
    if debug:
        print(f'added {len(buffers)} buffers')
    offset = 0
    total_numel = 0
    total_params = 0
    for (name, shape) in param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        (partitioned_numel, partitioned_padding_numel) = zero3_partitioned_param_info(unpartitioned_numel, world_size)
        if debug:
            print(f'{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}')
        # Gather this param's slice from every rank, concatenate, trim the tail
        # padding, then restore the original tensor shape.
        state_dict[name] = torch.cat(tuple((fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size))), 0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel
    # `offset` counted per-rank elements; scale by world_size before comparing
    # with the total available element count.
    offset *= world_size
    if (offset != avail_numel):
        raise ValueError(f'consumed {offset} numels out of {avail_numel} - something is wrong')
    print(f'Reconstructed fp32 state dict with {total_params} params {total_numel} elements')
    return state_dict
|
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """Convert a ZeRO 2 or 3 checkpoint into a single consolidated fp32 ``state_dict``.

    The returned dict can be loaded with ``load_state_dict()`` and used for
    training without DeepSpeed, or shared e.g. via a model hub.

    Args:
        checkpoint_dir: path to the desired checkpoint folder.
        tag: checkpoint tag used as a unique identifier (e.g. ``global_step14``).
            When omitted, the tag is read from the ``latest`` file in
            ``checkpoint_dir``.

    Returns:
        A pytorch ``state_dict`` (already on CPU).

    Raises:
        ValueError: if ``tag`` is not given and no ``latest`` file exists.
        FileNotFoundError: if the resolved tag directory does not exist.

    Note: this in-memory approach requires sufficient free CPU memory; for the
    offline alternative use the ``zero_to_fp32.py`` script saved with the
    checkpoint. After ``model.load_state_dict(state_dict)`` the model is no
    longer usable in the same DeepSpeed context without re-initialization.
    """
    if tag is None:
        # Resolve the tag from the 'latest' marker file.
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()
    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
|
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """Consolidate a ZeRO 2/3 checkpoint into a single fp32 state_dict file.

    The output can be loaded with ``torch.load(file)`` + ``load_state_dict()``
    and used for training without DeepSpeed.

    Args:
        checkpoint_dir: checkpoint folder containing the tag-folder (e.g. ``global_step14``).
        output_file: destination for the fp32 state_dict (e.g. ``path/pytorch_model.bin``).
        tag: checkpoint tag; when omitted it is read from the ``latest`` file.
    """
    consolidated = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f'Saving fp32 state dict to {output_file}')
    torch.save(consolidated, output_file)
|
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """Move ``model`` to CPU and overwrite its weights with consolidated fp32 ZeRO weights.

    Args:
        model: the model object to update.
        checkpoint_dir: checkpoint folder containing the tag-folder (e.g. ``global_step14``).
        tag: checkpoint tag; when omitted it is read from the ``latest`` file.

    Returns:
        The modified model (same object, moved to CPU).

    Note: requires plenty of free CPU memory; otherwise use the
    ``zero_to_fp32.py`` utility placed in the checkpoint folder. After this
    call the model is no longer usable in the same DeepSpeed context without
    re-initializing the engine.
    """
    logger.info(f'Extracting fp32 weights')
    consolidated = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    logger.info(f'Overwriting model with fp32 weights')
    model = model.cpu()
    # strict=False: the consolidated dict may omit buffers the model defines.
    model.load_state_dict(consolidated, strict=False)
    return model
|
def get_global_step_from_zero_checkpoint(checkpoint_dir):
    """Return the global step recorded in a DeepSpeed checkpoint's ``latest`` file.

    Reads ``<checkpoint_dir>/latest`` and parses a tag of the form
    ``global_step<N>``.

    Args:
        checkpoint_dir: path to the checkpoint folder.

    Returns:
        The parsed step as an int, or -1 when the tag does not follow the
        ``global_step<N>`` convention.

    Raises:
        ValueError: if no ``latest`` file exists.
    """
    global_step = -1
    latest_path = os.path.join(checkpoint_dir, 'latest')
    if os.path.isfile(latest_path):
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()
        # Fix: guard the match — the original called match.group(1)
        # unconditionally, raising AttributeError on a custom tag instead of
        # falling back to the intended -1 default.
        match = re.match(r'global_step([0-9]+)', tag)
        if match is not None:
            global_step = int(match.group(1))
    else:
        raise ValueError(f"Unable to find 'latest' file at {latest_path}")
    return global_step
|
def get_cuda_bare_metal_version(cuda_dir):
    """Query ``nvcc -V`` for the installed CUDA release version.

    Args:
        cuda_dir: CUDA installation root (expected to contain ``bin/nvcc``),
            or None when CUDA is unavailable.

    Returns:
        ``(raw_nvcc_output, major, minor)`` as strings, or ``(None, -1, 0)``
        when CUDA is not available (then a CPU-only notice is printed).
    """
    # Fix: identity comparison with None (`is None`), not `== None`.
    if cuda_dir is None or torch.version.cuda is None:
        print('CUDA is not found, cpu version is installed')
        return (None, -1, 0)
    raw_output = subprocess.check_output([cuda_dir + '/bin/nvcc', '-V'], universal_newlines=True)
    output = raw_output.split()
    # nvcc -V prints e.g. "... release 11.3, V11.3.109"; take the token after "release".
    release_idx = output.index('release') + 1
    release = output[release_idx].split('.')
    bare_metal_major = release[0]
    # Only the first character of the minor component (strips the trailing comma).
    bare_metal_minor = release[1][0]
    return (raw_output, bare_metal_major, bare_metal_minor)
|
def alphafold_is_installed():
    """Report whether the ``alphafold`` package can be found on the import path."""
    spec = importlib.util.find_spec('alphafold')
    return spec is not None
|
def skip_unless_alphafold_installed():
    """Return a unittest decorator that skips a test unless AlphaFold is importable."""
    installed = alphafold_is_installed()
    return unittest.skipUnless(installed, 'Requires AlphaFold')
|
def import_alphafold():
    """Import AlphaFold and eagerly load all of its submodules.

    If AlphaFold is installed via the provided setuptools script, walking the
    package tree is necessary to expose its internals. The loaded top-level
    module is cached in ``sys.modules`` and published into this module's
    globals as ``alphafold``.
    """
    if 'alphafold' in sys.modules:
        return sys.modules['alphafold']
    module = importlib.import_module('alphafold')
    # Force-import every submodule so attribute access like
    # alphafold.model.modules works without explicit imports.
    for info in pkgutil.walk_packages(module.__path__, prefix='alphafold.'):
        importlib.import_module(info.name)
    sys.modules['alphafold'] = module
    globals()['alphafold'] = module
    return module
|
def get_alphafold_config():
    """Build the deterministic ``model_1_ptm`` AlphaFold config used for comparisons."""
    cfg = alphafold.model.config.model_config('model_1_ptm')
    # Determinism is required so outputs are directly comparable across runs.
    cfg.model.global_config.deterministic = True
    return cfg
|
def get_global_pretrained_openfold():
    """Lazily build, weight-load, and cache the pretrained OpenFold model on CUDA.

    The model is constructed once and stored in the module-level ``_model``
    cache; subsequent calls return the cached instance.

    Raises:
        FileNotFoundError: if the pretrained parameter file at ``_param_path``
            is missing.
    """
    global _model
    if _model is None:
        _model = AlphaFold(model_config('model_1_ptm')).eval()
        if not os.path.exists(_param_path):
            raise FileNotFoundError('Cannot load pretrained parameters. Make sure to run the \n installation script before running tests.')
        import_jax_weights_(_model, _param_path, version='model_1_ptm')
        _model = _model.cuda()
    return _model
|
def _get_orig_weights():
    """Load the raw AlphaFold parameter archive once and cache it module-wide."""
    global _orig_weights
    if _orig_weights is not None:
        return _orig_weights
    _orig_weights = np.load(_param_path)
    return _orig_weights
|
def _remove_key_prefix(d, prefix):
for (k, v) in list(d.items()):
if k.startswith(prefix):
d.pop(k)
d[k[len(prefix):]] = v
|
def fetch_alphafold_module_weights(weight_path):
    """Fetch raw AlphaFold weights whose flat names contain ``weight_path``.

    Matching keys are re-rooted at the last path component so they can be
    converted into a haiku-style nested parameter dict.

    Args:
        weight_path: '/'-separated module path used as a substring filter
            (a trailing '/' is tolerated).

    Returns:
        A haiku-style nested parameter dict.

    Raises:
        ImportError: if AlphaFold has not been imported via ``import_alphafold``.
    """
    orig_weights = _get_orig_weights()
    params = {k: v for k, v in orig_weights.items() if weight_path in k}
    if '/' in weight_path:
        spl = weight_path.split('/')
        # Drop the empty component produced by a trailing '/'.
        if len(spl[-1]) == 0:
            spl = spl[:-1]
        prefix = '/'.join(spl[:-1]) + '/'
        _remove_key_prefix(params, prefix)
    try:
        params = alphafold.model.utils.flat_params_to_haiku(params)
    except NameError as e:
        # Fix: narrowed from a bare ``except:`` — previously ANY failure inside
        # flat_params_to_haiku was masked as an ImportError. Only the missing
        # ``alphafold`` global (not yet imported) should map to ImportError.
        raise ImportError('Make sure to call import_alphafold before running this function') from e
    return params
|
class TestDataTransforms(unittest.TestCase):
    """Unit tests for the OpenFold data transforms (MSA, template, and atom features).

    Several tests load real fixtures from ``tests/test_data``; transforms
    decorated with a curry wrapper are invoked via ``.__wrapped__`` to pass
    extra arguments positionally.
    """

    def test_make_seq_mask(self):
        # One-hot encode residue types 0..19 and check a seq_mask of matching
        # shape is attached to the feature dict.
        seq = torch.tensor([range(20)], dtype=torch.int64).transpose(0, 1)
        seq_one_hot = torch.FloatTensor(seq.shape[0], 20).zero_()
        seq_one_hot.scatter_(1, seq, 1)
        protein_aatype = seq_one_hot.clone().detach()
        protein = {'aatype': protein_aatype}
        protein = make_seq_mask(protein)
        assert ('seq_mask' in protein)
        assert (protein['seq_mask'].shape == torch.Size((seq.shape[0], 20)))

    def test_add_distillation_flag(self):
        # ``__wrapped__`` bypasses the curry decorator to call the raw transform.
        protein = {}
        protein = add_distillation_flag.__wrapped__(protein, True)
        assert ('is_distillation' in protein)
        assert (protein['is_distillation'] is True)

    def test_make_all_atom_aatype(self):
        # The all-atom aatype should mirror the input aatype's shape.
        seq = torch.tensor([range(20)], dtype=torch.int64).transpose(0, 1)
        seq_one_hot = torch.FloatTensor(seq.shape[0], 20).zero_()
        seq_one_hot.scatter_(1, seq, 1)
        protein_aatype = seq_one_hot.clone().detach()
        protein = {'aatype': protein_aatype}
        protein = make_all_atom_aatype(protein)
        assert ('all_atom_aatype' in protein)
        assert (protein['all_atom_aatype'].shape == protein['aatype'].shape)

    def test_fix_templates_aatype(self):
        # Two copies of the 20 residue types, one-hot encoded with a template dim.
        template_seq = torch.tensor((list(range(20)) * 2), dtype=torch.int64)
        template_seq = template_seq.unsqueeze(0).transpose(0, 1)
        template_seq_one_hot = torch.FloatTensor(template_seq.shape[0], 20).zero_()
        template_seq_one_hot.scatter_(1, template_seq, 1)
        template_aatype = template_seq_one_hot.clone().detach().unsqueeze(0)
        protein = {'template_aatype': template_aatype, 'aatype': template_aatype}
        protein = fix_templates_aatype(protein)
        # Expected indices after remapping to our residue-type ordering.
        template_seq_ours = torch.tensor([([0, 4, 3, 6, 13, 7, 8, 9, 11, 10, 12, 2, 14, 5, 1, 15, 16, 19, 17, 18] * 2)])
        assert torch.all(torch.eq(protein['template_aatype'], template_seq_ours))

    def test_correct_msa_restypes(self):
        # The restype remapping must not change the MSA's shape.
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        protein = {'msa': torch.tensor(features['msa'], dtype=torch.int64)}
        protein = correct_msa_restypes(protein)
        assert torch.all(torch.eq(torch.tensor(features['msa'].shape), torch.tensor(protein['msa'].shape)))

    def test_squeeze_features(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        features_list = ['domain_name', 'msa', 'num_alignments', 'seq_length', 'sequence', 'superfamily', 'deletion_matrix', 'resolution', 'between_segment_residues', 'residue_index', 'template_all_atom_mask']
        protein = {'aatype': torch.tensor(features['aatype'])}
        # Add a trailing singleton dim so squeeze_features has something to remove.
        for k in features_list:
            if (k in features):
                if (k in ['domain_name', 'sequence']):
                    protein[k] = np.expand_dims(features[k], (- 1))
                else:
                    protein[k] = torch.tensor(features[k]).unsqueeze((- 1))
        for k in ['seq_length', 'num_alignments']:
            if (k in protein):
                protein[k] = protein[k].clone().detach().unsqueeze(0)
        protein_squeezed = squeeze_features(protein)
        # Squeezing must recover each feature's original shape.
        for k in features_list:
            if (k in protein):
                assert (protein_squeezed[k].shape == features[k].shape)

    def test_randomly_replace_msa_with_unknown(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        protein = {'msa': torch.tensor(features['msa']), 'aatype': torch.argmax(torch.tensor(features['aatype']), dim=1)}
        replace_proportion = 0.15
        x_idx = 20
        protein = randomly_replace_msa_with_unknown.__wrapped__(protein, replace_proportion)
        # NOTE(review): the proportions below are computed but never asserted —
        # this test only verifies that the transform runs without error.
        unknown_proportion_in_msa = (torch.bincount(protein['msa'].flatten()) / torch.numel(protein['msa']))
        unknown_proportion_in_seq = (torch.bincount(protein['aatype'].flatten()) / torch.numel(protein['aatype']))

    def test_sample_msa(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        max_seq = 1000
        keep_extra = True
        protein = {}
        for k in MSA_FEATURE_NAMES:
            if (k in features):
                protein[k] = torch.tensor(features[k])
        protein_processed = sample_msa.__wrapped__(protein.copy(), max_seq, keep_extra)
        # Sampled features are capped at max_seq rows; with keep_extra the
        # remainder lands in the corresponding 'extra_' feature.
        for k in MSA_FEATURE_NAMES:
            if ((k in protein) and keep_extra):
                assert (protein_processed[k].shape[0] == min(protein[k].shape[0], max_seq))
                assert (('extra_' + k) in protein_processed)
                assert (protein_processed[('extra_' + k)].shape[0] == (protein[k].shape[0] - min(protein[k].shape[0], max_seq)))

    def test_crop_extra_msa(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        max_extra_msa = 10
        protein = {'extra_msa': torch.tensor(features['msa'])}
        num_seq = protein['extra_msa'].shape[0]
        protein = crop_extra_msa.__wrapped__(protein, max_extra_msa)
        # Each extra feature is cropped to at most max_extra_msa rows.
        for k in MSA_FEATURE_NAMES:
            if (('extra_' + k) in protein):
                assert (protein[('extra_' + k)].shape[0] == min(max_extra_msa, num_seq))

    def test_delete_extra_msa(self):
        protein = {'extra_msa': torch.rand((512, 100, 23))}
        extra_msa_has_deletion_shape = list(protein['extra_msa'].shape)
        extra_msa_has_deletion_shape[2] = 1
        protein['extra_deletion_matrix'] = torch.rand(extra_msa_has_deletion_shape)
        protein = delete_extra_msa(protein)
        # All 'extra_*' features must be gone afterwards.
        for k in MSA_FEATURE_NAMES:
            assert (('extra_' + k) not in protein)
        assert ('extra_msa' not in protein)

    def test_nearest_neighbor_clusters(self):
        # Cluster assignment should attach an 'extra_cluster_assignment' feature.
        with gzip.open('tests/test_data/sample_feats.pickle.gz', 'rb') as f:
            features = pickle.load(f)
        protein = {'msa': torch.tensor(features['true_msa'][0], dtype=torch.int64), 'msa_mask': torch.tensor(features['msa_mask'][0], dtype=torch.int64), 'extra_msa': torch.tensor(features['extra_msa'][0], dtype=torch.int64), 'extra_msa_mask': torch.tensor(features['extra_msa_mask'][0], dtype=torch.int64)}
        protein = nearest_neighbor_clusters.__wrapped__(protein, 0)
        assert ('extra_cluster_assignment' in protein)

    def test_make_msa_mask(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        msa_mat = torch.tensor(features['msa'])
        protein = {'msa': msa_mat}
        protein = make_msa_mask(protein)
        assert ('msa_row_mask' in protein)
        assert (protein['msa_row_mask'].shape[0] == msa_mat.shape[0])

    def test_make_hhblits_profile(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        protein = {'msa': torch.tensor(features['msa'], dtype=torch.int64)}
        protein = make_hhblits_profile(protein)
        assert ('hhblits_profile' in protein)
        # One 22-way distribution per residue column.
        assert (protein['hhblits_profile'].shape == torch.Size((protein['msa'].shape[1], 22)))

    def test_make_masked_msa(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        protein = {'msa': torch.tensor(features['msa'], dtype=torch.int64), 'aatype': torch.tensor(features['aatype'], dtype=torch.int64)}
        protein = make_hhblits_profile(protein)
        masked_msa_config = config.data.common.masked_msa
        protein = make_masked_msa.__wrapped__(protein, masked_msa_config, replace_fraction=0.15)
        assert ('bert_mask' in protein)
        assert ('true_msa' in protein)
        assert ('msa' in protein)
        assert (protein['bert_mask'].sum() >= 0)
        # Unmasked positions must be identical between true_msa and msa.
        assert torch.all(torch.eq((protein['true_msa'] * (1 - protein['bert_mask'])), (protein['msa'] * (1 - protein['bert_mask']))))

    def test_make_msa_feat(self):
        with open('tests/test_data/features.pkl', 'rb') as file:
            features = pickle.load(file)
        protein = {'between_segment_residues': torch.tensor(features['between_segment_residues']), 'msa': torch.tensor(features['msa'], dtype=torch.int64), 'deletion_matrix': torch.tensor(features['deletion_matrix_int']), 'aatype': torch.argmax(torch.tensor(features['aatype']), dim=1)}
        protein = make_msa_feat.__wrapped__(protein)
        assert ('msa_feat' in protein)
        assert ('target_feat' in protein)
        # target_feat: 22 channels per residue; msa_feat: 25 channels per MSA cell.
        assert (protein['target_feat'].shape == torch.Size((protein['msa'].shape[1], 22)))
        assert (protein['msa_feat'].shape == torch.Size((*protein['msa'].shape, 25)))

    def test_crop_templates(self):
        with gzip.open('tests/test_data/sample_feats.pickle.gz', 'rb') as f:
            features = pickle.load(f)
        protein = {'template_aatype': torch.tensor(features['true_msa'][0]), 'template_all_atom_masks': torch.tensor(features['msa_mask'][0])}
        max_templates = 2
        protein = crop_templates.__wrapped__(protein, max_templates)
        # All template features are cropped to max_templates along dim 0.
        assert (protein['template_aatype'].shape[0] == max_templates)
        assert (protein['template_all_atom_masks'].shape[0] == max_templates)

    def test_make_atom14_masks(self):
        with gzip.open('tests/test_data/sample_feats.pickle.gz', 'rb') as file:
            features = pickle.load(file)
        protein = {'aatype': torch.tensor(features['aatype'][0])}
        protein = make_atom14_masks(protein)
        # atom14 <-> atom37 index maps and existence masks must all be present.
        assert ('atom14_atom_exists' in protein)
        assert ('residx_atom14_to_atom37' in protein)
        assert ('residx_atom37_to_atom14' in protein)
        assert ('atom37_atom_exists' in protein)
|
class TestInputEmbedder(unittest.TestCase):
    """Shape check for the InputEmbedder module."""

    def test_shape(self):
        # Small, mostly prime dimensions so any axis mix-up changes the shape.
        tf_dim, msa_dim = 2, 3
        c_z, c_m = 5, 7
        relpos_k = 11
        batch, n_res, n_clust = 13, 17, 19
        target_feat = torch.rand((batch, n_res, tf_dim))
        residue_index = torch.rand((batch, n_res))
        msa_feat = torch.rand((batch, n_clust, n_res, msa_dim))
        embedder = InputEmbedder(tf_dim, msa_dim, c_z, c_m, relpos_k)
        msa_emb, pair_emb = embedder(target_feat, residue_index, msa_feat)
        self.assertTrue(msa_emb.shape == (batch, n_clust, n_res, c_m))
        self.assertTrue(pair_emb.shape == (batch, n_res, n_res, c_z))
|
class TestRecyclingEmbedder(unittest.TestCase):
    """Shape check for the RecyclingEmbedder module."""

    def test_shape(self):
        batch, n = 2, 3
        c_z, c_m = 5, 7
        min_bin, max_bin, no_bins = 0, 10, 9
        embedder = RecyclingEmbedder(c_m, c_z, min_bin, max_bin, no_bins)
        m_prev = torch.rand((batch, n, c_m))
        z_prev = torch.rand((batch, n, n, c_z))
        x_prev = torch.rand((batch, n, 3))
        # Recycling must preserve both the single and pair representation shapes.
        m_upd, z_upd = embedder(m_prev, z_prev, x_prev)
        self.assertTrue(z_upd.shape == (batch, n, n, c_z))
        self.assertTrue(m_upd.shape == (batch, n, c_m))
|
class TestTemplateAngleEmbedder(unittest.TestCase):
    """Shape check for the TemplateAngleEmbedder module."""

    def test_shape(self):
        angle_dim, c_m = 51, 256
        batch, n_templ, n_res = 4, 4, 256
        embedder = TemplateAngleEmbedder(angle_dim, c_m)
        # Only the channel dimension should change: angle_dim -> c_m.
        out = embedder(torch.rand((batch, n_templ, n_res, angle_dim)))
        self.assertTrue(out.shape == (batch, n_templ, n_res, c_m))
|
class TestTemplatePairEmbedder(unittest.TestCase):
    """Shape check for the TemplatePairEmbedder module."""

    def test_shape(self):
        batch, n_templ, n_res = 2, 3, 5
        pair_dim, c_t = 7, 11
        embedder = TemplatePairEmbedder(pair_dim, c_t)
        # Only the channel dimension should change: pair_dim -> c_t.
        out = embedder(torch.rand((batch, n_templ, n_res, n_res, pair_dim)))
        self.assertTrue(out.shape == (batch, n_templ, n_res, n_res, c_t))
|
class TestEvoformerStack(unittest.TestCase):
    """Shape checks and AlphaFold numerical-parity checks for EvoformerStack."""

    def test_shape(self):
        # Shared dims come from the test constants; hidden sizes are arbitrary
        # small values so axis mix-ups surface as shape mismatches.
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c_z = consts.c_z
        c_hidden_msa_att = 12
        c_hidden_opm = 17
        c_hidden_mul = 19
        c_hidden_pair_att = 14
        c_s = consts.c_s
        no_heads_msa = 3
        no_heads_pair = 7
        no_blocks = 2
        transition_n = 2
        msa_dropout = 0.15
        pair_stack_dropout = 0.25
        inf = 1000000000.0
        eps = 1e-10
        es = EvoformerStack(c_m, c_z, c_hidden_msa_att, c_hidden_opm, c_hidden_mul, c_hidden_pair_att, c_s, no_heads_msa, no_heads_pair, no_blocks, transition_n, msa_dropout, pair_stack_dropout, blocks_per_ckpt=None, inf=inf, eps=eps).eval()
        m = torch.rand((batch_size, n_seq, n_res, c_m))
        z = torch.rand((batch_size, n_res, n_res, c_z))
        msa_mask = torch.randint(0, 2, size=(batch_size, n_seq, n_res))
        pair_mask = torch.randint(0, 2, size=(batch_size, n_res, n_res))
        shape_m_before = m.shape
        shape_z_before = z.shape
        # The stack must preserve MSA/pair shapes and emit one single-repr per residue.
        (m, z, s) = es(m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask)
        self.assertTrue((m.shape == shape_m_before))
        self.assertTrue((z.shape == shape_z_before))
        self.assertTrue((s.shape == (batch_size, n_res, c_s)))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        # Run one AlphaFold EvoformerIteration under haiku with pretrained
        # weights and compare against our evoformer block on the same inputs,
        # both with and without inplace_safe.
        def run_ei(activations, masks):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            ei = alphafold.model.modules.EvoformerIteration(c_e, config.model.global_config, is_extra_msa=False)
            return ei(activations, masks, is_training=False)
        f = hk.transform(run_ei)
        n_res = consts.n_res
        n_seq = consts.n_seq
        activations = {'msa': np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32), 'pair': np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)}
        masks = {'msa': np.random.randint(0, 2, (n_seq, n_res)).astype(np.float32), 'pair': np.random.randint(0, 2, (n_res, n_res)).astype(np.float32)}
        params = compare_utils.fetch_alphafold_module_weights('alphafold/alphafold_iteration/evoformer/evoformer_iteration')
        # Strip the leading stacked-block axis from each fetched weight tensor.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        key = jax.random.PRNGKey(42)
        out_gt = f.apply(params, key, activations, masks)
        jax.tree_map((lambda x: x.block_until_ready()), out_gt)
        out_gt_msa = torch.as_tensor(np.array(out_gt['msa']))
        out_gt_pair = torch.as_tensor(np.array(out_gt['pair']))
        model = compare_utils.get_global_pretrained_openfold()
        (out_repro_msa, out_repro_pair) = model.evoformer.blocks[0](torch.as_tensor(activations['msa']).cuda(), torch.as_tensor(activations['pair']).cuda(), torch.as_tensor(masks['msa']).cuda(), torch.as_tensor(masks['pair']).cuda(), chunk_size=4, _mask_trans=False, inplace_safe=False)
        out_repro_msa = out_repro_msa.cpu()
        out_repro_pair = out_repro_pair.cpu()
        self.assertTrue((torch.mean(torch.abs((out_repro_msa - out_gt_msa))) < consts.eps))
        self.assertTrue((torch.max(torch.abs((out_repro_pair - out_gt_pair))) < consts.eps))
        # Second pass exercises the in-place code path; results must match too.
        (out_repro_msa, out_repro_pair) = model.evoformer.blocks[0](torch.as_tensor(activations['msa']).cuda(), torch.as_tensor(activations['pair']).cuda(), torch.as_tensor(masks['msa']).cuda(), torch.as_tensor(masks['pair']).cuda(), chunk_size=4, _mask_trans=False, inplace_safe=True)
        out_repro_msa = out_repro_msa.cpu()
        out_repro_pair = out_repro_pair.cpu()
        self.assertTrue((torch.mean(torch.abs((out_repro_msa - out_gt_msa))) < consts.eps))
        self.assertTrue((torch.max(torch.abs((out_repro_pair - out_gt_pair))) < consts.eps))
|
class TestExtraMSAStack(unittest.TestCase):
    """Shape check for the ExtraMSAStack module."""

    def test_shape(self):
        # Arbitrary small dimensions; hidden sizes differ from channel dims so
        # that a transposed axis shows up as a shape error.
        batch, n_extra_seq, n_res = 2, 23, 5
        c_m, c_z = 7, 11
        stack = ExtraMSAStack(
            c_m, c_z,
            12,    # c_hidden_msa_att
            17,    # c_hidden_opm
            19,    # c_hidden_mul
            16,    # c_hidden_tri_att
            3,     # no_heads_msa
            8,     # no_heads_pair
            2,     # no_blocks
            5,     # transition_n
            0.15,  # msa_dropout
            0.25,  # pair_stack_dropout
            ckpt=False,
            inf=1000000000.0,
            eps=1e-10,
        ).eval()
        m = torch.rand((batch, n_extra_seq, n_res, c_m))
        z = torch.rand((batch, n_res, n_res, c_z))
        msa_mask = torch.randint(0, 2, size=(batch, n_extra_seq, n_res))
        pair_mask = torch.randint(0, 2, size=(batch, n_res, n_res))
        expected_z_shape = z.shape
        # The extra-MSA stack returns only the (shape-preserved) pair representation.
        z = stack(m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask)
        self.assertTrue(z.shape == expected_z_shape)
|
class TestMSATransition(unittest.TestCase):
    """Shape and AlphaFold numerical-parity tests for the MSA transition layer."""

    def test_shape(self):
        batch_size = 2
        s_t = 3
        n_r = 5
        c_m = 7
        n = 11
        mt = MSATransition(c_m, n)
        m = torch.rand((batch_size, s_t, n_r, c_m))
        shape_before = m.shape
        # The transition operates pointwise, so the shape must be preserved.
        m = mt(m, chunk_size=4)
        shape_after = m.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        # Run AlphaFold's haiku Transition with pretrained weights and compare
        # against our msa_transition on identical inputs.
        def run_msa_transition(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_trans = alphafold.model.modules.Transition(c_e.msa_transition, config.model.global_config, name='msa_transition')
            act = msa_trans(act=msa_act, mask=msa_mask)
            return act
        f = hk.transform(run_msa_transition)
        n_res = consts.n_res
        n_seq = consts.n_seq
        msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
        msa_mask = np.ones((n_seq, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + 'msa_transition'))
        # Strip the leading stacked-block axis from each fetched weight tensor.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].core.msa_transition(torch.as_tensor(msa_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda()).cpu()
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestImportWeights(unittest.TestCase):
    """Spot-checks that import_jax_weights_ maps AlphaFold npz weights correctly."""

    def test_import_jax_weights_(self):
        npz_path = 'openfold/resources/params/params_model_1_ptm.npz'
        c = model_config('model_1_ptm')
        c.globals.blocks_per_ckpt = None
        model = AlphaFold(c)
        import_jax_weights_(model, npz_path)
        data = np.load(npz_path)
        prefix = 'alphafold/alphafold_iteration/'
        # Pairs of (AlphaFold source tensor, imported OpenFold parameter).
        # Linear weights are transposed between haiku and torch conventions;
        # stacked evoformer weights are indexed by block before comparison.
        test_pairs = [(torch.as_tensor(data[(prefix + 'structure_module/initial_projection//weights')]).transpose((- 1), (- 2)), model.structure_module.linear_in.weight), (torch.as_tensor(data[(prefix + 'evoformer/prev_pair_norm//offset')]), model.recycling_embedder.layer_norm_z.bias), (torch.as_tensor(data[(prefix + 'evoformer/evoformer_iteration/outer_product_mean/left_projection//weights')][1].transpose((- 1), (- 2))), model.evoformer.blocks[1].core.outer_product_mean.linear_1.weight)]
        for (w_alpha, w_repro) in test_pairs:
            self.assertTrue(torch.all((w_alpha == w_repro)))
|
def affine_vector_to_4x4(affine):
    """Convert a 7-component (quaternion + translation) affine tensor to 4x4 matrices."""
    rigid = Rigid.from_tensor_7(affine)
    return rigid.to_tensor_4x4()
|
class TestLoss(unittest.TestCase):
def test_run_torsion_angle_loss(self):
batch_size = consts.batch_size
n_res = consts.n_res
a = torch.rand((batch_size, n_res, 7, 2))
a_gt = torch.rand((batch_size, n_res, 7, 2))
a_alt_gt = torch.rand((batch_size, n_res, 7, 2))
loss = torsion_angle_loss(a, a_gt, a_alt_gt)
def test_run_fape(self):
batch_size = consts.batch_size
n_frames = 7
n_atoms = 5
x = torch.rand((batch_size, n_atoms, 3))
x_gt = torch.rand((batch_size, n_atoms, 3))
rots = torch.rand((batch_size, n_frames, 3, 3))
rots_gt = torch.rand((batch_size, n_frames, 3, 3))
trans = torch.rand((batch_size, n_frames, 3))
trans_gt = torch.rand((batch_size, n_frames, 3))
t = Rigid(Rotation(rot_mats=rots), trans)
t_gt = Rigid(Rotation(rot_mats=rots_gt), trans_gt)
frames_mask = torch.randint(0, 2, (batch_size, n_frames)).float()
positions_mask = torch.randint(0, 2, (batch_size, n_atoms)).float()
length_scale = 10
loss = compute_fape(pred_frames=t, target_frames=t_gt, frames_mask=frames_mask, pred_positions=x, target_positions=x_gt, positions_mask=positions_mask, length_scale=length_scale)
def test_run_between_residue_bond_loss(self):
bs = consts.batch_size
n = consts.n_res
pred_pos = torch.rand(bs, n, 14, 3)
pred_atom_mask = torch.randint(0, 2, (bs, n, 14))
residue_index = torch.arange(n).unsqueeze(0)
aatype = torch.randint(0, 22, (bs, n))
between_residue_bond_loss(pred_pos, pred_atom_mask, residue_index, aatype)
@compare_utils.skip_unless_alphafold_installed()
def test_between_residue_bond_loss_compare(self):
def run_brbl(pred_pos, pred_atom_mask, residue_index, aatype):
return alphafold.model.all_atom.between_residue_bond_loss(pred_pos, pred_atom_mask, residue_index, aatype)
f = hk.transform(run_brbl)
n_res = consts.n_res
pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
pred_atom_mask = np.random.randint(0, 2, (n_res, 14)).astype(np.float32)
residue_index = np.arange(n_res)
aatype = np.random.randint(0, 22, (n_res,))
out_gt = f.apply({}, None, pred_pos, pred_atom_mask, residue_index, aatype)
out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
out_repro = between_residue_bond_loss(torch.tensor(pred_pos).cuda(), torch.tensor(pred_atom_mask).cuda(), torch.tensor(residue_index).cuda(), torch.tensor(aatype).cuda())
out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
for k in out_gt.keys():
self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
def test_run_between_residue_clash_loss(self):
bs = consts.batch_size
n = consts.n_res
pred_pos = torch.rand(bs, n, 14, 3)
pred_atom_mask = torch.randint(0, 2, (bs, n, 14)).float()
atom14_atom_radius = torch.rand(bs, n, 14)
residue_index = torch.arange(n).unsqueeze(0)
loss = between_residue_clash_loss(pred_pos, pred_atom_mask, atom14_atom_radius, residue_index)
@compare_utils.skip_unless_alphafold_installed()
def test_between_residue_clash_loss_compare(self):
def run_brcl(pred_pos, atom_exists, atom_radius, res_ind):
return alphafold.model.all_atom.between_residue_clash_loss(pred_pos, atom_exists, atom_radius, res_ind)
f = hk.transform(run_brcl)
n_res = consts.n_res
pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
atom_exists = np.random.randint(0, 2, (n_res, 14)).astype(np.float32)
atom_radius = np.random.rand(n_res, 14).astype(np.float32)
res_ind = np.arange(n_res)
out_gt = f.apply({}, None, pred_pos, atom_exists, atom_radius, res_ind)
out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
out_repro = between_residue_clash_loss(torch.tensor(pred_pos).cuda(), torch.tensor(atom_exists).cuda(), torch.tensor(atom_radius).cuda(), torch.tensor(res_ind).cuda())
out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
for k in out_gt.keys():
self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_compute_plddt_compare(self):
n_res = consts.n_res
logits = np.random.rand(n_res, 50)
out_gt = alphafold.common.confidence.compute_plddt(logits)
out_gt = torch.tensor(out_gt)
logits_t = torch.tensor(logits)
out_repro = compute_plddt(logits_t)
self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
def test_find_structural_violations(self):
n = consts.n_res
batch = {'atom14_atom_exists': torch.randint(0, 2, (n, 14)), 'residue_index': torch.arange(n), 'aatype': torch.randint(0, 20, (n,)), 'residx_atom14_to_atom37': torch.randint(0, 37, (n, 14)).long()}
pred_pos = torch.rand(n, 14, 3)
config = {'clash_overlap_tolerance': 1.5, 'violation_tolerance_factor': 12.0}
find_structural_violations(batch, pred_pos, **config)
@compare_utils.skip_unless_alphafold_installed()
def test_find_structural_violations_compare(self):
    """Compare find_structural_violations against the AlphaFold implementation."""
    def run_fsv(batch, pos, config):
        # AlphaFold's version reads residue constants relative to the CWD,
        # so temporarily chdir into the bundled test data directory
        cwd = os.getcwd()
        os.chdir('tests/test_data')
        loss = alphafold.model.folding.find_structural_violations(batch, pos, config)
        os.chdir(cwd)
        return loss
    f = hk.transform(run_fsv)
    n_res = consts.n_res
    batch = {'atom14_atom_exists': np.random.randint(0, 2, (n_res, 14)), 'residue_index': np.arange(n_res), 'aatype': np.random.randint(0, 20, (n_res,)), 'residx_atom14_to_atom37': np.random.randint(0, 37, (n_res, 14)).astype(np.int64)}
    pred_pos = np.random.rand(n_res, 14, 3)
    config = mlc.ConfigDict({'clash_overlap_tolerance': 1.5, 'violation_tolerance_factor': 12.0})
    # Ground truth from JAX; block_until_ready forces async dispatch to finish
    out_gt = f.apply({}, None, batch, pred_pos, config)
    out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
    out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
    # Reproduction on GPU, then moved back to CPU for comparison
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    out_repro = find_structural_violations(batch, torch.tensor(pred_pos).cuda(), **config)
    out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
    def compare(out):
        # Invoked per-leaf over the zipped (ground truth, reproduction) trees
        (gt, repro) = out
        assert (torch.max(torch.abs((gt - repro))) < consts.eps)
    dict_multimap(compare, [out_gt, out_repro])
@compare_utils.skip_unless_alphafold_installed()
def test_compute_renamed_ground_truth_compare(self):
    """Compare compute_renamed_ground_truth (ambiguous-atom renaming) with AlphaFold's."""
    def run_crgt(batch, atom14_pred_pos):
        return alphafold.model.folding.compute_renamed_ground_truth(batch, atom14_pred_pos)
    f = hk.transform(run_crgt)
    n_res = consts.n_res
    batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'aatype': np.random.randint(0, 20, (n_res,)), 'atom14_gt_positions': np.random.rand(n_res, 14, 3), 'atom14_gt_exists': np.random.randint(0, 2, (n_res, 14)).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32), 'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32)}
    def _build_extra_feats_np():
        # Derive atom14 masks/positions via the torch transforms, then back to numpy
        # so both implementations receive identical feature dicts
        b = tree_map((lambda n: torch.tensor(n)), batch, np.ndarray)
        b = data_transforms.make_atom14_masks(b)
        b = data_transforms.make_atom14_positions(b)
        return tensor_tree_map((lambda t: np.array(t)), b)
    batch = _build_extra_feats_np()
    atom14_pred_pos = np.random.rand(n_res, 14, 3)
    out_gt = f.apply({}, None, batch, atom14_pred_pos)
    out_gt = jax.tree_map((lambda x: torch.tensor(np.array(x))), out_gt)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    atom14_pred_pos = torch.tensor(atom14_pred_pos).cuda()
    out_repro = compute_renamed_ground_truth(batch, atom14_pred_pos)
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    # Compare every output tensor produced by the reproduction
    for k in out_repro:
        self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_msa_loss_compare(self):
    """Compare masked_msa_loss against AlphaFold's MaskedMsaHead.loss."""
    def run_msa_loss(value, batch):
        config = compare_utils.get_alphafold_config()
        msa_head = alphafold.model.modules.MaskedMsaHead(config.model.heads.masked_msa, config.model.global_config)
        return msa_head.loss(value, batch)
    f = hk.transform(run_msa_loss)
    n_res = consts.n_res
    n_seq = consts.n_seq
    # 23 classes: 20 amino acids + unknown + gap + mask token
    value = {'logits': np.random.rand(n_res, n_seq, 23).astype(np.float32)}
    batch = {'true_msa': np.random.randint(0, 21, (n_res, n_seq)), 'bert_mask': np.random.randint(0, 2, (n_res, n_seq)).astype(np.float32)}
    out_gt = f.apply({}, None, value, batch)['loss']
    out_gt = torch.tensor(np.array(out_gt))
    value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    with torch.no_grad():
        out_repro = masked_msa_loss(value['logits'], **batch)
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_distogram_loss_compare(self):
    """Compare distogram_loss against AlphaFold's DistogramHead.loss."""
    config = compare_utils.get_alphafold_config()
    c_distogram = config.model.heads.distogram
    def run_distogram_loss(value, batch):
        dist_head = alphafold.model.modules.DistogramHead(c_distogram, config.model.global_config)
        return dist_head.loss(value, batch)
    f = hk.transform(run_distogram_loss)
    n_res = consts.n_res
    value = {'logits': np.random.rand(n_res, n_res, c_distogram.num_bins).astype(np.float32), 'bin_edges': np.linspace(c_distogram.first_break, c_distogram.last_break, c_distogram.num_bins)}
    batch = {'pseudo_beta': np.random.rand(n_res, 3).astype(np.float32), 'pseudo_beta_mask': np.random.randint(0, 2, (n_res,))}
    out_gt = f.apply({}, None, value, batch)['loss']
    out_gt = torch.tensor(np.array(out_gt))
    value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    with torch.no_grad():
        # Bin parameters are forwarded from the same AlphaFold config used above
        out_repro = distogram_loss(logits=value['logits'], min_bin=c_distogram.first_break, max_bin=c_distogram.last_break, no_bins=c_distogram.num_bins, **batch)
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_experimentally_resolved_loss_compare(self):
    """Compare experimentally_resolved_loss against AlphaFold's ExperimentallyResolvedHead.loss."""
    config = compare_utils.get_alphafold_config()
    c_experimentally_resolved = config.model.heads.experimentally_resolved
    def run_experimentally_resolved_loss(value, batch):
        er_head = alphafold.model.modules.ExperimentallyResolvedHead(c_experimentally_resolved, config.model.global_config)
        return er_head.loss(value, batch)
    f = hk.transform(run_experimentally_resolved_loss)
    n_res = consts.n_res
    value = {'logits': np.random.rand(n_res, 37).astype(np.float32)}
    # resolution=1.0 falls inside the head's [min_resolution, max_resolution] window
    batch = {'all_atom_mask': np.random.randint(0, 2, (n_res, 37)), 'atom37_atom_exists': np.random.randint(0, 2, (n_res, 37)), 'resolution': np.array(1.0)}
    out_gt = f.apply({}, None, value, batch)['loss']
    out_gt = torch.tensor(np.array(out_gt))
    value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    with torch.no_grad():
        out_repro = experimentally_resolved_loss(logits=value['logits'], min_resolution=c_experimentally_resolved.min_resolution, max_resolution=c_experimentally_resolved.max_resolution, **batch)
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_supervised_chi_loss_compare(self):
    """Compare supervised_chi_loss against AlphaFold's folding.supervised_chi_loss."""
    config = compare_utils.get_alphafold_config()
    c_chi_loss = config.model.heads.structure_module
    def run_supervised_chi_loss(value, batch):
        # AlphaFold's version accumulates into a mutable ret dict
        ret = {'loss': jax.numpy.array(0.0)}
        alphafold.model.folding.supervised_chi_loss(ret, batch, value, c_chi_loss)
        return ret['loss']
    f = hk.transform(run_supervised_chi_loss)
    n_res = consts.n_res
    # Leading 8 is the number of structure-module layers producing angle trajectories
    value = {'sidechains': {'angles_sin_cos': np.random.rand(8, n_res, 7, 2).astype(np.float32), 'unnormalized_angles_sin_cos': np.random.rand(8, n_res, 7, 2).astype(np.float32)}}
    batch = {'aatype': np.random.randint(0, 21, (n_res,)), 'seq_mask': np.random.randint(0, 2, (n_res,)), 'chi_mask': np.random.randint(0, 2, (n_res, 4)), 'chi_angles': np.random.rand(n_res, 4).astype(np.float32)}
    out_gt = f.apply({}, None, value, batch)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    # OpenFold's loss consumes sin/cos pairs rather than raw angles
    batch['chi_angles_sin_cos'] = torch.stack([torch.sin(batch['chi_angles']), torch.cos(batch['chi_angles'])], dim=(- 1))
    with torch.no_grad():
        out_repro = supervised_chi_loss(chi_weight=c_chi_loss.chi_weight, angle_norm_weight=c_chi_loss.angle_norm_weight, **{**batch, **value['sidechains']})
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_violation_loss_compare(self):
    """Compare violation_loss against AlphaFold's structural_violation_loss."""
    config = compare_utils.get_alphafold_config()
    c_viol = config.model.heads.structure_module
    def run_viol_loss(batch, atom14_pred_pos):
        ret = {'loss': np.array(0.0).astype(np.float32)}
        value = {}
        value['violations'] = alphafold.model.folding.find_structural_violations(batch, atom14_pred_pos, c_viol)
        alphafold.model.folding.structural_violation_loss(ret, batch, value, c_viol)
        return ret['loss']
    f = hk.transform(run_viol_loss)
    n_res = consts.n_res
    batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'residue_index': np.arange(n_res), 'aatype': np.random.randint(0, 21, (n_res,))}
    # AlphaFold's TF transform mutates batch in place; re-wrap values as numpy
    alphafold.model.tf.data_transforms.make_atom14_masks(batch)
    batch = {k: np.array(v) for (k, v) in batch.items()}
    atom14_pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
    out_gt = f.apply({}, None, batch, atom14_pred_pos)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    batch = tree_map((lambda n: torch.tensor(n).cuda()), batch, np.ndarray)
    atom14_pred_pos = torch.tensor(atom14_pred_pos).cuda()
    batch = data_transforms.make_atom14_masks(batch)
    out_repro = violation_loss(find_structural_violations(batch, atom14_pred_pos, **c_viol), **batch)
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_lddt_loss_compare(self):
    """Compare lddt_loss against AlphaFold's PredictedLDDTHead.loss."""
    config = compare_utils.get_alphafold_config()
    c_plddt = config.model.heads.predicted_lddt
    def run_plddt_loss(value, batch):
        head = alphafold.model.modules.PredictedLDDTHead(c_plddt, config.model.global_config)
        return head.loss(value, batch)
    f = hk.transform(run_plddt_loss)
    n_res = consts.n_res
    value = {'predicted_lddt': {'logits': np.random.rand(n_res, c_plddt.num_bins).astype(np.float32)}, 'structure_module': {'final_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32)}}
    batch = {'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32), 'resolution': np.array(1.0).astype(np.float32)}
    out_gt = f.apply({}, None, value, batch)
    out_gt = torch.tensor(np.array(out_gt['loss']))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    value = tree_map(to_tensor, value, np.ndarray)
    batch = tree_map(to_tensor, batch, np.ndarray)
    # Remaining head hyperparameters are splatted straight from the config dict
    out_repro = lddt_loss(logits=value['predicted_lddt']['logits'], all_atom_pred_pos=value['structure_module']['final_atom_positions'], **{**batch, **c_plddt})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_backbone_loss_compare(self):
    """Compare backbone_loss (FAPE on backbone frames) against AlphaFold's backbone_loss."""
    config = compare_utils.get_alphafold_config()
    c_sm = config.model.heads.structure_module
    def run_bb_loss(batch, value):
        ret = {'loss': np.array(0.0)}
        alphafold.model.folding.backbone_loss(ret, batch, value, c_sm)
        return ret['loss']
    f = hk.transform(run_bb_loss)
    n_res = consts.n_res
    batch = {'backbone_affine_tensor': random_affines_vector((n_res,)), 'backbone_affine_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'use_clamped_fape': np.array(0.0)}
    # One affine trajectory per structure-module layer
    value = {'traj': random_affines_vector((c_sm.num_layer, n_res))}
    out_gt = f.apply({}, None, batch, value)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    # OpenFold expects 4x4 rigid tensors under different key names
    batch['backbone_rigid_tensor'] = affine_vector_to_4x4(batch['backbone_affine_tensor'])
    batch['backbone_rigid_mask'] = batch['backbone_affine_mask']
    out_repro = backbone_loss(traj=value['traj'], **{**batch, **c_sm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_sidechain_loss_compare(self):
    """Compare sidechain_loss against AlphaFold's folding.sidechain_loss."""
    config = compare_utils.get_alphafold_config()
    c_sm = config.model.heads.structure_module
    def run_sidechain_loss(batch, value, atom14_pred_positions):
        # Rebuild the frame/vector containers AlphaFold's loss expects
        batch = {**batch, **alphafold.model.all_atom.atom37_to_frames(batch['aatype'], batch['all_atom_positions'], batch['all_atom_mask'])}
        v = {}
        v['sidechains'] = {}
        v['sidechains']['frames'] = alphafold.model.r3.rigids_from_tensor4x4(value['sidechains']['frames'])
        v['sidechains']['atom_pos'] = alphafold.model.r3.vecs_from_tensor(value['sidechains']['atom_pos'])
        v.update(alphafold.model.folding.compute_renamed_ground_truth(batch, atom14_pred_positions))
        value = v
        ret = alphafold.model.folding.sidechain_loss(batch, value, c_sm)
        return ret['loss']
    f = hk.transform(run_sidechain_loss)
    n_res = consts.n_res
    batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'aatype': np.random.randint(0, 20, (n_res,)), 'atom14_gt_positions': np.random.rand(n_res, 14, 3).astype(np.float32), 'atom14_gt_exists': np.random.randint(0, 2, (n_res, 14)).astype(np.float32), 'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32)}
    def _build_extra_feats_np():
        # Derive atom14 features with the torch transforms, then return to numpy
        b = tree_map((lambda n: torch.tensor(n)), batch, np.ndarray)
        b = data_transforms.make_atom14_masks(b)
        b = data_transforms.make_atom14_positions(b)
        return tensor_tree_map((lambda t: np.array(t)), b)
    batch = _build_extra_feats_np()
    # 8 rigid groups per residue in the sidechain frame representation
    value = {'sidechains': {'frames': random_affines_4x4((c_sm.num_layer, n_res, 8)), 'atom_pos': np.random.rand(c_sm.num_layer, n_res, 14, 3).astype(np.float32)}}
    atom14_pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
    out_gt = f.apply({}, None, batch, value, atom14_pred_pos)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    atom14_pred_pos = to_tensor(atom14_pred_pos)
    batch = data_transforms.atom37_to_frames(batch)
    batch.update(compute_renamed_ground_truth(batch, atom14_pred_pos))
    out_repro = sidechain_loss(sidechain_frames=value['sidechains']['frames'], sidechain_atom_pos=value['sidechains']['atom_pos'], **{**batch, **c_sm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
@compare_utils.skip_unless_alphafold_installed()
def test_tm_loss_compare(self):
    """Compare tm_loss against AlphaFold's PredictedAlignedErrorHead loss, using pretrained weights."""
    config = compare_utils.get_alphafold_config()
    c_tm = config.model.heads.predicted_aligned_error
    def run_tm_loss(representations, batch, value):
        head = alphafold.model.modules.PredictedAlignedErrorHead(c_tm, config.model.global_config)
        v = {}
        v.update(value)
        # Run the head forward pass (is_training=False) before computing its loss
        v['predicted_aligned_error'] = head(representations, batch, False)
        return head.loss(v, batch)['loss']
    f = hk.transform(run_tm_loss)
    # Fixed seed so both implementations see the same random features
    np.random.seed(42)
    n_res = consts.n_res
    representations = {'pair': np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)}
    batch = {'backbone_affine_tensor': random_affines_vector((n_res,)), 'backbone_affine_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'resolution': np.array(1.0).astype(np.float32)}
    value = {'structure_module': {'final_affines': random_affines_vector((n_res,))}}
    params = compare_utils.fetch_alphafold_module_weights('alphafold/alphafold_iteration/predicted_aligned_error_head')
    out_gt = f.apply(params, None, representations, batch, value)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda n: torch.tensor(n).cuda())
    representations = tree_map(to_tensor, representations, np.ndarray)
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    # OpenFold key names / 4x4 rigid format
    batch['backbone_rigid_tensor'] = affine_vector_to_4x4(batch['backbone_affine_tensor'])
    batch['backbone_rigid_mask'] = batch['backbone_affine_mask']
    model = compare_utils.get_global_pretrained_openfold()
    # Logits come from the pretrained OpenFold TM head so weights match the GT head
    logits = model.aux_heads.tm(representations['pair'])
    out_repro = tm_loss(logits=logits, final_affine_tensor=value['structure_module']['final_affines'], **{**batch, **c_tm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestModel(unittest.TestCase):
    """End-to-end tests for the full AlphaFold/OpenFold model."""

    def test_dry_run(self):
        """Smoke test: a small randomly-featurized forward pass completes without error."""
        n_seq = consts.n_seq
        n_templ = consts.n_templ
        n_res = consts.n_res
        n_extra_seq = consts.n_extra
        c = model_config('model_1')
        # Shrink the Evoformer and disable checkpointing to keep the test fast
        c.model.evoformer_stack.no_blocks = 4
        c.model.evoformer_stack.blocks_per_ckpt = None
        model = AlphaFold(c)
        batch = {}
        tf = torch.randint((c.model.input_embedder.tf_dim - 1), size=(n_res,))
        batch['target_feat'] = nn.functional.one_hot(tf, c.model.input_embedder.tf_dim).float()
        batch['aatype'] = torch.argmax(batch['target_feat'], dim=(- 1))
        batch['residue_index'] = torch.arange(n_res)
        batch['msa_feat'] = torch.rand((n_seq, n_res, c.model.input_embedder.msa_dim))
        t_feats = random_template_feats(n_templ, n_res)
        batch.update({k: torch.tensor(v) for (k, v) in t_feats.items()})
        extra_feats = random_extra_msa_feats(n_extra_seq, n_res)
        batch.update({k: torch.tensor(v) for (k, v) in extra_feats.items()})
        batch['msa_mask'] = torch.randint(low=0, high=2, size=(n_seq, n_res)).float()
        batch['seq_mask'] = torch.randint(low=0, high=2, size=(n_res,)).float()
        batch.update(data_transforms.make_atom14_masks(batch))
        batch['no_recycling_iters'] = torch.tensor(2.0)
        # The model expects a trailing recycling dimension on every feature
        add_recycling_dims = (lambda t: t.unsqueeze((- 1)).expand(*t.shape, c.data.common.max_recycling_iters))
        batch = tensor_tree_map(add_recycling_dims, batch)
        with torch.no_grad():
            out = model(batch)

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare final atom positions of pretrained OpenFold against AlphaFold on pickled features."""
        def run_alphafold(batch):
            config = compare_utils.get_alphafold_config()
            model = alphafold.model.modules.AlphaFold(config.model)
            return model(batch=batch, is_training=False, return_representations=True)
        f = hk.transform(run_alphafold)
        params = compare_utils.fetch_alphafold_module_weights('')
        with open('tests/test_data/sample_feats.pickle', 'rb') as fp:
            batch = pickle.load(fp)
        out_gt = f.apply(params, jax.random.PRNGKey(42), batch)
        out_gt = out_gt['structure_module']['final_atom_positions']
        # Drop the recycling dimension before converting GT positions to atom14
        batch['residx_atom14_to_atom37'] = batch['residx_atom14_to_atom37'][0]
        batch['atom14_atom_exists'] = batch['atom14_atom_exists'][0]
        out_gt = alphafold.model.all_atom.atom37_to_atom14(out_gt, batch)
        out_gt = torch.as_tensor(np.array(out_gt.block_until_ready()))
        batch['no_recycling_iters'] = np.array([3.0, 3.0, 3.0, 3.0])
        batch = {k: torch.as_tensor(v).cuda() for (k, v) in batch.items()}
        # Index-valued features must be long for torch embedding/gather ops
        batch['aatype'] = batch['aatype'].long()
        batch['template_aatype'] = batch['template_aatype'].long()
        batch['extra_msa'] = batch['extra_msa'].long()
        batch['residx_atom37_to_atom14'] = batch['residx_atom37_to_atom14'].long()
        batch['template_all_atom_mask'] = batch['template_all_atom_masks']
        batch.update(data_transforms.atom37_to_torsion_angles('template_')(batch))
        # Move the recycling dimension from the front to the back, as OpenFold expects
        move_dim = (lambda t: t.permute(*range(len(t.shape))[1:], 0))
        batch = tensor_tree_map(move_dim, batch)
        with torch.no_grad():
            model = compare_utils.get_global_pretrained_openfold()
            out_repro = model(batch)
        out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
        # Last structure-module iteration's positions
        out_repro = out_repro['sm']['positions'][(- 1)]
        out_repro = out_repro.squeeze(0)
        # Looser tolerance: a full forward pass accumulates more numerical drift
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < 0.001))
|
class TestMSARowAttentionWithPairBias(unittest.TestCase):
    """Tests for the MSA row attention (with pair bias) module."""

    def test_shape(self):
        """The module must preserve the shape of the MSA activation tensor."""
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c_z = consts.c_z
        c = 52
        no_heads = 4
        chunk_size = None
        mrapb = MSARowAttentionWithPairBias(c_m, c_z, c, no_heads)
        m = torch.rand((batch_size, n_seq, n_res, c_m))
        z = torch.rand((batch_size, n_res, n_res, c_z))
        shape_before = m.shape
        m = mrapb(m, z=z, chunk_size=chunk_size)
        shape_after = m.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare against AlphaFold's MSARowAttentionWithPairBias using shared pretrained weights."""
        def run_msa_row_att(msa_act, msa_mask, pair_act):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_row = alphafold.model.modules.MSARowAttentionWithPairBias(c_e.msa_row_attention_with_pair_bias, config.model.global_config)
            act = msa_row(msa_act=msa_act, msa_mask=msa_mask, pair_act=pair_act)
            return act
        f = hk.transform(run_msa_row_att)
        n_res = consts.n_res
        n_seq = consts.n_seq
        msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(np.float32)
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + 'msa_row_attention'))
        # Weights are stacked across Evoformer blocks; take block 0's slice
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask, pair_act).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].msa_att_row(torch.as_tensor(msa_act).cuda(), z=torch.as_tensor(pair_act).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask).cuda()).cpu()
        # Mean abs error: pretrained-weight comparisons tolerate minor per-element drift
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestMSAColumnAttention(unittest.TestCase):
    """Tests for the MSA column attention module."""

    def test_shape(self):
        """The module must preserve the shape of the MSA activation tensor."""
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c = 44
        no_heads = 4
        msaca = MSAColumnAttention(c_m, c, no_heads)
        x = torch.rand((batch_size, n_seq, n_res, c_m))
        shape_before = x.shape
        x = msaca(x, chunk_size=None)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare against AlphaFold's MSAColumnAttention using shared pretrained weights."""
        def run_msa_col_att(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_col = alphafold.model.modules.MSAColumnAttention(c_e.msa_column_attention, config.model.global_config)
            act = msa_col(msa_act=msa_act, msa_mask=msa_mask)
            return act
        f = hk.transform(run_msa_col_att)
        n_res = consts.n_res
        n_seq = consts.n_seq
        msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + 'msa_column_attention'))
        # Weights are stacked across Evoformer blocks; take block 0's slice
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].msa_att_col(torch.as_tensor(msa_act).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask).cuda()).cpu()
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestMSAColumnGlobalAttention(unittest.TestCase):
    """Tests for the global MSA column attention used in the extra-MSA stack."""

    def test_shape(self):
        """The module must preserve the shape of the MSA activation tensor."""
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c = 44
        no_heads = 4
        msagca = MSAColumnGlobalAttention(c_m, c, no_heads)
        x = torch.rand((batch_size, n_seq, n_res, c_m))
        shape_before = x.shape
        x = msagca(x, chunk_size=None)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare against AlphaFold's MSAColumnGlobalAttention using shared pretrained weights."""
        def run_msa_col_global_att(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_col = alphafold.model.modules.MSAColumnGlobalAttention(c_e.msa_column_attention, config.model.global_config, name='msa_column_global_attention')
            act = msa_col(msa_act=msa_act, msa_mask=msa_mask)
            return act
        f = hk.transform(run_msa_col_global_att)
        n_res = consts.n_res
        n_seq = consts.n_seq
        c_e = consts.c_e
        msa_act = np.random.rand(n_seq, n_res, c_e)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res))
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/extra_msa_stack/' + 'msa_column_global_attention'))
        # Weights are stacked across extra-MSA blocks; take block 0's slice
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        # BUGFIX: call block_until_ready() once, not twice
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.extra_msa_stack.blocks[0].msa_att_col(torch.as_tensor(msa_act, dtype=torch.float32).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda()).cpu()
        # BUGFIX: the original asserted torch.max(|diff| < eps), i.e. the max of a
        # boolean tensor, which passes if ANY element is within tolerance. The
        # intended check is that the LARGEST error is below tolerance.
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestOuterProductMean(unittest.TestCase):
    """Tests for the OuterProductMean (MSA -> pair communication) module."""

    def test_shape(self):
        """OPM must map an MSA tensor to a pair tensor of shape [*, n_res, n_res, c_z]."""
        c = 31
        opm = OuterProductMean(consts.c_m, consts.c_z, c)
        m = torch.rand((consts.batch_size, consts.n_seq, consts.n_res, consts.c_m))
        mask = torch.randint(0, 2, size=(consts.batch_size, consts.n_seq, consts.n_res))
        m = opm(m, mask=mask, chunk_size=None)
        self.assertTrue((m.shape == (consts.batch_size, consts.n_res, consts.n_res, consts.c_z)))

    @compare_utils.skip_unless_alphafold_installed()
    def test_opm_compare(self):
        """Compare against AlphaFold's OuterProductMean using shared pretrained weights."""
        def run_opm(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_evo = config.model.embeddings_and_evoformer.evoformer
            opm = alphafold.model.modules.OuterProductMean(c_evo.outer_product_mean, config.model.global_config, consts.c_z)
            act = opm(act=msa_act, mask=msa_mask)
            return act
        f = hk.transform(run_opm)
        n_res = consts.n_res
        n_seq = consts.n_seq
        c_m = consts.c_m
        # Scaled activations (x100) stress the numerics of the mean reduction
        msa_act = (np.random.rand(n_seq, n_res, c_m).astype(np.float32) * 100)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/' + 'evoformer_iteration/outer_product_mean'))
        # Weights are stacked across Evoformer blocks; take block 0's slice
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].core.outer_product_mean(torch.as_tensor(msa_act).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask).cuda()).cpu()
        # Looser tolerance due to the large-magnitude inputs above
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < 0.0005))
|
class TestPairTransition(unittest.TestCase):
    """Tests for the pair transition (per-position MLP on the pair stack)."""

    def test_shape(self):
        """The transition must preserve the shape of the pair tensor."""
        c_z = consts.c_z
        n = 4
        pt = PairTransition(c_z, n)
        batch_size = consts.batch_size
        n_res = consts.n_res
        z = torch.rand((batch_size, n_res, n_res, c_z))
        mask = torch.randint(0, 2, size=(batch_size, n_res, n_res))
        shape_before = z.shape
        z = pt(z, mask=mask, chunk_size=None)
        shape_after = z.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare against AlphaFold's pair Transition using shared pretrained weights."""
        def run_pair_transition(pair_act, pair_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            pt = alphafold.model.modules.Transition(c_e.pair_transition, config.model.global_config, name='pair_transition')
            act = pt(act=pair_act, mask=pair_mask)
            return act
        f = hk.transform(run_pair_transition)
        n_res = consts.n_res
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)
        pair_mask = np.ones((n_res, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + 'pair_transition'))
        # Weights are stacked across Evoformer blocks; take block 0's slice
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        # BUGFIX: call block_until_ready() once, not twice
        out_gt = f.apply(params, None, pair_act, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.evoformer.blocks[0].core.pair_transition(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), chunk_size=4, mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda()).cpu()
        # BUGFIX: the original asserted torch.max(|diff| < eps) — the max of a
        # boolean tensor — which passes if ANY element is within tolerance.
        # The intended check is that the LARGEST error is below tolerance.
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestLMA(unittest.TestCase):
    """Tests for low-memory attention (LMA) against standard attention."""

    def test_lma_vs_attention(self):
        """LMA and standard attention must agree element-wise on the same inputs."""
        batch_size = consts.batch_size
        c_hidden = 32
        # Long sequence (4096) so LMA's chunking path is actually exercised
        n = (2 ** 12)
        no_heads = 4
        q = torch.rand(batch_size, n, c_hidden).cuda()
        kv = torch.rand(batch_size, n, c_hidden).cuda()
        bias = [torch.rand(no_heads, 1, n)]
        bias = [b.cuda() for b in bias]
        # NOTE: the original also built unused gating/output fill tensors; removed.
        a = Attention(c_hidden, c_hidden, c_hidden, c_hidden, no_heads).cuda()
        with torch.no_grad():
            l = a(q, kv, biases=bias, use_lma=True)
            real = a(q, kv, biases=bias)
        self.assertTrue((torch.max(torch.abs((l - real))) < consts.eps))
|
class TestStructureModule(unittest.TestCase):
    """Tests for the structure module and its transition sub-module."""

    def test_structure_module_shape(self):
        """Output frames/angles/positions must carry the expected per-layer shapes."""
        batch_size = consts.batch_size
        n = consts.n_res
        c_s = consts.c_s
        c_z = consts.c_z
        c_ipa = 13
        c_resnet = 17
        no_heads_ipa = 6
        no_query_points = 4
        no_value_points = 4
        dropout_rate = 0.1
        no_layers = 3
        no_transition_layers = 3
        no_resnet_layers = 3
        ar_epsilon = 1e-06
        no_angles = 7
        trans_scale_factor = 10
        inf = 100000.0
        sm = StructureModule(c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_query_points, no_value_points, dropout_rate, no_layers, no_transition_layers, no_resnet_layers, no_angles, trans_scale_factor, ar_epsilon, inf)
        s = torch.rand((batch_size, n, c_s))
        z = torch.rand((batch_size, n, n, c_z))
        # aatype indices must be long for embedding lookups
        f = torch.randint(low=0, high=21, size=(batch_size, n)).long()
        out = sm({'single': s, 'pair': z}, f)
        # 7 = quaternion (4) + translation (3) per frame
        self.assertTrue((out['frames'].shape == (no_layers, batch_size, n, 7)))
        self.assertTrue((out['angles'].shape == (no_layers, batch_size, n, no_angles, 2)))
        self.assertTrue((out['positions'].shape == (no_layers, batch_size, n, 14, 3)))

    def test_structure_module_transition_shape(self):
        """The transition sub-module must preserve the single-representation shape."""
        batch_size = 2
        n = 5
        c = 7
        num_layers = 3
        dropout = 0.1
        smt = StructureModuleTransition(c, num_layers, dropout)
        s = torch.rand((batch_size, n, c))
        shape_before = s.shape
        s = smt(s)
        shape_after = s.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_structure_module_compare(self):
        """Compare the pretrained structure module's final atom14 positions with AlphaFold's."""
        config = compare_utils.get_alphafold_config()
        c_sm = config.model.heads.structure_module
        c_global = config.model.global_config
        def run_sm(representations, batch):
            sm = alphafold.model.folding.StructureModule(c_sm, c_global)
            # stop_gradient mirrors inference-time behavior inside the hk.transform
            representations = {k: jax.lax.stop_gradient(v) for (k, v) in representations.items()}
            batch = {k: jax.lax.stop_gradient(v) for (k, v) in batch.items()}
            return sm(representations, batch, is_training=False)
        f = hk.transform(run_sm)
        n_res = 200
        representations = {'single': np.random.rand(n_res, consts.c_s).astype(np.float32), 'pair': np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)}
        batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'aatype': np.random.randint(0, 21, (n_res,))}
        # Per-residue atom existence masks derived from residue type
        batch['atom14_atom_exists'] = np.take(restype_atom14_mask, batch['aatype'], axis=0)
        batch['atom37_atom_exists'] = np.take(restype_atom37_mask, batch['aatype'], axis=0)
        batch.update(make_atom14_masks_np(batch))
        params = compare_utils.fetch_alphafold_module_weights('alphafold/alphafold_iteration/structure_module')
        key = jax.random.PRNGKey(42)
        out_gt = f.apply(params, key, representations, batch)
        out_gt = torch.as_tensor(np.array(out_gt['final_atom14_positions'].block_until_ready()))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.structure_module({'single': torch.as_tensor(representations['single']).cuda(), 'pair': torch.as_tensor(representations['pair']).cuda()}, torch.as_tensor(batch['aatype']).cuda(), mask=torch.as_tensor(batch['seq_mask']).cuda(), inplace_safe=False)
        out_repro = out_repro['positions'][(- 1)].cpu()
        # Loose tolerance: accumulated drift over many iterative refinement steps
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < 0.05))
|
class TestInvariantPointAttention(unittest.TestCase):
    """Tests for Invariant Point Attention (IPA)."""

    def test_shape(self):
        """IPA must preserve the shape of the single representation."""
        c_m = 13
        c_z = 17
        c_hidden = 19
        no_heads = 5
        no_qp = 7
        no_vp = 11
        batch_size = 2
        n_res = 23
        s = torch.rand((batch_size, n_res, c_m))
        z = torch.rand((batch_size, n_res, n_res, c_z))
        mask = torch.ones((batch_size, n_res))
        rot_mats = torch.rand((batch_size, n_res, 3, 3))
        rots = Rotation(rot_mats=rot_mats, quats=None)
        trans = torch.rand((batch_size, n_res, 3))
        r = Rigid(rots, trans)
        ipa = InvariantPointAttention(c_m, c_z, c_hidden, no_heads, no_qp, no_vp)
        shape_before = s.shape
        s = ipa(s, z, r, mask)
        self.assertTrue((s.shape == shape_before))

    @compare_utils.skip_unless_alphafold_installed()
    def test_ipa_compare(self):
        """Compare the pretrained IPA module against AlphaFold's, on identical rigid frames."""
        def run_ipa(act, static_feat_2d, mask, affine):
            config = compare_utils.get_alphafold_config()
            ipa = alphafold.model.folding.InvariantPointAttention(config.model.heads.structure_module, config.model.global_config)
            attn = ipa(inputs_1d=act, inputs_2d=static_feat_2d, mask=mask, affine=affine)
            return attn
        f = hk.transform(run_ipa)
        n_res = consts.n_res
        c_s = consts.c_s
        c_z = consts.c_z
        sample_act = np.random.rand(n_res, c_s)
        sample_2d = np.random.rand(n_res, n_res, c_z)
        sample_mask = np.ones((n_res, 1))
        affines = random_affines_4x4((n_res,))
        # Same 4x4 affines converted to each library's rigid representation
        rigids = alphafold.model.r3.rigids_from_tensor4x4(affines)
        quats = alphafold.model.r3.rigids_to_quataffine(rigids)
        transformations = Rigid.from_tensor_4x4(torch.as_tensor(affines).float().cuda())
        sample_affine = quats
        ipa_params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/structure_module/' + 'fold_iteration/invariant_point_attention'))
        out_gt = f.apply(ipa_params, None, sample_act, sample_2d, sample_mask, sample_affine).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        with torch.no_grad():
            model = compare_utils.get_global_pretrained_openfold()
            out_repro = model.structure_module.ipa(torch.as_tensor(sample_act).float().cuda(), torch.as_tensor(sample_2d).float().cuda(), transformations, torch.as_tensor(sample_mask.squeeze((- 1))).float().cuda()).cpu()
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class TestAngleResnet(unittest.TestCase):
    """Shape checks for the AngleResnet side-chain angle head."""

    def test_shape(self):
        # Small arbitrary dimensions keep the smoke test fast.
        batch_size, n = 2, 3
        c_s, c_hidden = 13, 11
        no_layers, no_angles = 5, 7
        epsilon = 1e-12

        resnet = AngleResnet(c_s, c_hidden, no_layers, no_angles, epsilon)
        s = torch.rand((batch_size, n, c_s))
        s_initial = torch.rand((batch_size, n, c_s))

        # The resnet returns (unnormalized, normalized) angle tensors; only
        # the second is shape-checked here.
        _, angles = resnet(s, s_initial)

        # Each angle is represented as a 2-vector (sin/cos-style pair).
        self.assertTrue(angles.shape == (batch_size, n, no_angles, 2))
|
class TestTemplatePointwiseAttention(unittest.TestCase):
    """Shape checks for TemplatePointwiseAttention."""

    def test_shape(self):
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_t = consts.c_t
        c_z = consts.c_z
        hidden_dim = 26
        num_heads = 13
        inf = 10000000.0

        attn = TemplatePointwiseAttention(c_t, c_z, hidden_dim, num_heads, inf=inf)

        template_embeds = torch.rand((batch_size, n_seq, n_res, n_res, c_t))
        pair_embeds = torch.rand((batch_size, n_res, n_res, c_z))

        # The module attends over templates to produce an update with the
        # same shape as the pair representation.
        update = attn(template_embeds, pair_embeds, chunk_size=None)
        self.assertTrue(update.shape == pair_embeds.shape)
|
class TestTemplatePairStack(unittest.TestCase):
    """Shape and parity tests for the TemplatePairStack module."""

    def test_shape(self):
        """Output of the stack must preserve the input shape."""
        batch_size = consts.batch_size
        c_t = consts.c_t
        c_hidden_tri_att = 7
        c_hidden_tri_mul = 7
        no_blocks = 2
        no_heads = 4
        pt_inner_dim = 15
        dropout = 0.25
        n_templ = consts.n_templ
        n_res = consts.n_res
        blocks_per_ckpt = None
        chunk_size = 4
        inf = 10000000.0
        eps = 1e-07
        # NOTE(review): the literal None is passed for blocks_per_ckpt rather
        # than the local variable above (equivalent here, both are None).
        tpe = TemplatePairStack(c_t, c_hidden_tri_att=c_hidden_tri_att, c_hidden_tri_mul=c_hidden_tri_mul, no_blocks=no_blocks, no_heads=no_heads, pair_transition_n=pt_inner_dim, dropout_rate=dropout, blocks_per_ckpt=None, inf=inf, eps=eps)
        t = torch.rand((batch_size, n_templ, n_res, n_res, c_t))
        mask = torch.randint(0, 2, (batch_size, n_templ, n_res, n_res))
        shape_before = t.shape
        t = tpe(t, mask, chunk_size=chunk_size)
        shape_after = t.shape
        self.assertTrue((shape_before == shape_after))

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare against AlphaFold's TemplatePairStack (plus its output LayerNorm).

        Requires alphafold/haiku, pretrained OpenFold weights, and CUDA.
        """
        def run_template_pair_stack(pair_act, pair_mask):
            config = compare_utils.get_alphafold_config()
            c_ee = config.model.embeddings_and_evoformer
            tps = alphafold.model.modules.TemplatePairStack(c_ee.template.template_pair_stack, config.model.global_config, name='template_pair_stack')
            act = tps(pair_act, pair_mask, is_training=False)
            # The reference applies a final layer norm outside the stack.
            ln = hk.LayerNorm([(- 1)], True, True, name='output_layer_norm')
            act = ln(act)
            return act
        f = hk.transform(run_template_pair_stack)
        n_res = consts.n_res
        pair_act = np.random.rand(n_res, n_res, consts.c_t).astype(np.float32)
        pair_mask = np.random.randint(low=0, high=2, size=(n_res, n_res)).astype(np.float32)
        # Weights for both the stack and the trailing layer norm are fetched.
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/template_embedding/' + 'single_template_embedding/template_pair_stack'))
        params.update(compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/template_embedding/' + 'single_template_embedding/output_layer_norm')))
        out_gt = f.apply(params, jax.random.PRNGKey(42), pair_act, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        # unsqueeze adds the template dimension expected by OpenFold's stack.
        out_repro = model.template_pair_stack(torch.as_tensor(pair_act).unsqueeze((- 4)).cuda(), torch.as_tensor(pair_mask).unsqueeze((- 3)).cuda(), chunk_size=None, _mask_trans=False).cpu()
        self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
|
class Template(unittest.TestCase):
    """Parity test for OpenFold's template embedding against AlphaFold."""

    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Embed random template features with both frameworks and compare.

        Requires alphafold/haiku, pretrained OpenFold weights, and CUDA.
        """
        def test_template_embedding(pair, batch, mask_2d):
            config = compare_utils.get_alphafold_config()
            te = alphafold.model.modules.TemplateEmbedding(config.model.embeddings_and_evoformer.template, config.model.global_config)
            act = te(pair, batch, mask_2d, is_training=False)
            return act
        f = hk.transform(test_template_embedding)
        n_res = consts.n_res
        n_templ = consts.n_templ
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)
        batch = random_template_feats(n_templ, n_res)
        # AlphaFold expects the plural key name for the atom mask.
        batch['template_all_atom_masks'] = batch['template_all_atom_mask']
        pair_mask = np.random.randint(0, 2, (n_res, n_res)).astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights('alphafold/alphafold_iteration/evoformer/template_embedding')
        out_gt = f.apply(params, jax.random.PRNGKey(42), pair_act, batch, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        # OpenFold's embed_templates additionally consumes a one-hot target_feat.
        inds = np.random.randint(0, 21, (n_res,))
        batch['target_feat'] = np.eye(22)[inds]
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.embed_templates({k: torch.as_tensor(v).cuda() for (k, v) in batch.items()}, torch.as_tensor(pair_act).cuda(), torch.as_tensor(pair_mask).cuda(), templ_dim=0, inplace_safe=False)
        out_repro = out_repro['template_pair_embedding']
        out_repro = out_repro.cpu()
        # BUG FIX: the original asserted torch.max(torch.abs(diff) < eps), i.e.
        # the max of a *boolean* tensor, which is truthy whenever ANY element is
        # within tolerance. Assert that the WORST-case deviation is below eps.
        self.assertTrue(torch.max(torch.abs(out_gt - out_repro)) < consts.eps)
|
class TestTriangularAttention(unittest.TestCase):
    """Shape and parity tests for TriangleAttention (starting/ending node)."""

    def test_shape(self):
        """The attention update must preserve the pair-tensor shape."""
        c_z = consts.c_z
        c = 12
        no_heads = 4
        starting = True
        tan = TriangleAttention(c_z, c, no_heads, starting)
        batch_size = consts.batch_size
        n_res = consts.n_res
        x = torch.rand((batch_size, n_res, n_res, c_z))
        shape_before = x.shape
        x = tan(x, chunk_size=None)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    def _tri_att_compare(self, starting=False):
        """Compare one OpenFold triangle-attention block against AlphaFold's.

        Requires alphafold/haiku, pretrained OpenFold weights, and CUDA.
        """
        name = (('triangle_attention_' + ('starting' if starting else 'ending')) + '_node')
        def run_tri_att(pair_act, pair_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            tri_att = alphafold.model.modules.TriangleAttention((c_e.triangle_attention_starting_node if starting else c_e.triangle_attention_ending_node), config.model.global_config, name=name)
            act = tri_att(pair_act=pair_act, pair_mask=pair_mask)
            return act
        f = hk.transform(run_tri_att)
        n_res = consts.n_res
        # Scale up activations so small numeric differences are visible.
        pair_act = (np.random.rand(n_res, n_res, consts.c_z) * 100)
        pair_mask = np.random.randint(low=0, high=2, size=(n_res, n_res))
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + name))
        # Weights are stacked over evoformer iterations; take the first block.
        # NOTE(review): jax.numpy.DeviceArray is removed in recent jax
        # releases (replaced by jax.Array) — confirm the pinned jax version.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, pair_act, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        module = (model.evoformer.blocks[0].core.tri_att_start if starting else model.evoformer.blocks[0].core.tri_att_end)
        # Deep-copy so flipping `starting` does not mutate the shared model.
        module = copy.deepcopy(module)
        module.starting = starting
        out_repro = module(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda(), chunk_size=None).cpu()
        # Mean (not max) deviation is compared here, unlike the other tests.
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < consts.eps))

    @compare_utils.skip_unless_alphafold_installed()
    def test_tri_att_end_compare(self):
        self._tri_att_compare()

    @compare_utils.skip_unless_alphafold_installed()
    def test_tri_att_start_compare(self):
        self._tri_att_compare(starting=True)
|
class TestTriangularMultiplicativeUpdate(unittest.TestCase):
    """Shape, parity, and in-place-equivalence tests for triangle multiplication."""

    def test_shape(self):
        """The multiplicative update must preserve the pair-tensor shape."""
        c_z = consts.c_z
        c = 11
        tm = TriangleMultiplicationOutgoing(c_z, c)
        # BUG FIX: the original read `n_res = consts.c_z`, an apparent typo —
        # the residue count should come from consts.n_res, matching every
        # other shape test in this file.
        n_res = consts.n_res
        batch_size = consts.batch_size
        x = torch.rand((batch_size, n_res, n_res, c_z))
        mask = torch.randint(0, 2, size=(batch_size, n_res, n_res))
        shape_before = x.shape
        x = tm(x, mask)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    def _tri_mul_compare(self, incoming=False):
        """Compare one OpenFold triangle-multiplication block against AlphaFold's.

        Requires alphafold/haiku, pretrained OpenFold weights, and CUDA.
        """
        name = ('triangle_multiplication_' + ('incoming' if incoming else 'outgoing'))
        def run_tri_mul(pair_act, pair_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            tri_mul = alphafold.model.modules.TriangleMultiplication((c_e.triangle_multiplication_incoming if incoming else c_e.triangle_multiplication_outgoing), config.model.global_config, name=name)
            act = tri_mul(act=pair_act, mask=pair_mask)
            return act
        f = hk.transform(run_tri_mul)
        n_res = consts.n_res
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)
        pair_mask = np.random.randint(low=0, high=2, size=(n_res, n_res))
        pair_mask = pair_mask.astype(np.float32)
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/evoformer_iteration/' + name))
        # Weights are stacked over evoformer iterations; take the first block.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, pair_act, pair_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt))
        model = compare_utils.get_global_pretrained_openfold()
        module = (model.evoformer.blocks[0].core.tri_mul_in if incoming else model.evoformer.blocks[0].core.tri_mul_out)
        out_repro = module(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda(), inplace_safe=True, _inplace_chunk_size=4).cpu()
        self.assertTrue((torch.mean(torch.abs((out_gt - out_repro))) < consts.eps))

    @compare_utils.skip_unless_alphafold_installed()
    def test_tri_mul_out_compare(self):
        self._tri_mul_compare()

    @compare_utils.skip_unless_alphafold_installed()
    def test_tri_mul_in_compare(self):
        self._tri_mul_compare(incoming=True)

    def _tri_mul_inplace(self, incoming=False):
        """Check that the in-place (chunked) path matches the stock forward pass."""
        n_res = consts.n_res
        pair_act = np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)
        pair_mask = np.random.randint(low=0, high=2, size=(n_res, n_res))
        pair_mask = pair_mask.astype(np.float32)
        model = compare_utils.get_global_pretrained_openfold()
        module = (model.evoformer.blocks[0].core.tri_mul_in if incoming else model.evoformer.blocks[0].core.tri_mul_out)
        out_stock = module(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda(), inplace_safe=False).cpu()
        out_inplace = module(torch.as_tensor(pair_act, dtype=torch.float32).cuda(), mask=torch.as_tensor(pair_mask, dtype=torch.float32).cuda(), inplace_safe=True, _inplace_chunk_size=2).cpu()
        self.assertTrue((torch.mean(torch.abs((out_stock - out_inplace))) < consts.eps))

    def test_tri_mul_out_inference(self):
        self._tri_mul_inplace()

    def test_tri_mul_in_inference(self):
        self._tri_mul_inplace(incoming=True)
|
def main(args):
    """Predict a structure for a single FASTA sequence threaded onto a custom
    template, write the unrelaxed PDB, and run relaxation.

    Raises:
        ValueError: if the input FASTA contains more than one sequence.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    config = model_config(args.config_preset)
    # Seed numpy/torch for reproducible feature processing; draw a random
    # seed when none was supplied on the command line.
    random_seed = args.data_random_seed
    if (random_seed is None):
        random_seed = random.randrange((2 ** 32))
    numpy.random.seed(random_seed)
    torch.manual_seed((random_seed + 1))
    feature_processor = feature_pipeline.FeaturePipeline(config.data)
    with open(args.input_fasta) as fasta_file:
        (tags, sequences) = parse_fasta(fasta_file.read())
    # The threading script is strictly single-sequence.
    if (len(sequences) != 1):
        raise ValueError('the threading script can only process a single sequence')
    query_sequence = sequences[0]
    query_tag = tags[0]
    # Build features directly from the provided mmCIF template (no MSA search).
    feature_dict = make_sequence_features_with_custom_template(query_sequence, args.input_mmcif, args.template_id, args.chain_id, args.kalign_binary_path)
    processed_feature_dict = feature_processor.process_features(feature_dict, mode='predict')
    processed_feature_dict = {k: torch.as_tensor(v, device=args.model_device) for (k, v) in processed_feature_dict.items()}
    model_generator = load_models_from_command_line(config, args.model_device, args.openfold_checkpoint_path, args.jax_param_path, args.output_dir)
    output_name = f'{query_tag}_{args.config_preset}'
    for (model, output_directory) in model_generator:
        out = run_model(model, processed_feature_dict, query_tag, args.output_dir)
        # x[..., -1] drops all but the final recycling iteration before
        # converting tensors to numpy — assumes the last dim is recycling;
        # TODO confirm against the feature pipeline.
        processed_feature_dict = tensor_tree_map((lambda x: numpy.array(x[(..., (- 1))].cpu())), processed_feature_dict)
        out = tensor_tree_map((lambda x: numpy.array(x.cpu())), out)
        # NOTE(review): 200 is presumably the multimer residue-index gap
        # expected by prep_output — confirm against its signature.
        unrelaxed_protein = prep_output(out, processed_feature_dict, feature_dict, feature_processor, args.config_preset, 200, args.subtract_plddt)
        unrelaxed_output_path = os.path.join(output_directory, f'{output_name}_unrelaxed.pdb')
        with open(unrelaxed_output_path, 'w') as fp:
            fp.write(protein.to_pdb(unrelaxed_protein))
        logger.info(f'Output written to {unrelaxed_output_path}...')
        logger.info(f'Running relaxation on {unrelaxed_output_path}...')
        relax_protein(config, args.model_device, unrelaxed_protein, output_directory, output_name, False)
|
def get_args_parser():
    """Build the CLI parser for the ddG-prediction script.

    Returns:
        argparse.ArgumentParser: parser covering I/O, model, and runtime options.
    """
    p = argparse.ArgumentParser('Output ddgs for all single and double mutations')

    # Input / output and bookkeeping.
    p.add_argument('--name', type=str, help='name to save under')
    p.add_argument('--seq', type=str, help='raw sequence or fasta file')
    p.add_argument('--msa_dir', type=str, help='directory with 1 or more a3m files')
    p.add_argument('--output_dir', type=Path, default='logs/debug')
    p.add_argument('--seed', type=int, default=0)
    p.add_argument('--resume', default='', type=str)

    # Model architecture / decoding options.
    p.add_argument('--aa_expand', default='backbone', help='scratch|backbone')
    p.add_argument('--single_dec', default='naive', help='naive')
    p.add_argument('--multi_dec', default='epistasis', help='additive|epistasis')
    p.add_argument('--head_dim', type=int, default=128)
    p.add_argument('--backbone', default='esm2_t33_650M_UR50D', help='af|esm2_t33_650M_UR50D')
    p.add_argument('--finetune_backbone', type=str, default='models/finetuning_ptm_2.pt')
    p.add_argument('--freeze_at', default=0, help='freeze backbone up to layer X')

    # Runtime / data options.
    p.add_argument('--device', default='cuda')
    p.add_argument('--n_msa_seqs', type=int, default=128)
    p.add_argument('--n_extra_msa_seqs', type=int, default=1024)
    p.add_argument('--af_extract_feat', type=str, default='both', help='which features to use from AF: both|evo|struct')

    return p
|
@torch.no_grad()
def forward_esm(model, alphabet, args):
    """Run the ESM-backbone model on the input sequence.

    Returns the model's prediction dict (ddG tensors etc.).
    """
    device = torch.device(args.device)
    # Tokenize the single input sequence with the ESM batch converter.
    converter = alphabet.get_batch_converter()
    _, _, tokens = converter([('1', load_seq(args.seq))])
    tokens = tokens.to(device)
    model.to(device)
    pred = model(tokens, {'seqs': [load_seq(args.seq)]})
    return pred
|
@torch.no_grad()
def forward_af(model, args):
    """Featurize the input with the OpenFold pipeline and run the AF-backbone model.

    Returns the model's prediction dict (ddG tensors etc.).
    """
    from openfold.config import model_config
    from openfold.data import feature_pipeline, data_pipeline

    device = torch.device(args.device)

    # Training-style config with MSA sizes capped to match the CLI defaults.
    cfg = model_config('finetuning', train=True)
    cfg.data.train.max_extra_msa = 1024
    cfg.data.predict.max_extra_msa = 1024
    cfg.data.train.max_msa_clusters = 128
    cfg.data.predict.max_msa_clusters = 128

    # Raw features from the FASTA + MSA directory, then the predict-mode pipeline.
    pipeline = data_pipeline.DataPipeline(None)
    featurizer = feature_pipeline.FeaturePipeline(cfg.data)
    raw_feats = pipeline.process_fasta(args.seq, args.msa_dir)
    feats = featurizer.process_features(raw_feats, mode='predict')

    batch = [{k: v.to(device) for (k, v) in feats.items()}]
    model.to(device)
    pred = model(batch, {'seqs': [load_seq(args.seq)]})
    return pred
|
def load_seq(seq):
    """Return an amino-acid sequence string.

    If *seq* looks like a FASTA path ('.fasta' in the string), parse the file
    and return a record's sequence; otherwise return *seq* unchanged.
    """
    if '.fasta' not in seq:
        return seq
    # NOTE(review): with multiple records this keeps the LAST one — confirm
    # that is intended for multi-record FASTA inputs.
    for record in SeqIO.parse(seq, 'fasta'):
        seq = str(record.seq)
    return seq
|
def main(args):
    """Predict ddG for all single mutations and all stabilizing double mutations.

    Writes '<name>_single.csv' (per-position DMS table) and
    '<name>_double.csv' (stabilizing doubles sorted by predicted ddG) into
    args.output_dir.
    """
    print('WARNING: We observe a cysteine stabilization bias when examining DMS predictions (cysteine is often predicted to be the most stabilizing substitution). We are unsure if this is an artifact from the training data but attempts to fix this bias lead to worse metrics on the test set. Use cysteine predictions with caution.')
    # Seed per-rank so distributed workers do not share RNG streams.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    model = MutateEverything(args)
    misc.load_model(args, model, None, None)
    model.eval()
    print(f'Start testing')
    start_time = time.time()
    seq = load_seq(args.seq)
    # Dispatch to the configured backbone; pred holds the ddG tensors.
    # NOTE(review): an unrecognized backbone leaves `pred` unbound — confirm
    # upstream validation of args.backbone.
    if (args.backbone == 'af'):
        pred = forward_af(model, args)
    elif ('esm' in args.backbone):
        (_, alphabet) = model.backbone.get_alphabet()
        pred = forward_esm(model, alphabet, args)
    # mut1_ddg: per-position single-mutant ddGs; mut2_ddg: pairwise doubles —
    # shapes assumed (L, 20) and (L, 20, L, 20) from the indexing below; TODO confirm.
    mut1_ddg = pred['mut1_ddg'][0].cpu()
    mut2_ddg = pred['mut2_ddg'][0].cpu()
    # --- Single-mutant DMS table, one row per sequence position ---
    rows = []
    for l in range(len(seq)):
        mut1_ddg_l = mut1_ddg[l]
        muts = {one_to_three[k]: v for (k, v) in zip(one_letters, mut1_ddg_l)}
        muts = {f'pr{k}': f'{muts[k].item():.04f}' for k in sorted(muts)}
        rows.append({'seq_num': (l + 1), 'wtAA': one_to_three[seq[l]], 'predAA': one_to_three[one_letters[mut1_ddg_l.argmin()]], 'pred_ddG': f'{mut1_ddg_l.min().item():.04f}', 'stable_mut_count': (mut1_ddg_l < (- 0.5)).sum().item(), 'neutral_mut_count': (((- 0.5) < mut1_ddg_l) & (mut1_ddg_l < 0.5)).sum().item(), 'destable_mut_count': (mut1_ddg_l > 0.5).sum().item(), **muts, 'seq': seq})
    df = pd.DataFrame.from_dict(rows)
    fp = (args.output_dir / f'{args.name}_single.csv')
    print(f'Writing pred dms to {fp}')
    df.to_csv(fp, index=False)
    # --- Stabilizing double mutants (predicted ddG < -0.5) ---
    stbl2 = (mut2_ddg < (- 0.5))
    (p1s, a1s, p2s, a2s) = stbl2.nonzero(as_tuple=True)
    # Deduplicate position pairs via p1 < p2.
    # NOTE(review): `a1s < a2s` additionally drops pairs whose first amino-acid
    # index >= the second even at distinct positions — confirm this is intended.
    cond = ((p1s < p2s) & (a1s < a2s))
    p1s = p1s[cond]
    a1s = a1s[cond]
    p2s = p2s[cond]
    a2s = a2s[cond]
    muts = []
    for (p1, a1, p2, a2) in zip(p1s, a1s, p2s, a2s):
        wt1 = seq[p1]
        wt2 = seq[p2]
        # Skip "mutations" to the wild-type residue itself.
        if ((wt1 == one_letters[a1]) or (wt2 == one_letters[a2])):
            continue
        m1 = f'{wt1}{(p1 + 1)}{one_letters[a1]}'
        m2 = f'{wt2}{(p2 + 1)}{one_letters[a2]}'
        ddg = mut2_ddg[(p1, a1, p2, a2)].item()
        muts.append((m1, m2, ddg))
    df2 = pd.DataFrame(muts, columns=['mut1', 'mut2', 'ddG'])
    df2 = df2.sort_values('ddG')
    fp = (args.output_dir / f'{args.name}_double.csv')
    print(f'Writing stabilizing doubles to {fp}')
    df2.to_csv(fp, index=False)
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Testing time {}'.format(total_time_str))
|
@ex.config
def cfg_base():
    # Sacred base config: every local assigned here becomes a config entry,
    # so the names (uuid, cfg, image_dim) must not be changed.
    uuid = 'basic'
    cfg = {}
    # PPO learner hyperparameters, with off-policy replay enabled.
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'ppo_epoch': 8, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # Gibson exploration environment; sensors/transforms left for named configs.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualExplorationEnv', 'env_specific_kwargs': {}, 'sensors': {}, 'num_processes': 1, 'num_val_processes': 0, 'additional_repeat_count': 0, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None}
    # Checkpointing and tensorboard/visdom logging destinations.
    cfg['saving'] = {'autofix_log_dir': False, 'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'logging_type': 'tensorboard', 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_port': '8097', 'visdom_server': 'localhost'}
    cfg['training'] = {'cuda': True, 'num_frames': 500000.0, 'resumable': True, 'seed': 42}
|
@ex.named_config
def cfg_doom_navigation():
    # Sacred named config: VizDoom multi-goal navigation task (on-policy PPO,
    # no replay). Locals become config entries; do not rename.
    uuid = 'doom_visualnavigation'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.01, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 200, 'num_mini_batch': 16, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 4, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.0001, 'perception_network': 'AtariNet', 'test': False, 'use_replay': False, 'replay_buffer_size': 1000, 'on_policy_epoch': 4, 'off_policy_epoch': 0}
    image_dim = 84
    # NOTE(review): this config uses the key 'env_specific_args' while the base
    # config uses 'env_specific_kwargs' — confirm which the env factory reads.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Doom_VizdoomMultiGoalWithClutterEnv.room-v0', 'env_specific_args': {'episode_timeout': 100, 'n_clutter_objects': 8, 'n_goal_objects': 1}, 'sensors': {'rgb_filled': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'additional_repeat_count': 3}
    cfg['saving'] = {'port': 8097, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis': False, 'vis_interval': 200, 'launcher_script': None, 'visdom_server': 'localhost', 'visdom_port': '8097', 'checkpoint': None, 'checkpoint_configs': False}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 5000000.0, 'resumable': True}
|
@ex.named_config
def scratch_doom():
    # Sacred named config: train-from-scratch perception for the Doom
    # navigation task (RGB only, no map/target channels).
    uuid = 'doom_scratch'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet', 'perception_network_kwargs': {'n_map_channels': 0, 'use_target': False}}
    # The pre-aggregation transform is stored as source text (whitespace
    # stripped) and presumably eval'd by the pipeline — confirm at the consumer.
    cfg['env'] = {'env_specific_kwargs': {'episode_timeout': 1000, 'n_clutter_objects': 8, 'n_goal_objects': 1}, 'transform_fn_pre_aggregation': "\n                TransformFactory.splitting(\n                    {\n                    'color': {\n                        'rgb_filled':rescale_centercrop_resize((3,84,84)) }\n                    },\n                    keep_unnamed=False)\n                ".translate(remove_whitespace), 'transform_fn_post_aggregation': None}
|
@ex.named_config
def cfg_doom_exploration():
    # Sacred named config: VizDoom myopic-exploration task (on-policy PPO,
    # no replay). Locals become config entries; do not rename.
    uuid = 'doom_myopicexploration'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.01, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 200, 'num_mini_batch': 16, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 4, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.0001, 'perception_network': 'AtariNet', 'test': False, 'use_replay': False, 'replay_buffer_size': 1000, 'on_policy_epoch': 4, 'off_policy_epoch': 0}
    image_dim = 84
    # NOTE(review): uses 'env_specific_args' (cf. base config's
    # 'env_specific_kwargs') — confirm which key the env factory reads.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Doom_VizdoomExplorationEnv.room-v0', 'env_specific_args': {'episode_timeout': 2000}, 'sensors': {'rgb_filled': None, 'taskonomy': None, 'map': None, 'occupancy': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'additional_repeat_count': 3}
    cfg['saving'] = {'port': 8097, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis': False, 'vis_interval': 200, 'launcher_script': None, 'visdom_server': 'localhost', 'visdom_port': '8097', 'checkpoint': None, 'checkpoint_configs': False}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def scratch_doom_exploration():
    # Sacred named config: train-from-scratch perception for Doom exploration
    # (RGB plus a 1-channel occupancy map input).
    uuid = 'doom_scratch_exploration'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet', 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    # Transform stored as whitespace-stripped source text, presumably eval'd
    # downstream — confirm at the consumer.
    cfg['env'] = {'env_specific_kwargs': {}, 'transform_fn_pre_aggregation': "\n                TransformFactory.splitting(\n                    {\n                    'color': {\n                        'rgb_filled':rescale_centercrop_resize((3,84,84)) },\n                    'occupancy': {\n                        'map': rescale_centercrop_resize((1,84,84))}\n                    },\n                    keep_unnamed=False)\n                ".translate(remove_whitespace), 'transform_fn_post_aggregation': None}
|
@ex.named_config
def cfg_exploration():
    # Sacred named config: Gibson Husky visual-exploration task with replay.
    # Locals become config entries; do not rename.
    uuid = 'gibson_exploration'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # Start poses are drawn from the Beechwood model's first-floor pose file.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualExplorationEnv', 'env_specific_kwargs': {'target_dim': 16, 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_explore_train_noX.yaml', 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv'), 'blind': False, 'blank_sensor': True}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'additional_repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_navigation():
    # Sacred named config: Gibson Husky visual-navigation task with replay.
    # Locals become config entries; do not rename.
    uuid = 'gibson_visualnavigation'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # NOTE(review): uses 'repeat_count' where other configs use
    # 'additional_repeat_count' — confirm which key the env factory reads.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualNavigateEnv', 'env_specific_kwargs': {'blind': False, 'blank_sensor': True, 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_navigate.yaml', 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv')}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_planning():
    # Sacred named config: Gibson Husky coordinate-navigation (planning) task.
    # Locals become config entries; do not rename.
    uuid = 'gibson_coordinatenavigation'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # NOTE(review): uses 'repeat_count' (cf. 'additional_repeat_count'
    # elsewhere) — confirm which key the env factory reads.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyCoordinateNavigateEnv', 'env_specific_kwargs': {'blind': False, 'blank_sensor': True, 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv'), 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_coordinate_navigate.yaml', 'target_dim': 16}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
|
@ex.named_config
def cfg_habitat():
    # Sacred named config: core Habitat PointNav setup (the richest learner/env
    # option set — SLAM, intrinsic losses, optimizer selection, etc.).
    # Locals become config entries; do not rename.
    uuid = 'habitat_core'
    cfg = {}
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 1000, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network_reinit': False, 'perception_network': 'AtariNet', 'perception_network_kwargs': {'extra_kwargs': {'normalize_taskonomy': True}}, 'test': False, 'use_replay': True, 'replay_buffer_size': 3000, 'on_policy_epoch': 8, 'off_policy_epoch': 8, 'slam_class': None, 'slam_kwargs': {}, 'loss_kwargs': {'intrinsic_loss_coefs': [], 'intrinsic_loss_types': []}, 'deterministic': False, 'rollout_value_batch_multiplier': 2, 'cache_kwargs': {}, 'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {}}
    # 8 train processes + 1 validation process; transforms configured by
    # the task-specific named configs below.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Habitat_PointNav', 'env_specific_kwargs': {'swap_building_k_episodes': 10, 'gpu_devices': [0], 'scenario_kwargs': {'use_depth': False, 'max_geodesic_dist': 99999}, 'map_kwargs': {'map_building_size': 22, 'map_max_pool': False, 'use_cuda': False, 'history_size': None}, 'target_dim': 16, 'val_scenes': None, 'train_scenes': None}, 'sensors': {'features': None, 'taskonomy': None, 'rgb_filled': None, 'map': None, 'target': None, 'depth': None, 'global_pos': None, 'pointgoal': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_pre_aggregation_fn': None, 'transform_fn_pre_aggregation_kwargs': {}, 'transform_fn_post_aggregation': None, 'transform_fn_post_aggregation_fn': None, 'transform_fn_post_aggregation_kwargs': {}, 'num_processes': 8, 'num_val_processes': 1, 'additional_repeat_count': 0}
    cfg['saving'] = {'checkpoint': None, 'checkpoint_num': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 10, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097', 'obliterate_logs': False}
    cfg['training'] = {'cuda': True, 'gpu_devices': None, 'seed': 42, 'num_frames': 100000000.0, 'resumable': False}
|
@ex.named_config
def cfg_test():
    # Sacred named config: evaluation mode. `override` entries presumably take
    # precedence over `cfg` when merged downstream — confirm at the consumer.
    cfg = {}
    # NOTE(review): 'resumable' lives under cfg['training'] in other configs;
    # here it is set under 'saving' — confirm which section is read.
    cfg['saving'] = {'resumable': True, 'checkpoint_configs': True}
    override = {}
    override['saving'] = {'visdom_server': 'localhost'}
    override['env'] = {'num_processes': 10, 'num_val_processes': 10, 'env_specific_kwargs': {'test_mode': True, 'scenario_kwargs': {'max_geodesic_dist': 99999}}}
    override['learner'] = {'test_k_episodes': 994, 'test': True}
|
@ex.named_config
def planning():
    # Sacred named config: Habitat PointNav planning variant — 3-channel map
    # plus target input; transforms given as factory-name + kwargs strings
    # that are presumably resolved/eval'd downstream.
    uuid = 'habitat_planning'
    cfg = {}
    cfg['learner'] = {'perception_network_kwargs': {'n_map_channels': 3, 'use_target': True}}
    cfg['env'] = {'env_name': 'Habitat_PointNav', 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'map': 'identity_transform()', 'global_pos': 'identity_transform()', 'target': 'identity_transform()'}, 'keep_unnamed': False}, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'map': 'map_pool_collated((3,84,84))'}, 'keep_unnamed': True}}
|
@ex.named_config
def exploration():
    # Sacred named config: Habitat exploration variant — higher LR, 1-channel
    # egocentric map (no target), zero slack reward, 1000-step episodes.
    uuid = 'habitat_exploration'
    cfg = {}
    cfg['learner'] = {'lr': 0.001, 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    cfg['env'] = {'env_name': 'Habitat_Exploration', 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'map': 'identity_transform()', 'global_pos': 'identity_transform()'}, 'keep_unnamed': False}, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'map': 'map_pool_collated((1,84,84))'}, 'keep_unnamed': True}, 'env_specific_kwargs': {'scenario_kwargs': {'max_episode_steps': 1000}, 'map_kwargs': {'map_size': 84, 'fov': (np.pi / 2), 'min_depth': 0, 'max_depth': 1.5, 'relative_range': True, 'map_x_range': [(- 11), 11], 'map_y_range': [(- 11), 11], 'fullvision': False}, 'reward_kwargs': {'slack_reward': 0}}}
|
@ex.named_config
def small_settings5():
    # Sacred named config: smaller rollout/replay settings for constrained
    # hardware (6 train + 1 val processes).
    uuid = 'habitat_small_settings5'
    cfg = {}
    cfg['learner'] = {'num_steps': 512, 'replay_buffer_size': 1024, 'on_policy_epoch': 5, 'off_policy_epoch': 10, 'num_mini_batch': 24, 'rollout_value_batch_multiplier': 1}
    cfg['env'] = {'num_processes': 6, 'num_val_processes': 1}
|
@ex.named_config
def cvpr_settings():
    # Training hyperparameters used for the CVPR experiment runs.
    uuid = 'habitat_cvpr_settings'
    cfg = {}
    cfg['learner'] = {
        'num_steps': 512,
        'replay_buffer_size': 4096,
        'on_policy_epoch': 8,
        'off_policy_epoch': 8,
        'num_mini_batch': 8,
        'rollout_value_batch_multiplier': 1,
    }
    # Six training workers, one held out for validation.
    cfg['env'] = {'num_processes': 6, 'num_val_processes': 1}
|
@ex.named_config
def prototype():
    # Quick-iteration setup: two workers restricted to a single train scene
    # and a single val scene, with frequent logging/visualization.
    uuid = 'test'
    cfg = {}
    cfg['env'] = {
        'num_processes': 2,
        'num_val_processes': 1,
        'env_specific_kwargs': {
            'train_scenes': ['Adrian'],
            'val_scenes': ['Denmark'],
        },
    }
    cfg['saving'] = {'log_interval': 2, 'vis_interval': 1}
|
@ex.named_config
def debug():
    # Single deterministic worker on one scene with debug instrumentation on.
    uuid = 'test'
    cfg = {}
    override = {}
    cfg['learner'] = {
        'num_steps': 100,
        'replay_buffer_size': 300,
        'deterministic': True,
    }
    cfg['env'] = {
        'num_processes': 1,
        'num_val_processes': 0,
        'env_specific_kwargs': {'train_scenes': ['Adrian'], 'debug_mode': True},
    }
    cfg['saving'] = {'log_interval': 2, 'vis_interval': 1}
    # NOTE(review): 'override' mirrors part of cfg['env']; presumably it is
    # applied after all other configs merge, to force these values — confirm
    # against the config-loading code.
    override['env'] = {
        'num_processes': 1,
        'num_val_processes': 0,
        'env_specific_kwargs': {'debug_mode': True},
    }
|
@ex.config
def cfg_base():
    # Base config (always applied): evaluation/benchmark parameters.
    cfg = {}
    uuid = ''
    # Gibson PointNav validation task spec, resolved relative to the CWD.
    config_file = os.path.join(
        os.getcwd(), 'habitat-api/configs/tasks/pointnav_gibson_val.yaml')
    cfg['eval_kwargs'] = {
        'exp_path': '/mnt/logdir/keypoints3d_encoding_restart1',
        'weights_only_path': None,
        'challenge': True,
        'debug': False,
        'overwrite_configs': True,
        'benchmark_episodes': 10,
        'benchmark_config': config_file,
    }
|
@ex.named_config
def weights_only():
    # Evaluate straight from a weights checkpoint rather than an experiment
    # directory (exp_path is cleared so only the checkpoint is loaded).
    cfg = {}
    cfg['eval_kwargs'] = {
        'exp_path': None,
        'weights_only_path': '/mnt/eval_runs/curvature_encoding_moresteps_collate5/checkpoints/weights_and_more-latest.dat',
    }
|
@ex.named_config
def cfg_overwrite():
    # Overwrite config for evaluation: taskonomy-encoder perception plus
    # "backout"/"validator" takeover policies.  Left byte-identical except for
    # comments — the transform strings below are evaluated elsewhere and are
    # sensitive to exact formatting.
    cfg = {}
    uuid = '_overwrite'
    # NOTE(review): 'backout' appears to hand control to a hardcoded recovery
    # policy (checkpoint at backout_ckpt_path) when the agent is stuck, and
    # 'validator' gates it via a jerk heuristic — semantics live in the learner
    # code; confirm there.
    cfg['learner'] = {'taskonomy_encoder': '/mnt/models/keypoints3d_encoder.dat', 'perception_network': 'features_only', 'encoder_type': 'taskonomy', 'backout': {'use_backout': True, 'patience': 80, 'unstuck_dist': 0.3, 'randomize_actions': True, 'backout_type': 'hardcoded', 'backout_ckpt_path': '/mnt/logdir/curvature_encoding_moresteps_collate/checkpoints/ckpt-latest.dat', 'num_takeover_steps': 8}, 'validator': {'use_validator': True, 'validator_type': 'jerk'}}
    # Side length of the map image baked into the transform string; deleted at
    # the end of the function so it does not leak into the captured config.
    image_dim = 84
    # 'transform_fn' formats the encoder path/type/dim in immediately (note the
    # doubled {{ }} to survive .format); the pre/post-aggregation strings are
    # whitespace-stripped via .translate(remove_whitespace) (a translation table
    # defined elsewhere in this file), and the post-aggregation one formats in a
    # *different* encoder checkpoint ('/mnt/models/normal_encoder.dat') than
    # cfg['learner']['taskonomy_encoder'] — that asymmetry looks deliberate but
    # is worth confirming.
    cfg['env'] = {'sensors': {'features': None, 'taskonomy': None, 'map': None, 'target': None, 'global_pos': None}, 'collate_env_obs': False, 'env_gpus': [0], 'transform_fn': "TransformFactory.independent({{'taskonomy':taskonomy_features_transform('{taskonomy_encoder}', encoder_type='{encoder_type}'), 'map':image_to_input_pool((3,{image_dim},{image_dim})), 'target':identity_transform(), 'global_pos':identity_transform()}}, keep_unnamed=False)".format(encoder_type=cfg['learner']['encoder_type'], taskonomy_encoder=cfg['learner']['taskonomy_encoder'], image_dim=image_dim), 'use_target': True, 'use_map': True, 'habitat_map_kwargs': {'map_building_size': 22, 'map_max_pool': False, 'use_cuda': False, 'history_size': None}, 'env_specific_kwargs': {'target_dim': 16}, 'transform_fn_pre_aggregation': "\n TransformFactory.independent(\n {\n 'taskonomy':rescale_centercrop_resize((3,256,256)),\n },\n keep_unnamed=True)\n ".translate(remove_whitespace), 'transform_fn_post_aggregation': "\n TransformFactory.independent(\n {{\n 'taskonomy':taskonomy_features_transform('{taskonomy_encoder}'),\n 'target':identity_transform(),\n 'map':map_pool_collated((3,84,84)),\n 'global_pos':identity_transform(),\n }},\n keep_unnamed=False)\n ".translate(remove_whitespace).format(taskonomy_encoder='/mnt/models/normal_encoder.dat')}
    cfg['training'] = {'seed': 42}
    # Remove the helper local so Sacred does not record it as a config entry.
    del image_dim
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.