repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
SBRG/ssbio | ssbio/protein/structure/utils/foldx.py | FoldX.create_random_mutation_file | def create_random_mutation_file(self, list_of_tuples, original_sequence,
randomize_resnums=False, randomize_resids=False,
skip_resnums=None):
"""Create the FoldX file 'individual_list.txt', but randomize the mutation numbers or residues that were input.
The randomize combinations can be a little confusing - this is what can happen:
- randomize_resnums=False, randomize_resids=False: no change, original mutations are carried out
- randomize_resnums=True, randomize_resids=False: mutations of resid X to resid Y will be carried out,
but on a different residue number where resid X is found
- randomize_resnums=False, randomize_resids=True: mutations of residue X# to a random residue will be
carried out
- randomize_resnums=True, randomize_resids=True: original mutations will be ignored, random mutation of
any residue will be carried out
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to be randomized.
original_sequence (str, Seq, SeqRecord): Original amino acid sequence
randomize_resnums (bool): If residue numbers should be randomized
randomize_resids (bool): If residues themselves should be randomized
skip_resnums (list):
"""
import random
def find(s, ch):
return [i for i, ltr in enumerate(s) if ltr == ch] | python | def create_random_mutation_file(self, list_of_tuples, original_sequence,
randomize_resnums=False, randomize_resids=False,
skip_resnums=None):
"""Create the FoldX file 'individual_list.txt', but randomize the mutation numbers or residues that were input.
The randomize combinations can be a little confusing - this is what can happen:
- randomize_resnums=False, randomize_resids=False: no change, original mutations are carried out
- randomize_resnums=True, randomize_resids=False: mutations of resid X to resid Y will be carried out,
but on a different residue number where resid X is found
- randomize_resnums=False, randomize_resids=True: mutations of residue X# to a random residue will be
carried out
- randomize_resnums=True, randomize_resids=True: original mutations will be ignored, random mutation of
any residue will be carried out
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to be randomized.
original_sequence (str, Seq, SeqRecord): Original amino acid sequence
randomize_resnums (bool): If residue numbers should be randomized
randomize_resids (bool): If residues themselves should be randomized
skip_resnums (list):
"""
import random
def find(s, ch):
return [i for i, ltr in enumerate(s) if ltr == ch] | [
"def",
"create_random_mutation_file",
"(",
"self",
",",
"list_of_tuples",
",",
"original_sequence",
",",
"randomize_resnums",
"=",
"False",
",",
"randomize_resids",
"=",
"False",
",",
"skip_resnums",
"=",
"None",
")",
":",
"import",
"random",
"def",
"find",
"(",
... | Create the FoldX file 'individual_list.txt', but randomize the mutation numbers or residues that were input.
The randomize combinations can be a little confusing - this is what can happen:
- randomize_resnums=False, randomize_resids=False: no change, original mutations are carried out
- randomize_resnums=True, randomize_resids=False: mutations of resid X to resid Y will be carried out,
but on a different residue number where resid X is found
- randomize_resnums=False, randomize_resids=True: mutations of residue X# to a random residue will be
carried out
- randomize_resnums=True, randomize_resids=True: original mutations will be ignored, random mutation of
any residue will be carried out
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to be randomized.
original_sequence (str, Seq, SeqRecord): Original amino acid sequence
randomize_resnums (bool): If residue numbers should be randomized
randomize_resids (bool): If residues themselves should be randomized
skip_resnums (list): | [
"Create",
"the",
"FoldX",
"file",
"individual_list",
".",
"txt",
"but",
"randomize",
"the",
"mutation",
"numbers",
"or",
"residues",
"that",
"were",
"input",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L160-L187 | train | 29,000 |
SBRG/ssbio | ssbio/protein/structure/utils/foldx.py | FoldX.run_build_model | def run_build_model(self, num_runs=5, silent=False, force_rerun=False):
"""Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int):
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.
"""
# BuildModel output files
self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
# BuildModel command
foldx_build_model = 'foldx --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.repaired_pdb_outfile,
op.basename(self.mutation_infile),
num_runs)
ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir) | python | def run_build_model(self, num_runs=5, silent=False, force_rerun=False):
"""Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int):
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists.
"""
# BuildModel output files
self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
# BuildModel command
foldx_build_model = 'foldx --command=BuildModel --pdb={} --mutant-file={} --numberOfRuns={}'.format(self.repaired_pdb_outfile,
op.basename(self.mutation_infile),
num_runs)
ssbio.utils.command_runner(shell_command=foldx_build_model, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=self.mutation_ddG_avg_outfile, cwd=self.foldx_dir) | [
"def",
"run_build_model",
"(",
"self",
",",
"num_runs",
"=",
"5",
",",
"silent",
"=",
"False",
",",
"force_rerun",
"=",
"False",
")",
":",
"# BuildModel output files",
"self",
".",
"mutation_ddG_avg_outfile",
"=",
"'Average_{}.fxout'",
".",
"format",
"(",
"op",
... | Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int):
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel should be rerun even if the results file exists. | [
"Run",
"FoldX",
"BuildModel",
"command",
"with",
"a",
"mutant",
"file",
"input",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L191-L214 | train | 29,001 |
SBRG/ssbio | ssbio/protein/structure/utils/foldx.py | FoldX.get_ddG_results | def get_ddG_results(self):
"""Parse the results from BuildModel and get the delta delta G's.
A positive ddG means that the mutation(s) is destabilzing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
"""
foldx_avg_df = self.df_mutation_ddG_avg
foldx_avg_ddG = {}
results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values()
for r in results:
ident = r['Pdb'].split('_')[-1]
ddG = r['total energy']
ddG_sd = r['SD']
foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
return foldx_avg_ddG | python | def get_ddG_results(self):
"""Parse the results from BuildModel and get the delta delta G's.
A positive ddG means that the mutation(s) is destabilzing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
"""
foldx_avg_df = self.df_mutation_ddG_avg
foldx_avg_ddG = {}
results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values()
for r in results:
ident = r['Pdb'].split('_')[-1]
ddG = r['total energy']
ddG_sd = r['SD']
foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
return foldx_avg_ddG | [
"def",
"get_ddG_results",
"(",
"self",
")",
":",
"foldx_avg_df",
"=",
"self",
".",
"df_mutation_ddG_avg",
"foldx_avg_ddG",
"=",
"{",
"}",
"results",
"=",
"foldx_avg_df",
"[",
"[",
"'Pdb'",
",",
"'total energy'",
",",
"'SD'",
"]",
"]",
".",
"T",
".",
"to_di... | Parse the results from BuildModel and get the delta delta G's.
A positive ddG means that the mutation(s) is destabilzing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG. | [
"Parse",
"the",
"results",
"from",
"BuildModel",
"and",
"get",
"the",
"delta",
"delta",
"G",
"s",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L216-L244 | train | 29,002 |
SBRG/ssbio | ssbio/protein/structure/utils/cleanpdb.py | clean_pdb | def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean a PDB file.
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
outfile = ssbio.utils.outfile_maker(inname=pdb_file,
append_to_name=out_suffix,
outdir=outdir,
outext='.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
my_pdb = StructureIO(pdb_file)
my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains,
keep_chemicals=keep_chemicals)
my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix,
out_dir=outdir,
custom_selection=my_cleaner,
force_rerun=force_rerun)
return my_clean_pdb
else:
return outfile | python | def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean a PDB file.
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
outfile = ssbio.utils.outfile_maker(inname=pdb_file,
append_to_name=out_suffix,
outdir=outdir,
outext='.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
my_pdb = StructureIO(pdb_file)
my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains,
keep_chemicals=keep_chemicals)
my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix,
out_dir=outdir,
custom_selection=my_cleaner,
force_rerun=force_rerun)
return my_clean_pdb
else:
return outfile | [
"def",
"clean_pdb",
"(",
"pdb_file",
",",
"out_suffix",
"=",
"'_clean'",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
",",
"remove_atom_alt",
"=",
"True",
",",
"keep_atom_alt_id",
"=",
"'A'",
",",
"remove_atom_hydrogen",
"=",
"True",
",",
"a... | Clean a PDB file.
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file | [
"Clean",
"a",
"PDB",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/cleanpdb.py#L116-L165 | train | 29,003 |
SBRG/ssbio | ssbio/databases/pdb.py | parse_mmtf_header | def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict | python | def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict | [
"def",
"parse_mmtf_header",
"(",
"infile",
")",
":",
"infodict",
"=",
"{",
"}",
"mmtf_decoder",
"=",
"mmtf",
".",
"parse",
"(",
"infile",
")",
"infodict",
"[",
"'date'",
"]",
"=",
"mmtf_decoder",
".",
"deposition_date",
"infodict",
"[",
"'release_date'",
"]"... | Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed. | [
"Parse",
"an",
"MMTF",
"file",
"and",
"return",
"basic",
"header",
"-",
"like",
"information",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L121-L151 | train | 29,004 |
SBRG/ssbio | ssbio/databases/pdb.py | download_mmcif_header | def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile | python | def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile | [
"def",
"download_mmcif_header",
"(",
"pdb_id",
",",
"outdir",
"=",
"''",
",",
"force_rerun",
"=",
"False",
")",
":",
"# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this",
"# method in biopython. extra file types have not be... | Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile | [
"Download",
"a",
"mmCIF",
"header",
"file",
"from",
"the",
"RCSB",
"PDB",
"by",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L153-L180 | train | 29,005 |
SBRG/ssbio | ssbio/databases/pdb.py | parse_mmcif_header | def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
newdict['chemicals'] = chemicals_fitered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict | python | def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
chemicals_fitered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
newdict['chemicals'] = chemicals_fitered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict | [
"def",
"parse_mmcif_header",
"(",
"infile",
")",
":",
"from",
"Bio",
".",
"PDB",
".",
"MMCIF2Dict",
"import",
"MMCIF2Dict",
"newdict",
"=",
"{",
"}",
"try",
":",
"mmdict",
"=",
"MMCIF2Dict",
"(",
"infile",
")",
"except",
"ValueError",
"as",
"e",
":",
"lo... | Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header | [
"Parse",
"a",
"couple",
"important",
"fields",
"from",
"the",
"mmCIF",
"file",
"format",
"with",
"some",
"manual",
"curation",
"of",
"ligands",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L183-L259 | train | 29,006 |
SBRG/ssbio | ssbio/databases/pdb.py | download_sifts_xml | def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile | python | def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile | [
"def",
"download_sifts_xml",
"(",
"pdb_id",
",",
"outdir",
"=",
"''",
",",
"force_rerun",
"=",
"False",
")",
":",
"baseURL",
"=",
"'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'",
"filename",
"=",
"'{}.xml.gz'",
".",
"format",
"(",
"pdb_id",
".",
"lower",
"(",... | Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file | [
"Download",
"the",
"SIFTS",
"file",
"for",
"a",
"PDB",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L262-L284 | train | 29,007 |
SBRG/ssbio | ssbio/databases/pdb.py | map_uniprot_resnum_to_pdb | def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation | python | def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation | [
"def",
"map_uniprot_resnum_to_pdb",
"(",
"uniprot_resnum",
",",
"chain_id",
",",
"sifts_file",
")",
":",
"# Load the xml with lxml",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"ns_clean",
"=",
"True",
")",
"tree",
"=",
"etree",
".",
"parse",
"(",
"sifts_file... | Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue | [
"Map",
"a",
"UniProt",
"residue",
"number",
"to",
"its",
"corresponding",
"PDB",
"residue",
"number",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L287-L346 | train | 29,008 |
SBRG/ssbio | ssbio/databases/pdb.py | best_structures | def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
# if output dir is specified but not outname, use the uniprot
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
# Filter for sequence identity percentage
if seq_ident_cutoff != 0:
for result in data:
if result['coverage'] < seq_ident_cutoff:
data.remove(result)
return data | python | def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
# if output dir is specified but not outname, use the uniprot
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
# Filter for sequence identity percentage
if seq_ident_cutoff != 0:
for result in data:
if result['coverage'] < seq_ident_cutoff:
data.remove(result)
return data | [
"def",
"best_structures",
"(",
"uniprot_id",
",",
"outname",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"seq_ident_cutoff",
"=",
"0.0",
",",
"force_rerun",
"=",
"False",
")",
":",
"outfile",
"=",
"''",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"''",
... | Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism | [
"Use",
"the",
"PDBe",
"REST",
"service",
"to",
"query",
"for",
"the",
"best",
"PDB",
"structures",
"for",
"a",
"UniProt",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L349-L433 | train | 29,009 |
SBRG/ssbio | ssbio/databases/pdb.py | _property_table | def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p | python | def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p | [
"def",
"_property_table",
"(",
")",
":",
"url",
"=",
"'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"p",
"=",
"pd... | Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns | [
"Download",
"the",
"PDB",
"-",
">",
"resolution",
"table",
"directly",
"from",
"the",
"RCSB",
"PDB",
"REST",
"service",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L443-L455 | train | 29,010 |
SBRG/ssbio | ssbio/databases/pdb.py | get_resolution | def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
resolution = _property_table().ix[pdb_id, 'resolution']
if pd.isnull(resolution):
log.debug('{}: no resolution available, probably not an X-ray crystal structure')
resolution = float('inf')
return resolution | python | def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
resolution = _property_table().ix[pdb_id, 'resolution']
if pd.isnull(resolution):
log.debug('{}: no resolution available, probably not an X-ray crystal structure')
resolution = float('inf')
return resolution | [
"def",
"get_resolution",
"(",
"pdb_id",
")",
":",
"pdb_id",
"=",
"pdb_id",
".",
"upper",
"(",
")",
"if",
"pdb_id",
"not",
"in",
"_property_table",
"(",
")",
".",
"index",
":",
"raise",
"ValueError",
"(",
"'PDB ID not in property table'",
")",
"else",
":",
... | Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test | [
"Quick",
"way",
"to",
"get",
"the",
"resolution",
"of",
"a",
"PDB",
"ID",
"using",
"the",
"table",
"of",
"results",
"from",
"the",
"REST",
"service"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L458-L480 | train | 29,011 |
SBRG/ssbio | ssbio/databases/pdb.py | get_release_date | def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Organism of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
release_date = _property_table().ix[pdb_id, 'releaseDate']
if pd.isnull(release_date):
log.debug('{}: no release date available')
release_date = None
return release_date | python | def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Organism of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
release_date = _property_table().ix[pdb_id, 'releaseDate']
if pd.isnull(release_date):
log.debug('{}: no release date available')
release_date = None
return release_date | [
"def",
"get_release_date",
"(",
"pdb_id",
")",
":",
"pdb_id",
"=",
"pdb_id",
".",
"upper",
"(",
")",
"if",
"pdb_id",
"not",
"in",
"_property_table",
"(",
")",
".",
"index",
":",
"raise",
"ValueError",
"(",
"'PDB ID not in property table'",
")",
"else",
":",
... | Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
str: Organism of a PDB ID | [
"Quick",
"way",
"to",
"get",
"the",
"release",
"date",
"of",
"a",
"PDB",
"ID",
"using",
"the",
"table",
"of",
"results",
"from",
"the",
"REST",
"service"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L483-L502 | train | 29,012 |
SBRG/ssbio | ssbio/databases/pdb.py | get_num_bioassemblies | def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
log.error('Request timed out')
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols | python | def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
log.error('Request timed out')
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols | [
"def",
"get_num_bioassemblies",
"(",
"pdb_id",
",",
"cache",
"=",
"False",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"ns_clean",
"=",
"True",
")",
"if",
"not",
"outdir",
":",
... | Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again | [
"Check",
"if",
"there",
"are",
"bioassemblies",
"using",
"the",
"PDB",
"REST",
"API",
"and",
"if",
"there",
"are",
"get",
"the",
"number",
"of",
"bioassemblies",
"available",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L505-L570 | train | 29,013 |
SBRG/ssbio | ssbio/databases/pdb.py | get_bioassembly_info | def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True) | python | def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True) | [
"def",
"get_bioassembly_info",
"(",
"pdb_id",
",",
"biomol_num",
",",
"cache",
"=",
"False",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"parser",
"=",
"etree",
".",
"XMLParser",
"(",
"ns_clean",
"=",
"True",
")"
] | Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again | [
"Get",
"metadata",
"about",
"a",
"bioassembly",
"from",
"the",
"RCSB",
"PDB",
"s",
"REST",
"API",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L573-L596 | train | 29,014 |
SBRG/ssbio | ssbio/databases/pdb.py | download_structure | def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile | python | def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile | [
"def",
"download_structure",
"(",
"pdb_id",
",",
"file_type",
",",
"outdir",
"=",
"''",
",",
"only_header",
"=",
"False",
",",
"force_rerun",
"=",
"False",
")",
":",
"# method in biopython. extra file types have not been added to biopython download yet",
"pdb_id",
"=",
... | Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile | [
"Download",
"a",
"structure",
"from",
"the",
"RCSB",
"PDB",
"by",
"ID",
".",
"Specify",
"the",
"file",
"type",
"desired",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L681-L743 | train | 29,015 |
SBRG/ssbio | ssbio/databases/pdb.py | PDBProp.download_structure_file | def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun))) | python | def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun))) | [
"def",
"download_structure_file",
"(",
"self",
",",
"outdir",
",",
"file_type",
"=",
"None",
",",
"load_header_metadata",
"=",
"True",
",",
"force_rerun",
"=",
"False",
")",
":",
"ssbio",
".",
"utils",
".",
"double_check_attribute",
"(",
"object",
"=",
"self",... | Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists | [
"Download",
"a",
"structure",
"file",
"from",
"the",
"PDB",
"specifying",
"an",
"output",
"directory",
"and",
"a",
"file",
"type",
".",
"Optionally",
"download",
"the",
"mmCIF",
"header",
"file",
"and",
"parse",
"data",
"from",
"it",
"to",
"store",
"within",... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pdb.py#L60-L97 | train | 29,016 |
SBRG/ssbio | ssbio/protein/structure/properties/quality.py | parse_procheck | def parse_procheck(quality_directory):
"""Parses all PROCHECK files in a directory and returns a Pandas DataFrame of the results
Args:
quality_directory: path to directory with PROCHECK output (.sum files)
Returns:
Pandas DataFrame: Summary of PROCHECK results
"""
# TODO: save as dict instead, offer df as option
# TODO: parse for one file instead
procheck_summaries = glob.glob(os.path.join(quality_directory, '*.sum'))
if len(procheck_summaries) == 0:
return pd.DataFrame()
all_procheck = {}
for summ in procheck_summaries:
structure_id = os.path.basename(summ).split('.sum')[0]
procheck_dict = {}
with open(summ) as f_in:
lines = (line.rstrip() for line in f_in) # All lines including the blank ones
lines = (line for line in lines if line) # Non-blank lines
for line in lines:
if len(line.split()) > 1:
if line.split()[1] == 'Ramachandran':
procheck_dict['procheck_rama_favored'] = percentage_to_float(line.split()[3])
procheck_dict['procheck_rama_allowed'] = percentage_to_float(line.split()[5])
procheck_dict['procheck_rama_allowed_plus'] = percentage_to_float(line.split()[7])
procheck_dict['procheck_rama_disallowed'] = percentage_to_float(line.split()[9])
if line.split()[1] == 'G-factors':
procheck_dict['procheck_gfac_dihedrals'] = line.split()[3]
procheck_dict['procheck_gfac_covalent'] = line.split()[5]
procheck_dict['procheck_gfac_overall'] = line.split()[7]
all_procheck[structure_id] = procheck_dict
DF_PROCHECK = pd.DataFrame.from_dict(all_procheck, orient='index')
return DF_PROCHECK | python | def parse_procheck(quality_directory):
"""Parses all PROCHECK files in a directory and returns a Pandas DataFrame of the results
Args:
quality_directory: path to directory with PROCHECK output (.sum files)
Returns:
Pandas DataFrame: Summary of PROCHECK results
"""
# TODO: save as dict instead, offer df as option
# TODO: parse for one file instead
procheck_summaries = glob.glob(os.path.join(quality_directory, '*.sum'))
if len(procheck_summaries) == 0:
return pd.DataFrame()
all_procheck = {}
for summ in procheck_summaries:
structure_id = os.path.basename(summ).split('.sum')[0]
procheck_dict = {}
with open(summ) as f_in:
lines = (line.rstrip() for line in f_in) # All lines including the blank ones
lines = (line for line in lines if line) # Non-blank lines
for line in lines:
if len(line.split()) > 1:
if line.split()[1] == 'Ramachandran':
procheck_dict['procheck_rama_favored'] = percentage_to_float(line.split()[3])
procheck_dict['procheck_rama_allowed'] = percentage_to_float(line.split()[5])
procheck_dict['procheck_rama_allowed_plus'] = percentage_to_float(line.split()[7])
procheck_dict['procheck_rama_disallowed'] = percentage_to_float(line.split()[9])
if line.split()[1] == 'G-factors':
procheck_dict['procheck_gfac_dihedrals'] = line.split()[3]
procheck_dict['procheck_gfac_covalent'] = line.split()[5]
procheck_dict['procheck_gfac_overall'] = line.split()[7]
all_procheck[structure_id] = procheck_dict
DF_PROCHECK = pd.DataFrame.from_dict(all_procheck, orient='index')
return DF_PROCHECK | [
"def",
"parse_procheck",
"(",
"quality_directory",
")",
":",
"# TODO: save as dict instead, offer df as option",
"# TODO: parse for one file instead",
"procheck_summaries",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"quality_directory",
",",
"'*.s... | Parses all PROCHECK files in a directory and returns a Pandas DataFrame of the results
Args:
quality_directory: path to directory with PROCHECK output (.sum files)
Returns:
Pandas DataFrame: Summary of PROCHECK results | [
"Parses",
"all",
"PROCHECK",
"files",
"in",
"a",
"directory",
"and",
"returns",
"a",
"Pandas",
"DataFrame",
"of",
"the",
"results"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/quality.py#L159-L200 | train | 29,017 |
SBRG/ssbio | ssbio/protein/structure/properties/quality.py | parse_psqs | def parse_psqs(psqs_results_file):
"""Parse a PSQS result file and returns a Pandas DataFrame of the results
Args:
psqs_results_file: Path to psqs results file
Returns:
Pandas DataFrame: Summary of PSQS results
"""
# TODO: generalize column names for all results, save as dict instead
psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None)
psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb'))
psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:'psqs_total'}).drop(0, axis=1)
psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x)==4 else np.nan)
psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x)>4 else np.nan)
psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)]
return psqs_results | python | def parse_psqs(psqs_results_file):
"""Parse a PSQS result file and returns a Pandas DataFrame of the results
Args:
psqs_results_file: Path to psqs results file
Returns:
Pandas DataFrame: Summary of PSQS results
"""
# TODO: generalize column names for all results, save as dict instead
psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None)
psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb'))
psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:'psqs_total'}).drop(0, axis=1)
psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x)==4 else np.nan)
psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x)>4 else np.nan)
psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)]
return psqs_results | [
"def",
"parse_psqs",
"(",
"psqs_results_file",
")",
":",
"# TODO: generalize column names for all results, save as dict instead",
"psqs_results",
"=",
"pd",
".",
"read_csv",
"(",
"psqs_results_file",
",",
"sep",
"=",
"'\\t'",
",",
"header",
"=",
"None",
")",
"psqs_resul... | Parse a PSQS result file and returns a Pandas DataFrame of the results
Args:
psqs_results_file: Path to psqs results file
Returns:
Pandas DataFrame: Summary of PSQS results | [
"Parse",
"a",
"PSQS",
"result",
"file",
"and",
"returns",
"a",
"Pandas",
"DataFrame",
"of",
"the",
"results"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/quality.py#L203-L223 | train | 29,018 |
SBRG/ssbio | ssbio/core/protein.py | Protein.protein_statistics | def protein_statistics(self):
"""Get a dictionary of basic statistics describing this protein"""
# TODO: can i use get_dict here instead
d = {}
d['id'] = self.id
d['sequences'] = [x.id for x in self.sequences]
d['num_sequences'] = self.num_sequences
if self.representative_sequence:
d['representative_sequence'] = self.representative_sequence.id
d['repseq_gene_name'] = self.representative_sequence.gene_name
d['repseq_uniprot'] = self.representative_sequence.uniprot
d['repseq_description'] = self.representative_sequence.description
d['num_structures'] = self.num_structures
d['experimental_structures'] = [x.id for x in self.get_experimental_structures()]
d['num_experimental_structures'] = self.num_structures_experimental
d['homology_models'] = [x.id for x in self.get_homology_models()]
d['num_homology_models'] = self.num_structures_homology
if self.representative_structure:
d['representative_structure'] = self.representative_structure.id
d['representative_chain'] = self.representative_chain
d['representative_chain_seq_coverage'] = self.representative_chain_seq_coverage
d['repstruct_description'] = self.description
if self.representative_structure.is_experimental:
d['repstruct_resolution'] = self.representative_structure.resolution
d['num_sequence_alignments'] = len(self.sequence_alignments)
d['num_structure_alignments'] = len(self.structure_alignments)
return d | python | def protein_statistics(self):
"""Get a dictionary of basic statistics describing this protein"""
# TODO: can i use get_dict here instead
d = {}
d['id'] = self.id
d['sequences'] = [x.id for x in self.sequences]
d['num_sequences'] = self.num_sequences
if self.representative_sequence:
d['representative_sequence'] = self.representative_sequence.id
d['repseq_gene_name'] = self.representative_sequence.gene_name
d['repseq_uniprot'] = self.representative_sequence.uniprot
d['repseq_description'] = self.representative_sequence.description
d['num_structures'] = self.num_structures
d['experimental_structures'] = [x.id for x in self.get_experimental_structures()]
d['num_experimental_structures'] = self.num_structures_experimental
d['homology_models'] = [x.id for x in self.get_homology_models()]
d['num_homology_models'] = self.num_structures_homology
if self.representative_structure:
d['representative_structure'] = self.representative_structure.id
d['representative_chain'] = self.representative_chain
d['representative_chain_seq_coverage'] = self.representative_chain_seq_coverage
d['repstruct_description'] = self.description
if self.representative_structure.is_experimental:
d['repstruct_resolution'] = self.representative_structure.resolution
d['num_sequence_alignments'] = len(self.sequence_alignments)
d['num_structure_alignments'] = len(self.structure_alignments)
return d | [
"def",
"protein_statistics",
"(",
"self",
")",
":",
"# TODO: can i use get_dict here instead",
"d",
"=",
"{",
"}",
"d",
"[",
"'id'",
"]",
"=",
"self",
".",
"id",
"d",
"[",
"'sequences'",
"]",
"=",
"[",
"x",
".",
"id",
"for",
"x",
"in",
"self",
".",
"... | Get a dictionary of basic statistics describing this protein | [
"Get",
"a",
"dictionary",
"of",
"basic",
"statistics",
"describing",
"this",
"protein"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L192-L221 | train | 29,019 |
SBRG/ssbio | ssbio/core/protein.py | Protein.filter_sequences | def filter_sequences(self, seq_type):
"""Return a DictList of only specified types in the sequences attribute.
Args:
seq_type (SeqProp): Object type
Returns:
DictList: A filtered DictList of specified object type only
"""
return DictList(x for x in self.sequences if isinstance(x, seq_type)) | python | def filter_sequences(self, seq_type):
"""Return a DictList of only specified types in the sequences attribute.
Args:
seq_type (SeqProp): Object type
Returns:
DictList: A filtered DictList of specified object type only
"""
return DictList(x for x in self.sequences if isinstance(x, seq_type)) | [
"def",
"filter_sequences",
"(",
"self",
",",
"seq_type",
")",
":",
"return",
"DictList",
"(",
"x",
"for",
"x",
"in",
"self",
".",
"sequences",
"if",
"isinstance",
"(",
"x",
",",
"seq_type",
")",
")"
] | Return a DictList of only specified types in the sequences attribute.
Args:
seq_type (SeqProp): Object type
Returns:
DictList: A filtered DictList of specified object type only | [
"Return",
"a",
"DictList",
"of",
"only",
"specified",
"types",
"in",
"the",
"sequences",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L264-L274 | train | 29,020 |
SBRG/ssbio | ssbio/core/protein.py | Protein.load_kegg | def load_kegg(self, kegg_id, kegg_organism_code=None, kegg_seq_file=None, kegg_metadata_file=None,
set_as_representative=False, download=False, outdir=None, force_rerun=False):
"""Load a KEGG ID, sequence, and metadata files into the sequences attribute.
Args:
kegg_id (str): KEGG ID
kegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already.
Example: ``eco:b1244``, ``eco`` is the organism code
kegg_seq_file (str): Path to KEGG FASTA file
kegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format)
set_as_representative (bool): If this KEGG ID should be set as the representative sequence
download (bool): If the KEGG sequence and metadata files should be downloaded if not provided
outdir (str): Where the sequence and metadata files should be downloaded to
force_rerun (bool): If ID should be reloaded and files redownloaded
Returns:
KEGGProp: object contained in the sequences attribute
"""
if download:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
if kegg_organism_code:
kegg_id = kegg_organism_code + ':' + kegg_id
# If we have already loaded the KEGG ID
if self.sequences.has_id(kegg_id):
# Remove it if we want to force rerun things
if force_rerun:
existing = self.sequences.get_by_id(kegg_id)
self.sequences.remove(existing)
# Otherwise just get that KEGG object
else:
log.debug('{}: KEGG ID already present in list of sequences'.format(kegg_id))
kegg_prop = self.sequences.get_by_id(kegg_id)
# Check again (instead of else) in case we removed it if force rerun
if not self.sequences.has_id(kegg_id):
kegg_prop = KEGGProp(id=kegg_id, seq=None, fasta_path=kegg_seq_file, txt_path=kegg_metadata_file)
if download:
kegg_prop.download_seq_file(outdir, force_rerun)
kegg_prop.download_metadata_file(outdir, force_rerun)
# Check if KEGG sequence matches a potentially set representative sequence
# Do not add any info if a UniProt ID was already mapped though, we want to use that
if self.representative_sequence:
if not self.representative_sequence.uniprot:
if kegg_prop.equal_to(self.representative_sequence):
# Update the representative sequence field with KEGG metadata
self.representative_sequence.update(kegg_prop.get_dict(), only_keys=['sequence_path',
'metadata_path',
'kegg',
'description',
'taxonomy',
'id',
'pdbs',
'uniprot',
'seq_record',
'gene_name',
'refseq'])
else:
# TODO: add option to use manual or kegg sequence if things do not match
log.warning('{}: representative sequence does not match mapped KEGG sequence.'.format(self.id))
self.sequences.append(kegg_prop)
if set_as_representative:
self.representative_sequence = kegg_prop
return self.sequences.get_by_id(kegg_id) | python | def load_kegg(self, kegg_id, kegg_organism_code=None, kegg_seq_file=None, kegg_metadata_file=None,
set_as_representative=False, download=False, outdir=None, force_rerun=False):
"""Load a KEGG ID, sequence, and metadata files into the sequences attribute.
Args:
kegg_id (str): KEGG ID
kegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already.
Example: ``eco:b1244``, ``eco`` is the organism code
kegg_seq_file (str): Path to KEGG FASTA file
kegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format)
set_as_representative (bool): If this KEGG ID should be set as the representative sequence
download (bool): If the KEGG sequence and metadata files should be downloaded if not provided
outdir (str): Where the sequence and metadata files should be downloaded to
force_rerun (bool): If ID should be reloaded and files redownloaded
Returns:
KEGGProp: object contained in the sequences attribute
"""
if download:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
if kegg_organism_code:
kegg_id = kegg_organism_code + ':' + kegg_id
# If we have already loaded the KEGG ID
if self.sequences.has_id(kegg_id):
# Remove it if we want to force rerun things
if force_rerun:
existing = self.sequences.get_by_id(kegg_id)
self.sequences.remove(existing)
# Otherwise just get that KEGG object
else:
log.debug('{}: KEGG ID already present in list of sequences'.format(kegg_id))
kegg_prop = self.sequences.get_by_id(kegg_id)
# Check again (instead of else) in case we removed it if force rerun
if not self.sequences.has_id(kegg_id):
kegg_prop = KEGGProp(id=kegg_id, seq=None, fasta_path=kegg_seq_file, txt_path=kegg_metadata_file)
if download:
kegg_prop.download_seq_file(outdir, force_rerun)
kegg_prop.download_metadata_file(outdir, force_rerun)
# Check if KEGG sequence matches a potentially set representative sequence
# Do not add any info if a UniProt ID was already mapped though, we want to use that
if self.representative_sequence:
if not self.representative_sequence.uniprot:
if kegg_prop.equal_to(self.representative_sequence):
# Update the representative sequence field with KEGG metadata
self.representative_sequence.update(kegg_prop.get_dict(), only_keys=['sequence_path',
'metadata_path',
'kegg',
'description',
'taxonomy',
'id',
'pdbs',
'uniprot',
'seq_record',
'gene_name',
'refseq'])
else:
# TODO: add option to use manual or kegg sequence if things do not match
log.warning('{}: representative sequence does not match mapped KEGG sequence.'.format(self.id))
self.sequences.append(kegg_prop)
if set_as_representative:
self.representative_sequence = kegg_prop
return self.sequences.get_by_id(kegg_id) | [
"def",
"load_kegg",
"(",
"self",
",",
"kegg_id",
",",
"kegg_organism_code",
"=",
"None",
",",
"kegg_seq_file",
"=",
"None",
",",
"kegg_metadata_file",
"=",
"None",
",",
"set_as_representative",
"=",
"False",
",",
"download",
"=",
"False",
",",
"outdir",
"=",
... | Load a KEGG ID, sequence, and metadata files into the sequences attribute.
Args:
kegg_id (str): KEGG ID
kegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already.
Example: ``eco:b1244``, ``eco`` is the organism code
kegg_seq_file (str): Path to KEGG FASTA file
kegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format)
set_as_representative (bool): If this KEGG ID should be set as the representative sequence
download (bool): If the KEGG sequence and metadata files should be downloaded if not provided
outdir (str): Where the sequence and metadata files should be downloaded to
force_rerun (bool): If ID should be reloaded and files redownloaded
Returns:
KEGGProp: object contained in the sequences attribute | [
"Load",
"a",
"KEGG",
"ID",
"sequence",
"and",
"metadata",
"files",
"into",
"the",
"sequences",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L276-L348 | train | 29,021 |
SBRG/ssbio | ssbio/core/protein.py | Protein.load_manual_sequence_file | def load_manual_sequence_file(self, ident, seq_file, copy_file=False, outdir=None, set_as_representative=False):
"""Load a manual sequence, given as a FASTA file and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
ident (str): Sequence ID
seq_file (str): Path to sequence FASTA file
copy_file (bool): If the FASTA file should be copied to the protein's sequences folder or the ``outdir``, if
protein folder has not been set
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute
"""
if copy_file:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
shutil.copy(seq_file, outdir)
seq_file = op.join(outdir, seq_file)
manual_sequence = SeqProp(id=ident, sequence_path=seq_file, seq=None)
self.sequences.append(manual_sequence)
if set_as_representative:
self.representative_sequence = manual_sequence
return self.sequences.get_by_id(ident) | python | def load_manual_sequence_file(self, ident, seq_file, copy_file=False, outdir=None, set_as_representative=False):
"""Load a manual sequence, given as a FASTA file and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
ident (str): Sequence ID
seq_file (str): Path to sequence FASTA file
copy_file (bool): If the FASTA file should be copied to the protein's sequences folder or the ``outdir``, if
protein folder has not been set
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute
"""
if copy_file:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
shutil.copy(seq_file, outdir)
seq_file = op.join(outdir, seq_file)
manual_sequence = SeqProp(id=ident, sequence_path=seq_file, seq=None)
self.sequences.append(manual_sequence)
if set_as_representative:
self.representative_sequence = manual_sequence
return self.sequences.get_by_id(ident) | [
"def",
"load_manual_sequence_file",
"(",
"self",
",",
"ident",
",",
"seq_file",
",",
"copy_file",
"=",
"False",
",",
"outdir",
"=",
"None",
",",
"set_as_representative",
"=",
"False",
")",
":",
"if",
"copy_file",
":",
"if",
"not",
"outdir",
":",
"outdir",
... | Load a manual sequence, given as a FASTA file and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
ident (str): Sequence ID
seq_file (str): Path to sequence FASTA file
copy_file (bool): If the FASTA file should be copied to the protein's sequences folder or the ``outdir``, if
protein folder has not been set
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute | [
"Load",
"a",
"manual",
"sequence",
"given",
"as",
"a",
"FASTA",
"file",
"and",
"optionally",
"set",
"it",
"as",
"the",
"representative",
"sequence",
".",
"Also",
"store",
"it",
"in",
"the",
"sequences",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L422-L452 | train | 29,022 |
SBRG/ssbio | ssbio/core/protein.py | Protein.load_manual_sequence | def load_manual_sequence(self, seq, ident=None, write_fasta_file=False, outdir=None,
set_as_representative=False, force_rewrite=False):
"""Load a manual sequence given as a string and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object
ident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing
IDs in Seq or SeqRecord objects if set.
write_fasta_file (bool): If this sequence should be written out to a FASTA file
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
force_rewrite (bool): If the FASTA file should be overwritten if it already exists
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute
"""
if write_fasta_file:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, '{}.faa'.format(ident))
else:
outfile = None
if isinstance(seq, str) or isinstance(seq, Seq):
if not ident:
raise ValueError('ID must be specified if sequence is a string or Seq object')
manual_sequence = SeqProp(id=ident, seq=seq)
else:
if not ident:
# Use ID from SeqRecord ID if new ID not provided
ident = seq.id
else:
# Overwrite SeqRecord ID with new ID if provided
seq.id = ident
manual_sequence = SeqProp(id=ident, seq=seq, name=seq.name, description=seq.description)
if write_fasta_file:
manual_sequence.write_fasta_file(outfile=outfile, force_rerun=force_rewrite)
self.sequences.append(manual_sequence)
if set_as_representative:
self.representative_sequence = manual_sequence
return self.sequences.get_by_id(ident) | python | def load_manual_sequence(self, seq, ident=None, write_fasta_file=False, outdir=None,
set_as_representative=False, force_rewrite=False):
"""Load a manual sequence given as a string and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object
ident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing
IDs in Seq or SeqRecord objects if set.
write_fasta_file (bool): If this sequence should be written out to a FASTA file
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
force_rewrite (bool): If the FASTA file should be overwritten if it already exists
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute
"""
if write_fasta_file:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, '{}.faa'.format(ident))
else:
outfile = None
if isinstance(seq, str) or isinstance(seq, Seq):
if not ident:
raise ValueError('ID must be specified if sequence is a string or Seq object')
manual_sequence = SeqProp(id=ident, seq=seq)
else:
if not ident:
# Use ID from SeqRecord ID if new ID not provided
ident = seq.id
else:
# Overwrite SeqRecord ID with new ID if provided
seq.id = ident
manual_sequence = SeqProp(id=ident, seq=seq, name=seq.name, description=seq.description)
if write_fasta_file:
manual_sequence.write_fasta_file(outfile=outfile, force_rerun=force_rewrite)
self.sequences.append(manual_sequence)
if set_as_representative:
self.representative_sequence = manual_sequence
return self.sequences.get_by_id(ident) | [
"def",
"load_manual_sequence",
"(",
"self",
",",
"seq",
",",
"ident",
"=",
"None",
",",
"write_fasta_file",
"=",
"False",
",",
"outdir",
"=",
"None",
",",
"set_as_representative",
"=",
"False",
",",
"force_rewrite",
"=",
"False",
")",
":",
"if",
"write_fasta... | Load a manual sequence given as a string and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
seq (str, Seq, SeqRecord): Sequence string, Biopython Seq or SeqRecord object
ident (str): Optional identifier for the sequence, required if seq is a string. Also will override existing
IDs in Seq or SeqRecord objects if set.
write_fasta_file (bool): If this sequence should be written out to a FASTA file
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
force_rewrite (bool): If the FASTA file should be overwritten if it already exists
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute | [
"Load",
"a",
"manual",
"sequence",
"given",
"as",
"a",
"string",
"and",
"optionally",
"set",
"it",
"as",
"the",
"representative",
"sequence",
".",
"Also",
"store",
"it",
"in",
"the",
"sequences",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L454-L501 | train | 29,023 |
def write_all_sequences_file(self, outname, outdir=None):
    """Write every stored sequence to one FASTA file (IDs default to model gene IDs).

    Args:
        outname (str): Name of the output FASTA file without the extension
        outdir (str): Path to output directory; defaults to the sequences directory

    Returns:
        str: Path to the written FASTA file

    Raises:
        ValueError: If no output directory is available.
    """
    target_dir = outdir or self.sequence_dir
    if not target_dir:
        raise ValueError('Output directory must be specified')
    outfile = op.join(target_dir, '{}.faa'.format(outname))
    SeqIO.write(self.sequences, outfile, "fasta")
    log.info('{}: wrote all protein sequences to file'.format(outfile))
    return outfile
"""Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory for the file, default is the sequences directory
"""
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, outname + '.faa')
SeqIO.write(self.sequences, outfile, "fasta")
log.info('{}: wrote all protein sequences to file'.format(outfile))
return outfile | [
"def",
"write_all_sequences_file",
"(",
"self",
",",
"outname",
",",
"outdir",
"=",
"None",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"self",
".",
"sequence_dir",
"if",
"not",
"outdir",
":",
"raise",
"ValueError",
"(",
"'Output directory must be spe... | Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory for the file, default is the sequences directory | [
"Write",
"all",
"the",
"stored",
"sequences",
"as",
"a",
"single",
"FASTA",
"file",
".",
"By",
"default",
"sets",
"IDs",
"to",
"model",
"gene",
"IDs",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L706-L724 | train | 29,024 |
def get_sequence_sliding_window_properties(self, scale, window, representative_only=True):
    """Run Biopython ProteinAnalysis over a sliding window for a given property scale.

    Results are stored on the respective SeqProp objects in ``.letter_annotations``.

    Args:
        scale (str): Scale name
        window (int): Sliding window size
        representative_only (bool): If analysis should only be run on the representative sequence
    """
    if representative_only:
        repseq = self.representative_sequence
        # Nothing to analyze unless a representative sequence with a stored sequence exists
        if not repseq:
            log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))
            return
        if not repseq.seq:
            log.warning('{}: representative sequence {} set, but no sequence stored. '
                        'Cannot get sequence properties.'.format(self.id, repseq.id))
            return
        repseq.get_sliding_window_properties(scale=scale, window=window)
    else:
        for seqprop in self.sequences:
            # Skip entries without a stored sequence
            if not seqprop.seq:
                log.warning('{}: no sequence stored. '
                            'Cannot get sequence properties.'.format(seqprop.id))
                continue
            seqprop.get_sliding_window_properties(scale=scale, window=window)
"""Run Biopython ProteinAnalysis with a sliding window to calculate a given property.
Results are stored in the protein's respective SeqProp objects at ``.letter_annotations``
Args:
scale (str): Scale name
window (int): Sliding window size
representative_only (bool): If analysis should only be run on the representative sequence
"""
if representative_only:
# Check if a representative sequence was set
if not self.representative_sequence:
log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))
return
# Also need to check if a sequence has been stored
if not self.representative_sequence.seq:
log.warning('{}: representative sequence {} set, but no sequence stored. '
'Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))
return
self.representative_sequence.get_sliding_window_properties(scale=scale, window=window)
if not representative_only:
for s in self.sequences:
# Need to check if a sequence has been stored
if not s.seq:
log.warning('{}: no sequence stored. '
'Cannot get sequence properties.'.format(s.id))
continue
else:
s.get_sliding_window_properties(scale=scale, window=window) | [
"def",
"get_sequence_sliding_window_properties",
"(",
"self",
",",
"scale",
",",
"window",
",",
"representative_only",
"=",
"True",
")",
":",
"if",
"representative_only",
":",
"# Check if a representative sequence was set",
"if",
"not",
"self",
".",
"representative_sequen... | Run Biopython ProteinAnalysis with a sliding window to calculate a given property.
Results are stored in the protein's respective SeqProp objects at ``.letter_annotations``
Args:
scale (str): Scale name
window (int): Sliding window size
representative_only (bool): If analysis should only be run on the representative sequence | [
"Run",
"Biopython",
"ProteinAnalysis",
"with",
"a",
"sliding",
"window",
"to",
"calculate",
"a",
"given",
"property",
".",
"Results",
"are",
"stored",
"in",
"the",
"protein",
"s",
"respective",
"SeqProp",
"objects",
"at",
".",
"letter_annotations"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L761-L794 | train | 29,025 |
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
                          execute_from_dir=None, print_exec=False, **kwargs):
    """Prepare an I-TASSER homology-modeling run for the representative sequence.

    Creates the homology-models folder and writes the I-TASSER job setup via ``ITASSERPrep``.

    Args:
        itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
        itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
        runtype: How you will be running I-TASSER - local, slurm, or torque
        create_in_dir (str): Local directory where folders will be created
        execute_from_dir (str): Optional execution directory - use when copying the homology
            models elsewhere (e.g. a supercomputer) for running
        print_exec (bool): If the execution statement should be printed to run modelling
        **kwargs: Extra options forwarded to ``ITASSERPrep`` (light, java_home, binding_site_pred,
            ec_pred, go_pred, job_scheduler_header, additional_options)

    Raises:
        ValueError: If no output directory can be determined.
    """
    if create_in_dir:
        self.homology_models_dir = create_in_dir
    else:
        if not self.structure_dir:
            raise ValueError('Output directory must be specified')
        self.homology_models_dir = op.join(self.structure_dir, 'homology_models')
    ssbio.utils.make_dir(self.homology_models_dir)

    # Default to executing from the folder we just created
    execute_from_dir = execute_from_dir or self.homology_models_dir

    repseq = self.representative_sequence

    # Defaults, overridden by any caller-supplied keyword options
    options = {'light': True,
               'java_home': None,
               'binding_site_pred': False,
               'ec_pred': False,
               'go_pred': False,
               'job_scheduler_header': None,
               'additional_options': None}
    options.update(kwargs)

    ITASSERPrep(ident=self.id, seq_str=repseq.seq_str, root_dir=self.homology_models_dir,
                itasser_path=itasser_installation, itlib_path=itlib_folder,
                runtype=runtype, print_exec=print_exec, execute_dir=execute_from_dir,
                java_home=options['java_home'],
                light=options['light'],
                binding_site_pred=options['binding_site_pred'],
                ec_pred=options['ec_pred'],
                go_pred=options['go_pred'],
                job_scheduler_header=options['job_scheduler_header'],
                additional_options=options['additional_options'])

    log.debug('Prepared I-TASSER modeling folder {}'.format(self.homology_models_dir))
execute_from_dir=None, print_exec=False, **kwargs):
"""Prepare to run I-TASSER homology modeling for the representative sequence.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
"""
if not create_in_dir:
if not self.structure_dir:
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.structure_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if not execute_from_dir:
execute_from_dir = self.homology_models_dir
repseq = self.representative_sequence
itasser_kwargs = {'light': True,
'java_home': None,
'binding_site_pred': False,
'ec_pred': False,
'go_pred': False,
'job_scheduler_header': None,
'additional_options': None}
if kwargs:
itasser_kwargs.update(kwargs)
ITASSERPrep(ident=self.id, seq_str=repseq.seq_str, root_dir=self.homology_models_dir,
itasser_path=itasser_installation, itlib_path=itlib_folder,
runtype=runtype, print_exec=print_exec, execute_dir=execute_from_dir,
java_home=itasser_kwargs['java_home'],
light=itasser_kwargs['light'],
binding_site_pred=itasser_kwargs['binding_site_pred'],
ec_pred=itasser_kwargs['ec_pred'],
go_pred=itasser_kwargs['go_pred'],
job_scheduler_header=itasser_kwargs['job_scheduler_header'],
additional_options=itasser_kwargs['additional_options'])
log.debug('Prepared I-TASSER modeling folder {}'.format(self.homology_models_dir)) | [
"def",
"prep_itasser_modeling",
"(",
"self",
",",
"itasser_installation",
",",
"itlib_folder",
",",
"runtype",
",",
"create_in_dir",
"=",
"None",
",",
"execute_from_dir",
"=",
"None",
",",
"print_exec",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
... | Prepare to run I-TASSER homology modeling for the representative sequence.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp? | [
"Prepare",
"to",
"run",
"I",
"-",
"TASSER",
"homology",
"modeling",
"for",
"the",
"representative",
"sequence",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L796-L852 | train | 29,026 |
def map_uniprot_to_pdb(self, seq_ident_cutoff=0.0, outdir=None, force_rerun=False):
    """Map the representative sequence's UniProt ID to PDB IDs via the PDBe "Best Structures" API.

    Caches a JSON file of the results in the protein sequences folder. The API
    (https://www.ebi.ac.uk/pdbe/api/doc/sifts.html) returns PDB structures for a UniProt
    accession sorted by coverage of the protein and, if tied, by resolution.

    Args:
        seq_ident_cutoff (float): Sequence identity cutoff in decimal form
        outdir (str): Output directory to cache JSON results of search
        force_rerun (bool): Force re-downloading of JSON results if they already exist

    Returns:
        list: Rank-ordered list of PDB IDs that map to the UniProt ID, or None if no
        representative sequence/UniProt ID is set.
    """
    if not self.representative_sequence:
        log.error('{}: no representative sequence set, cannot use best structures API'.format(self.id))
        return None

    uniprot_id = self.representative_sequence.uniprot
    if not uniprot_id:
        log.error('{}: no representative UniProt ID set, cannot use best structures API'.format(self.id))
        return None

    # Isoform suffixes (e.g. P12345-2) are not accepted by the API; strip to the base accession
    if '-' in uniprot_id:
        log.debug('{}: "-" detected in UniProt ID, isoform specific sequences are ignored with best structures API'.format(self.id))
        uniprot_id = uniprot_id.split('-')[0]

    outdir = outdir or self.sequence_dir
    if not outdir:
        raise ValueError('Output directory must be specified')

    hits = ssbio.databases.pdb.best_structures(uniprot_id,
                                               outname='{}_best_structures'.format(custom_slugify(uniprot_id)),
                                               outdir=outdir,
                                               seq_ident_cutoff=seq_ident_cutoff,
                                               force_rerun=force_rerun)

    new_pdbs = []
    if not hits:
        log.debug('{}, {}: no PDB/chain pairs mapped'.format(self.id, uniprot_id))
        return new_pdbs

    pdb_only_keys = ['experimental_method', 'resolution']
    chain_only_keys = ['coverage', 'start', 'end', 'unp_start', 'unp_end']

    for rank, hit in enumerate(hits, start=1):
        pdb_id = str(hit['pdb_id'].lower())
        chain_id = str(hit['chain_id'])
        new_pdbs.append(pdb_id)

        # load_pdb appends this structure to self.structures
        structure = self.load_pdb(pdb_id=pdb_id, mapped_chains=chain_id)
        # Track the chain too so the best_structures metadata can be stored on it
        structure.add_chain_ids(chain_id)

        structure.update(hit, only_keys=pdb_only_keys)
        chain = structure.chains.get_by_id(chain_id)
        chain.update(hit, only_keys=chain_only_keys)
        chain.update({'rank': rank})

    log.debug('{}, {}: {} PDB/chain pairs mapped'.format(self.id, uniprot_id, len(hits)))
    return new_pdbs
"""Map the representative sequence's UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to the protein sequences folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID
"""
if not self.representative_sequence:
log.error('{}: no representative sequence set, cannot use best structures API'.format(self.id))
return None
# Check if a UniProt ID is attached to the representative sequence
uniprot_id = self.representative_sequence.uniprot
if not uniprot_id:
log.error('{}: no representative UniProt ID set, cannot use best structures API'.format(self.id))
return None
if '-' in uniprot_id:
log.debug('{}: "-" detected in UniProt ID, isoform specific sequences are ignored with best structures API'.format(self.id))
uniprot_id = uniprot_id.split('-')[0]
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
best_structures = ssbio.databases.pdb.best_structures(uniprot_id,
outname='{}_best_structures'.format(custom_slugify(uniprot_id)),
outdir=outdir,
seq_ident_cutoff=seq_ident_cutoff,
force_rerun=force_rerun)
new_pdbs = []
if best_structures:
rank = 1
for best_structure in best_structures:
currpdb = str(best_structure['pdb_id'].lower())
new_pdbs.append(currpdb)
currchain = str(best_structure['chain_id'])
# load_pdb will append this protein to the list
new_pdb = self.load_pdb(pdb_id=currpdb, mapped_chains=currchain)
# Also add this chain to the chains attribute so we can save the
# info we get from best_structures
new_pdb.add_chain_ids(currchain)
pdb_specific_keys = ['experimental_method', 'resolution']
chain_specific_keys = ['coverage', 'start', 'end', 'unp_start', 'unp_end']
new_pdb.update(best_structure, only_keys=pdb_specific_keys)
new_chain = new_pdb.chains.get_by_id(currchain)
new_chain.update(best_structure, only_keys=chain_specific_keys)
new_chain.update({'rank': rank})
rank += 1
log.debug('{}, {}: {} PDB/chain pairs mapped'.format(self.id, uniprot_id, len(best_structures)))
else:
log.debug('{}, {}: no PDB/chain pairs mapped'.format(self.id, uniprot_id))
return new_pdbs | [
"def",
"map_uniprot_to_pdb",
"(",
"self",
",",
"seq_ident_cutoff",
"=",
"0.0",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"representative_sequence",
":",
"log",
".",
"error",
"(",
"'{}: no representative s... | Map the representative sequence's UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to the protein sequences folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID | [
"Map",
"the",
"representative",
"sequence",
"s",
"UniProt",
"ID",
"to",
"PDB",
"IDs",
"using",
"the",
"PDBe",
"Best",
"Structures",
"API",
".",
"Will",
"save",
"a",
"JSON",
"file",
"of",
"the",
"results",
"to",
"the",
"protein",
"sequences",
"folder",
"."
... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L937-L1008 | train | 29,027 |
def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None, is_experimental=True,
             set_as_representative=False, representative_chain=None, force_rerun=False):
    """Load a structure ID (and optional structure file) into the ``structures`` attribute.

    Args:
        pdb_id (str): PDB ID
        mapped_chains (str, list): Chain ID or list of IDs which you are interested in
        pdb_file (str): Path to PDB file
        file_type (str): Type of PDB file
        is_experimental (bool): If this structure file is experimental
        set_as_representative (bool): If this structure should be set as the representative structure
        representative_chain (str): If ``set_as_representative`` is ``True``, the representative chain ID
        force_rerun (bool): If the PDB should be reloaded when already present in the structures list

    Returns:
        PDBProp: The object now contained in the ``structures`` attribute
    """
    if self.structures.has_id(pdb_id):
        if force_rerun:
            # Drop the stale entry so a fresh one is created below
            self.structures.remove(self.structures.get_by_id(pdb_id))
        else:
            # Reuse the existing entry, only refreshing the file path / chain info
            log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))
            pdb = self.structures.get_by_id(pdb_id)
            if pdb_file:
                pdb.load_structure_path(pdb_file, file_type)
            if mapped_chains:
                pdb.add_mapped_chain_ids(mapped_chains)

    if not self.structures.has_id(pdb_id):
        # Experimental structures carry PDB-specific metadata handling
        prop_cls = PDBProp if is_experimental else StructProp
        pdb = prop_cls(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
        self.structures.append(pdb)

    if set_as_representative:
        # Parse structure so chains are stored before setting representative
        pdb.parse_structure()
        self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)

    return self.structures.get_by_id(pdb_id)
set_as_representative=False, representative_chain=None, force_rerun=False):
"""Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute
"""
if self.structures.has_id(pdb_id):
# Remove the structure if set to force rerun
if force_rerun:
existing = self.structures.get_by_id(pdb_id)
self.structures.remove(existing)
# Otherwise just retrieve it
else:
log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))
pdb = self.structures.get_by_id(pdb_id)
if pdb_file:
pdb.load_structure_path(pdb_file, file_type)
if mapped_chains:
pdb.add_mapped_chain_ids(mapped_chains)
# Create a new StructProp entry
if not self.structures.has_id(pdb_id):
if is_experimental:
pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
else:
pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
self.structures.append(pdb)
if set_as_representative:
# Parse structure so chains are stored before setting representative
pdb.parse_structure()
self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)
return self.structures.get_by_id(pdb_id) | [
"def",
"load_pdb",
"(",
"self",
",",
"pdb_id",
",",
"mapped_chains",
"=",
"None",
",",
"pdb_file",
"=",
"None",
",",
"file_type",
"=",
"None",
",",
"is_experimental",
"=",
"True",
",",
"set_as_representative",
"=",
"False",
",",
"representative_chain",
"=",
... | Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute | [
"Load",
"a",
"structure",
"ID",
"and",
"optional",
"structure",
"file",
"into",
"the",
"structures",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1033-L1079 | train | 29,028 |
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to the protein structures directory.

    Args:
        outdir (str): Path to output directory, if the protein structures directory is not set
            or another output directory is desired
        pdb_file_type (str): Type of PDB file to download, if not already set or another format
            is desired
        force_rerun (bool): If files should be re-downloaded if they already exist

    Returns:
        list: List of PDB IDs that were downloaded (empty if no experimental structures are mapped)

    Raises:
        ValueError: If no output directory is available.

    Todo:
        * Parse mmtf or PDB file for header information, rather than always getting the cif file
          for header info
    """
    if not outdir:
        outdir = self.structure_dir
        if not outdir:
            raise ValueError('Output directory must be specified')
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type

    downloaded_pdb_ids = []

    # Bug fix: this branch previously fell through with an implicit ``return None``
    # even though the docstring promises a list - return an empty list instead.
    if self.num_structures_experimental == 0:
        log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
        return downloaded_pdb_ids

    for s in self.get_experimental_structures():
        log.debug('{}: downloading structure file from the PDB...'.format(s.id))
        s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun,
                                  load_header_metadata=True)
        downloaded_pdb_ids.append(s.id)

    return downloaded_pdb_ids
"""Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
"""
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
# Check if we have any PDBs
if self.num_structures_experimental == 0:
log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
return
downloaded_pdb_ids = []
# Download the PDBs
for s in self.get_experimental_structures():
log.debug('{}: downloading structure file from the PDB...'.format(s.id))
s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
downloaded_pdb_ids.append(s.id)
return downloaded_pdb_ids | [
"def",
"pdb_downloader_and_metadata",
"(",
"self",
",",
"outdir",
"=",
"None",
",",
"pdb_file_type",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"self",
".",
"structure_dir",
"if",
"not",
"outdir",
":... | Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info | [
"Download",
"ALL",
"mapped",
"experimental",
"structures",
"to",
"the",
"protein",
"structures",
"directory",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1204-L1240 | train | 29,029 |
SBRG/ssbio | ssbio/core/protein.py | Protein._get_seqprop_to_seqprop_alignment | def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2):
"""Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop"""
if isinstance(seqprop1, str):
seqprop1_id = seqprop1
else:
seqprop1_id = seqprop1.id
if isinstance(seqprop2, str):
seqprop2_id = seqprop2
else:
seqprop2_id = seqprop2.id
aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id)) | python | def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2):
"""Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop"""
if isinstance(seqprop1, str):
seqprop1_id = seqprop1
else:
seqprop1_id = seqprop1.id
if isinstance(seqprop2, str):
seqprop2_id = seqprop2
else:
seqprop2_id = seqprop2.id
aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id)) | [
"def",
"_get_seqprop_to_seqprop_alignment",
"(",
"self",
",",
"seqprop1",
",",
"seqprop2",
")",
":",
"if",
"isinstance",
"(",
"seqprop1",
",",
"str",
")",
":",
"seqprop1_id",
"=",
"seqprop1",
"else",
":",
"seqprop1_id",
"=",
"seqprop1",
".",
"id",
"if",
"isi... | Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop | [
"Return",
"the",
"alignment",
"stored",
"in",
"self",
".",
"sequence_alignments",
"given",
"a",
"seqprop",
"+",
"another",
"seqprop"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1438-L1455 | train | 29,030 |
SBRG/ssbio | ssbio/core/protein.py | Protein.map_seqprop_resnums_to_seqprop_resnums | def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):
"""Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.
Args:
resnums (int, list): Residue numbers in seqprop1
seqprop1 (SeqProp): SeqProp object the resnums match to
seqprop2 (SeqProp): SeqProp object you want to map the resnums to
Returns:
dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this
dictionary, that means the residue number cannot be mapped according to alignment!
"""
resnums = ssbio.utils.force_list(resnums)
alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)
mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums,
a_aln=alignment[0],
b_aln=alignment[1])
return mapped | python | def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):
"""Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.
Args:
resnums (int, list): Residue numbers in seqprop1
seqprop1 (SeqProp): SeqProp object the resnums match to
seqprop2 (SeqProp): SeqProp object you want to map the resnums to
Returns:
dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this
dictionary, that means the residue number cannot be mapped according to alignment!
"""
resnums = ssbio.utils.force_list(resnums)
alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)
mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums,
a_aln=alignment[0],
b_aln=alignment[1])
return mapped | [
"def",
"map_seqprop_resnums_to_seqprop_resnums",
"(",
"self",
",",
"resnums",
",",
"seqprop1",
",",
"seqprop2",
")",
":",
"resnums",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"resnums",
")",
"alignment",
"=",
"self",
".",
"_get_seqprop_to_seqprop_alignme... | Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.
Args:
resnums (int, list): Residue numbers in seqprop1
seqprop1 (SeqProp): SeqProp object the resnums match to
seqprop2 (SeqProp): SeqProp object you want to map the resnums to
Returns:
dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this
dictionary, that means the residue number cannot be mapped according to alignment! | [
"Map",
"a",
"residue",
"number",
"in",
"any",
"SeqProp",
"to",
"another",
"SeqProp",
"using",
"the",
"pairwise",
"alignment",
"information",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1463-L1484 | train | 29,031 |
SBRG/ssbio | ssbio/core/protein.py | Protein._get_seqprop_to_structprop_alignment | def _get_seqprop_to_structprop_alignment(self, seqprop, structprop, chain_id):
"""Return the alignment stored in self.sequence_alignments given a seqprop, structuprop, and chain_id"""
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: structure alignment not found, please run the alignment first'.format(aln_id)) | python | def _get_seqprop_to_structprop_alignment(self, seqprop, structprop, chain_id):
"""Return the alignment stored in self.sequence_alignments given a seqprop, structuprop, and chain_id"""
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: structure alignment not found, please run the alignment first'.format(aln_id)) | [
"def",
"_get_seqprop_to_structprop_alignment",
"(",
"self",
",",
"seqprop",
",",
"structprop",
",",
"chain_id",
")",
":",
"full_structure_id",
"=",
"'{}-{}'",
".",
"format",
"(",
"structprop",
".",
"id",
",",
"chain_id",
")",
"aln_id",
"=",
"'{}_{}'",
".",
"fo... | Return the alignment stored in self.sequence_alignments given a seqprop, structuprop, and chain_id | [
"Return",
"the",
"alignment",
"stored",
"in",
"self",
".",
"sequence_alignments",
"given",
"a",
"seqprop",
"structuprop",
"and",
"chain_id"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1486-L1495 | train | 29,032 |
SBRG/ssbio | ssbio/core/protein.py | Protein.check_structure_chain_quality | def check_structure_chain_quality(self, seqprop, structprop, chain_id,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True):
"""Report if a structure's chain meets the defined cutoffs for sequence quality."""
alignment = self._get_seqprop_to_structprop_alignment(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
# Compare sequence to structure's sequence using the alignment
chain_passes_quality_check = ssbio.protein.structure.properties.quality.sequence_checker(reference_seq_aln=alignment[0],
structure_seq_aln=alignment[1],
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved)
return chain_passes_quality_check | python | def check_structure_chain_quality(self, seqprop, structprop, chain_id,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True):
"""Report if a structure's chain meets the defined cutoffs for sequence quality."""
alignment = self._get_seqprop_to_structprop_alignment(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
# Compare sequence to structure's sequence using the alignment
chain_passes_quality_check = ssbio.protein.structure.properties.quality.sequence_checker(reference_seq_aln=alignment[0],
structure_seq_aln=alignment[1],
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved)
return chain_passes_quality_check | [
"def",
"check_structure_chain_quality",
"(",
"self",
",",
"seqprop",
",",
"structprop",
",",
"chain_id",
",",
"seq_ident_cutoff",
"=",
"0.5",
",",
"allow_missing_on_termini",
"=",
"0.2",
",",
"allow_mutants",
"=",
"True",
",",
"allow_deletions",
"=",
"False",
",",... | Report if a structure's chain meets the defined cutoffs for sequence quality. | [
"Report",
"if",
"a",
"structure",
"s",
"chain",
"meets",
"the",
"defined",
"cutoffs",
"for",
"sequence",
"quality",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1503-L1519 | train | 29,033 |
SBRG/ssbio | ssbio/core/protein.py | Protein.find_representative_chain | def find_representative_chain(self, seqprop, structprop, chains_to_check=None,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True):
"""Set and return the representative chain based on sequence quality checks to a reference sequence.
Args:
seqprop (SeqProp): SeqProp object to compare to chain sequences
structprop (StructProp): StructProp object with chains to compare to in the ``mapped_chains`` attribute. If
there are none present, ``chains_to_check`` can be specified, otherwise all chains are checked.
chains_to_check (str, list): Chain ID or IDs to check for sequence coverage quality
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be
ignored when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only
residues 5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
Returns:
str: the best chain ID, if any
"""
if chains_to_check:
chains_to_check = ssbio.utils.force_list(chains_to_check)
elif structprop.mapped_chains:
chains_to_check = structprop.mapped_chains
else:
log.warning('{}-{}: no chains specified in structure to align to, all chains will be checked'.format(seqprop.id,
structprop.id))
chains_to_check = structprop.chains.list_attr('id')
for chain_id in chains_to_check:
try:
# Compare sequence to structure's sequence using the alignment
found_good_chain = self.check_structure_chain_quality(seqprop=seqprop, structprop=structprop, chain_id=chain_id,
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved)
except ValueError:
log.error('{}-{}: unable to check chain "{}"'.format(seqprop.id, structprop.id, chain_id))
found_good_chain = False
# If found_good_chain = True, return chain ID
# If not, move on to the next potential chain
if found_good_chain:
stats = self.get_seqprop_to_structprop_alignment_stats(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
self.representative_chain = chain_id
self.representative_chain_seq_coverage = stats['percent_identity']
return chain_id
else:
log.debug('{}: no chains meet quality checks'.format(structprop.id))
return None | python | def find_representative_chain(self, seqprop, structprop, chains_to_check=None,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True):
"""Set and return the representative chain based on sequence quality checks to a reference sequence.
Args:
seqprop (SeqProp): SeqProp object to compare to chain sequences
structprop (StructProp): StructProp object with chains to compare to in the ``mapped_chains`` attribute. If
there are none present, ``chains_to_check`` can be specified, otherwise all chains are checked.
chains_to_check (str, list): Chain ID or IDs to check for sequence coverage quality
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be
ignored when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only
residues 5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
Returns:
str: the best chain ID, if any
"""
if chains_to_check:
chains_to_check = ssbio.utils.force_list(chains_to_check)
elif structprop.mapped_chains:
chains_to_check = structprop.mapped_chains
else:
log.warning('{}-{}: no chains specified in structure to align to, all chains will be checked'.format(seqprop.id,
structprop.id))
chains_to_check = structprop.chains.list_attr('id')
for chain_id in chains_to_check:
try:
# Compare sequence to structure's sequence using the alignment
found_good_chain = self.check_structure_chain_quality(seqprop=seqprop, structprop=structprop, chain_id=chain_id,
seq_ident_cutoff=seq_ident_cutoff,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved)
except ValueError:
log.error('{}-{}: unable to check chain "{}"'.format(seqprop.id, structprop.id, chain_id))
found_good_chain = False
# If found_good_chain = True, return chain ID
# If not, move on to the next potential chain
if found_good_chain:
stats = self.get_seqprop_to_structprop_alignment_stats(seqprop=seqprop, structprop=structprop, chain_id=chain_id)
self.representative_chain = chain_id
self.representative_chain_seq_coverage = stats['percent_identity']
return chain_id
else:
log.debug('{}: no chains meet quality checks'.format(structprop.id))
return None | [
"def",
"find_representative_chain",
"(",
"self",
",",
"seqprop",
",",
"structprop",
",",
"chains_to_check",
"=",
"None",
",",
"seq_ident_cutoff",
"=",
"0.5",
",",
"allow_missing_on_termini",
"=",
"0.2",
",",
"allow_mutants",
"=",
"True",
",",
"allow_deletions",
"=... | Set and return the representative chain based on sequence quality checks to a reference sequence.
Args:
seqprop (SeqProp): SeqProp object to compare to chain sequences
structprop (StructProp): StructProp object with chains to compare to in the ``mapped_chains`` attribute. If
there are none present, ``chains_to_check`` can be specified, otherwise all chains are checked.
chains_to_check (str, list): Chain ID or IDs to check for sequence coverage quality
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be
ignored when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only
residues 5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
Returns:
str: the best chain ID, if any | [
"Set",
"and",
"return",
"the",
"representative",
"chain",
"based",
"on",
"sequence",
"quality",
"checks",
"to",
"a",
"reference",
"sequence",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1521-L1577 | train | 29,034 |
SBRG/ssbio | ssbio/core/protein.py | Protein._map_seqprop_resnums_to_structprop_chain_index | def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
if self.representative_structure:
if structprop.id == self.representative_structure.id:
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if access_key not in seqprop.letter_annotations:
raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? '
'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
chain_index_mapping = seqprop.letter_annotations[access_key]
resnum_to_chain_index = {}
for x in resnums:
ix = chain_index_mapping[x - 1] - 1
if np.isnan(ix):
log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id,
chain_id,
x))
else:
resnum_to_chain_index[int(x)] = int(ix)
return resnum_to_chain_index | python | def _map_seqprop_resnums_to_structprop_chain_index(self, resnums, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
if self.representative_structure:
if structprop.id == self.representative_structure.id:
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if access_key not in seqprop.letter_annotations:
raise KeyError('{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? '
'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
chain_index_mapping = seqprop.letter_annotations[access_key]
resnum_to_chain_index = {}
for x in resnums:
ix = chain_index_mapping[x - 1] - 1
if np.isnan(ix):
log.warning('{}-{}, {}: no equivalent residue found in structure sequence'.format(structprop.id,
chain_id,
x))
else:
resnum_to_chain_index[int(x)] = int(ix)
return resnum_to_chain_index | [
"def",
"_map_seqprop_resnums_to_structprop_chain_index",
"(",
"self",
",",
"resnums",
",",
"seqprop",
"=",
"None",
",",
"structprop",
"=",
"None",
",",
"chain_id",
"=",
"None",
",",
"use_representatives",
"=",
"False",
")",
":",
"resnums",
"=",
"ssbio",
".",
"... | Map a residue number in any SeqProp to the mapping index in the StructProp + chain ID. This does not provide
a mapping to residue number, only a mapping to the index which then can be mapped to the structure resnum!
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to index
use_representatives (bool): If representative sequence/structure/chain should be used in mapping
Returns:
dict: Mapping of resnums to indices | [
"Map",
"a",
"residue",
"number",
"in",
"any",
"SeqProp",
"to",
"the",
"mapping",
"index",
"in",
"the",
"StructProp",
"+",
"chain",
"ID",
".",
"This",
"does",
"not",
"provide",
"a",
"mapping",
"to",
"residue",
"number",
"only",
"a",
"mapping",
"to",
"the"... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1579-L1633 | train | 29,035 |
SBRG/ssbio | ssbio/core/protein.py | Protein.map_seqprop_resnums_to_structprop_resnums | def map_seqprop_resnums_to_structprop_resnums(self, resnums, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Map a residue number in any SeqProp to the structure's residue number for a specified chain.
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of sequence residue numbers to structure residue numbers
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
mapping_to_repchain_index = self._map_seqprop_resnums_to_structprop_chain_index(resnums=resnums,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
chain = structprop.chains.get_by_id(chain_id)
chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']
final_mapping = {}
for k, v in mapping_to_repchain_index.items():
k = int(k)
rn = chain_structure_resnum_mapping[v]
if rn == float('Inf'):
log.warning('{}-{}, {}: structure file does not contain coordinates for this residue'.format(structprop.id,
chain_id,
k))
else:
rn = int(rn)
final_mapping[k] = rn
index_of_structure_resnum = chain_structure_resnum_mapping.index(rn)
# Additionally report if residues are the same - they could be different in the structure though
format_data = {'seqprop_id' : seqprop.id,
'seqprop_resid' : seqprop[k - 1],
'seqprop_resnum' : k,
'structprop_id' : structprop.id,
'structprop_chid' : chain_id,
'structprop_resid' : chain.seq_record[index_of_structure_resnum],
'structprop_resnum': rn}
if seqprop[k-1] != chain.seq_record[index_of_structure_resnum]:
log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}. NOTE: this may be due to '
'structural differences'.format(**format_data))
else:
log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}'.format(**format_data))
return final_mapping | python | def map_seqprop_resnums_to_structprop_resnums(self, resnums, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Map a residue number in any SeqProp to the structure's residue number for a specified chain.
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of sequence residue numbers to structure residue numbers
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
mapping_to_repchain_index = self._map_seqprop_resnums_to_structprop_chain_index(resnums=resnums,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
chain = structprop.chains.get_by_id(chain_id)
chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']
final_mapping = {}
for k, v in mapping_to_repchain_index.items():
k = int(k)
rn = chain_structure_resnum_mapping[v]
if rn == float('Inf'):
log.warning('{}-{}, {}: structure file does not contain coordinates for this residue'.format(structprop.id,
chain_id,
k))
else:
rn = int(rn)
final_mapping[k] = rn
index_of_structure_resnum = chain_structure_resnum_mapping.index(rn)
# Additionally report if residues are the same - they could be different in the structure though
format_data = {'seqprop_id' : seqprop.id,
'seqprop_resid' : seqprop[k - 1],
'seqprop_resnum' : k,
'structprop_id' : structprop.id,
'structprop_chid' : chain_id,
'structprop_resid' : chain.seq_record[index_of_structure_resnum],
'structprop_resnum': rn}
if seqprop[k-1] != chain.seq_record[index_of_structure_resnum]:
log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}. NOTE: this may be due to '
'structural differences'.format(**format_data))
else:
log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}'.format(**format_data))
return final_mapping | [
"def",
"map_seqprop_resnums_to_structprop_resnums",
"(",
"self",
",",
"resnums",
",",
"seqprop",
"=",
"None",
",",
"structprop",
"=",
"None",
",",
"chain_id",
"=",
"None",
",",
"use_representatives",
"=",
"False",
")",
":",
"resnums",
"=",
"ssbio",
".",
"utils... | Map a residue number in any SeqProp to the structure's residue number for a specified chain.
Args:
resnums (int, list): Residue numbers in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map to
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of sequence residue numbers to structure residue numbers | [
"Map",
"a",
"residue",
"number",
"in",
"any",
"SeqProp",
"to",
"the",
"structure",
"s",
"residue",
"number",
"for",
"a",
"specified",
"chain",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1635-L1705 | train | 29,036 |
SBRG/ssbio | ssbio/core/protein.py | Protein.map_structprop_resnums_to_seqprop_resnums | def map_structprop_resnums_to_seqprop_resnums(self, resnums, structprop=None, chain_id=None, seqprop=None,
use_representatives=False):
"""Map a residue number in any StructProp + chain ID to any SeqProp's residue number.
Args:
resnums (int, list): Residue numbers in the structure
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map from
seqprop (SeqProp): SeqProp object
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of structure residue numbers to sequence residue numbers
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
if structprop.id == self.representative_structure.id:
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if access_key not in seqprop.letter_annotations:
raise KeyError(
'{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? '
'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
chain = structprop.chains.get_by_id(chain_id)
chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']
final_mapping = {}
for resnum in resnums:
resnum = int(resnum)
resnum_index = chain_structure_resnum_mapping.index(resnum)
struct_res_singleaa = structprop.chains.get_by_id(chain_id).seq_record[resnum_index]
# if resnum not in seqprop.letter_annotations[access_key]:
# log.warning('{}-{} -> {}: unable to map residue {} from structure to sequence, '
# 'skipping'.format(structprop.id, chain_id, seqprop.id, resnum))
# continue
what = seqprop.letter_annotations[access_key].index(resnum_index+1)
# TODO in progress...
seq_res_singleaa = seqprop[what]
sp_resnum = what + 1
final_mapping[resnum] = sp_resnum
# Additionally report if residues are the same - they could be different in the structure though
format_data = {'seqprop_id' : seqprop.id,
'seqprop_resid' : seq_res_singleaa,
'seqprop_resnum' : sp_resnum,
'structprop_id' : structprop.id,
'structprop_chid' : chain_id,
'structprop_resid' : struct_res_singleaa,
'structprop_resnum': resnum}
if struct_res_singleaa != seq_res_singleaa:
log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}. NOTE: this may be due to '
'structural differences'.format(**format_data))
else:
log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}'.format(**format_data))
return final_mapping | python | def map_structprop_resnums_to_seqprop_resnums(self, resnums, structprop=None, chain_id=None, seqprop=None,
use_representatives=False):
"""Map a residue number in any StructProp + chain ID to any SeqProp's residue number.
Args:
resnums (int, list): Residue numbers in the structure
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map from
seqprop (SeqProp): SeqProp object
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of structure residue numbers to sequence residue numbers
"""
resnums = ssbio.utils.force_list(resnums)
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
if structprop.id == self.representative_structure.id:
full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '')
else:
full_structure_id = '{}-{}'.format(structprop.id, chain_id)
aln_id = '{}_{}'.format(seqprop.id, full_structure_id)
access_key = '{}_chain_index'.format(aln_id)
if access_key not in seqprop.letter_annotations:
raise KeyError(
'{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? '
'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id))
chain = structprop.chains.get_by_id(chain_id)
chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums']
final_mapping = {}
for resnum in resnums:
resnum = int(resnum)
resnum_index = chain_structure_resnum_mapping.index(resnum)
struct_res_singleaa = structprop.chains.get_by_id(chain_id).seq_record[resnum_index]
# if resnum not in seqprop.letter_annotations[access_key]:
# log.warning('{}-{} -> {}: unable to map residue {} from structure to sequence, '
# 'skipping'.format(structprop.id, chain_id, seqprop.id, resnum))
# continue
what = seqprop.letter_annotations[access_key].index(resnum_index+1)
# TODO in progress...
seq_res_singleaa = seqprop[what]
sp_resnum = what + 1
final_mapping[resnum] = sp_resnum
# Additionally report if residues are the same - they could be different in the structure though
format_data = {'seqprop_id' : seqprop.id,
'seqprop_resid' : seq_res_singleaa,
'seqprop_resnum' : sp_resnum,
'structprop_id' : structprop.id,
'structprop_chid' : chain_id,
'structprop_resid' : struct_res_singleaa,
'structprop_resnum': resnum}
if struct_res_singleaa != seq_res_singleaa:
log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}. NOTE: this may be due to '
'structural differences'.format(**format_data))
else:
log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to '
'structure {structprop_id}-{structprop_chid} residue '
'{structprop_resid}{structprop_resnum}'.format(**format_data))
return final_mapping | [
"def",
"map_structprop_resnums_to_seqprop_resnums",
"(",
"self",
",",
"resnums",
",",
"structprop",
"=",
"None",
",",
"chain_id",
"=",
"None",
",",
"seqprop",
"=",
"None",
",",
"use_representatives",
"=",
"False",
")",
":",
"resnums",
"=",
"ssbio",
".",
"utils... | Map a residue number in any StructProp + chain ID to any SeqProp's residue number.
Args:
resnums (int, list): Residue numbers in the structure
structprop (StructProp): StructProp object
chain_id (str): Chain ID to map from
seqprop (SeqProp): SeqProp object
use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop,
structprop, and chain_id do not need to be defined.
Returns:
dict: Mapping of structure residue numbers to sequence residue numbers | [
"Map",
"a",
"residue",
"number",
"in",
"any",
"StructProp",
"+",
"chain",
"ID",
"to",
"any",
"SeqProp",
"s",
"residue",
"number",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1707-L1787 | train | 29,037 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_seqprop_subsequence_from_structchain_property | def get_seqprop_subsequence_from_structchain_property(self,
property_key, property_value, condition,
seqprop=None, structprop=None, chain_id=None,
use_representatives=False,
return_resnums=False):
"""Get a subsequence as a new SeqProp object given a certain property you want to find in the
given StructProp's chain's letter_annotation
This is similar to the :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property` method but instead of
filtering by the SeqProp's letter_annotation we use the StructProp annotation, and map back to the SeqProp.
Args:
seqprop (SeqRecord, SeqProp): SeqRecord or SeqProp object that has properties stored in its ``letter_annotations`` attribute
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (object): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties
"""
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
chain_prop = structprop.chains.get_by_id(chain_id)
# Get the subsequence from the structure
chain_subseq, subfeat_resnums = chain_prop.get_subsequence_from_property(property_key=property_key,
property_value=property_value,
condition=condition,
return_resnums=True) or (None, [])
if not chain_subseq:
return
# Map subsequence feature resnums back to the seqprop
mapping_dict = self.map_structprop_resnums_to_seqprop_resnums(resnums=subfeat_resnums, structprop=structprop,
chain_id=chain_id,
seqprop=seqprop,
use_representatives=use_representatives)
sub_id = '{}-{}->{}_{}_{}_{}_extracted'.format(structprop.id, chain_id, seqprop.id,
property_key, condition, property_value)
seqprop_resnums = [v for k,v in mapping_dict.items()]
new_sp = seqprop.get_subsequence(resnums=seqprop_resnums, new_id=sub_id, copy_letter_annotations=False)
if not new_sp: # XTODO: investigate errors from subsequence extraction..
return
try:
new_sp.letter_annotations = chain_subseq.letter_annotations
except TypeError:
# If the length of the mapped sequence does not match, log a warning and don't store letter_annotations
log.warning('{}: cannot store structure letter annotations in subsequence, lengths do not match. '
'Likely a deletion or insertion within the structure!'.format(sub_id))
if return_resnums:
return new_sp, seqprop_resnums
else:
return new_sp | python | def get_seqprop_subsequence_from_structchain_property(self,
property_key, property_value, condition,
seqprop=None, structprop=None, chain_id=None,
use_representatives=False,
return_resnums=False):
"""Get a subsequence as a new SeqProp object given a certain property you want to find in the
given StructProp's chain's letter_annotation
This is similar to the :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property` method but instead of
filtering by the SeqProp's letter_annotation we use the StructProp annotation, and map back to the SeqProp.
Args:
seqprop (SeqRecord, SeqProp): SeqRecord or SeqProp object that has properties stored in its ``letter_annotations`` attribute
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (object): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties
"""
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
if not structprop:
raise ValueError('No representative structure set, please specify sequence, structure, and chain ID')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Please specify sequence, structure, and chain ID')
chain_prop = structprop.chains.get_by_id(chain_id)
# Get the subsequence from the structure
chain_subseq, subfeat_resnums = chain_prop.get_subsequence_from_property(property_key=property_key,
property_value=property_value,
condition=condition,
return_resnums=True) or (None, [])
if not chain_subseq:
return
# Map subsequence feature resnums back to the seqprop
mapping_dict = self.map_structprop_resnums_to_seqprop_resnums(resnums=subfeat_resnums, structprop=structprop,
chain_id=chain_id,
seqprop=seqprop,
use_representatives=use_representatives)
sub_id = '{}-{}->{}_{}_{}_{}_extracted'.format(structprop.id, chain_id, seqprop.id,
property_key, condition, property_value)
seqprop_resnums = [v for k,v in mapping_dict.items()]
new_sp = seqprop.get_subsequence(resnums=seqprop_resnums, new_id=sub_id, copy_letter_annotations=False)
if not new_sp: # XTODO: investigate errors from subsequence extraction..
return
try:
new_sp.letter_annotations = chain_subseq.letter_annotations
except TypeError:
# If the length of the mapped sequence does not match, log a warning and don't store letter_annotations
log.warning('{}: cannot store structure letter annotations in subsequence, lengths do not match. '
'Likely a deletion or insertion within the structure!'.format(sub_id))
if return_resnums:
return new_sp, seqprop_resnums
else:
return new_sp | [
"def",
"get_seqprop_subsequence_from_structchain_property",
"(",
"self",
",",
"property_key",
",",
"property_value",
",",
"condition",
",",
"seqprop",
"=",
"None",
",",
"structprop",
"=",
"None",
",",
"chain_id",
"=",
"None",
",",
"use_representatives",
"=",
"False"... | Get a subsequence as a new SeqProp object given a certain property you want to find in the
given StructProp's chain's letter_annotation
This is similar to the :func:`ssbio.protein.sequence.seqprop.SeqProp.get_subsequence_from_property` method but instead of
filtering by the SeqProp's letter_annotation we use the StructProp annotation, and map back to the SeqProp.
Args:
seqprop (SeqRecord, SeqProp): SeqRecord or SeqProp object that has properties stored in its ``letter_annotations`` attribute
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (object): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties | [
"Get",
"a",
"subsequence",
"as",
"a",
"new",
"SeqProp",
"object",
"given",
"a",
"certain",
"property",
"you",
"want",
"to",
"find",
"in",
"the",
"given",
"StructProp",
"s",
"chain",
"s",
"letter_annotation"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1789-L1855 | train | 29,038 |
SBRG/ssbio | ssbio/core/protein.py | Protein._representative_structure_setter | def _representative_structure_setter(self, structprop, keep_chain, clean=True, keep_chemicals=None,
out_suffix='_clean', outdir=None, force_rerun=False):
"""Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.
The structure is copied because the chains stored may change, and cleaning it makes a new PDB file.
Args:
structprop (StructProp): StructProp object to set as representative
keep_chain (str): Chain ID to keep
clean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)
keep_chemicals (str, list): Keep specified chemical names
out_suffix (str): Suffix to append to clean PDB file
outdir (str): Path to output directory
Returns:
StructProp: representative structure
"""
# Set output directory for cleaned PDB file
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
# Create new ID for this representative structure, it cannot be the same as the original one
new_id = 'REP-{}'.format(structprop.id)
# Remove the previously set representative structure if set to force rerun
if self.structures.has_id(new_id):
if force_rerun:
existing = self.structures.get_by_id(new_id)
self.structures.remove(existing)
# If the structure is to be cleaned, and which chain to keep
if clean:
final_pdb = structprop.clean_structure(outdir=outdir, out_suffix=out_suffix,
keep_chemicals=keep_chemicals, keep_chains=keep_chain,
force_rerun=force_rerun)
log.debug('{}: cleaned structure and saved new file at {}'.format(structprop.id, final_pdb))
else:
final_pdb = structprop.structure_path
self.representative_structure = StructProp(ident=new_id, chains=keep_chain, mapped_chains=keep_chain,
structure_path=final_pdb, file_type='pdb')
self.representative_chain = keep_chain
self.representative_structure.update(structprop.get_dict_with_chain(chain=keep_chain),
only_keys=self.__representative_structure_attributes,
overwrite=True)
# Save the original structure ID as an extra attribute
self.representative_structure.original_structure_id = structprop.id
# Also need to parse the clean structure and save its sequence..
self.representative_structure.parse_structure()
# And finally add it to the list of structures
self.structures.append(self.representative_structure) | python | def _representative_structure_setter(self, structprop, keep_chain, clean=True, keep_chemicals=None,
out_suffix='_clean', outdir=None, force_rerun=False):
"""Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.
The structure is copied because the chains stored may change, and cleaning it makes a new PDB file.
Args:
structprop (StructProp): StructProp object to set as representative
keep_chain (str): Chain ID to keep
clean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)
keep_chemicals (str, list): Keep specified chemical names
out_suffix (str): Suffix to append to clean PDB file
outdir (str): Path to output directory
Returns:
StructProp: representative structure
"""
# Set output directory for cleaned PDB file
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
# Create new ID for this representative structure, it cannot be the same as the original one
new_id = 'REP-{}'.format(structprop.id)
# Remove the previously set representative structure if set to force rerun
if self.structures.has_id(new_id):
if force_rerun:
existing = self.structures.get_by_id(new_id)
self.structures.remove(existing)
# If the structure is to be cleaned, and which chain to keep
if clean:
final_pdb = structprop.clean_structure(outdir=outdir, out_suffix=out_suffix,
keep_chemicals=keep_chemicals, keep_chains=keep_chain,
force_rerun=force_rerun)
log.debug('{}: cleaned structure and saved new file at {}'.format(structprop.id, final_pdb))
else:
final_pdb = structprop.structure_path
self.representative_structure = StructProp(ident=new_id, chains=keep_chain, mapped_chains=keep_chain,
structure_path=final_pdb, file_type='pdb')
self.representative_chain = keep_chain
self.representative_structure.update(structprop.get_dict_with_chain(chain=keep_chain),
only_keys=self.__representative_structure_attributes,
overwrite=True)
# Save the original structure ID as an extra attribute
self.representative_structure.original_structure_id = structprop.id
# Also need to parse the clean structure and save its sequence..
self.representative_structure.parse_structure()
# And finally add it to the list of structures
self.structures.append(self.representative_structure) | [
"def",
"_representative_structure_setter",
"(",
"self",
",",
"structprop",
",",
"keep_chain",
",",
"clean",
"=",
"True",
",",
"keep_chemicals",
"=",
"None",
",",
"out_suffix",
"=",
"'_clean'",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
... | Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.
The structure is copied because the chains stored may change, and cleaning it makes a new PDB file.
Args:
structprop (StructProp): StructProp object to set as representative
keep_chain (str): Chain ID to keep
clean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)
keep_chemicals (str, list): Keep specified chemical names
out_suffix (str): Suffix to append to clean PDB file
outdir (str): Path to output directory
Returns:
StructProp: representative structure | [
"Set",
"the",
"representative",
"structure",
"by",
"1",
")",
"cleaning",
"it",
"and",
"2",
")",
"copying",
"over",
"attributes",
"of",
"the",
"original",
"structure",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L1857-L1915 | train | 29,039 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_residue_annotations | def get_residue_annotations(self, seq_resnum, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Get all residue-level annotations stored in the SeqProp ``letter_annotations`` field for a given residue number.
Uses the representative sequence, structure, and chain ID stored by default. If other properties from other
structures are desired, input the proper IDs. An alignment for the given sequence to the structure must
be present in the sequence_alignments list.
Args:
seq_resnum (int): Residue number in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
Returns:
dict: All available letter_annotations for this residue number
"""
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError('Overriding sequence, structure, and chain IDs with representatives. '
'Set use_representatives to False if custom IDs are to be used.')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives '
'to True.')
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
# log.debug('Using sequence: {}, structure: {}, chain: {}'.format(seqprop.id, structprop.id, chain_id))
# Create a new SeqFeature
f = SeqFeature(FeatureLocation(seq_resnum-1, seq_resnum))
# Get sequence properties
seq_features = f.extract(seqprop)
# Store in dictionary to return, clean it up
all_info = ssbio.utils.clean_single_dict(indict=seq_features.letter_annotations,
prepend_to_keys='seq_',
remove_keys_containing='_chain_index')
all_info['seq_resnum'] = seq_resnum
all_info['seq_residue'] = str(seq_features.seq)
if structprop:
chain = structprop.chains.get_by_id(chain_id)
# Get structure properties
mapping_to_structure_resnum = self.map_seqprop_resnums_to_structprop_resnums(resnums=seq_resnum,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
# Try finding the residue in the structure
if f.location.end.position in mapping_to_structure_resnum:
struct_resnum = mapping_to_structure_resnum[f.location.end.position]
struct_f = SeqFeature(FeatureLocation(struct_resnum-1, struct_resnum))
struct_seq_features = struct_f.extract(chain.seq_record)
struct_info = ssbio.utils.clean_single_dict(indict=struct_seq_features.letter_annotations,
prepend_to_keys='struct_',
remove_keys_containing='structure_resnums')
struct_info['struct_resnum'] = struct_resnum
struct_info['struct_residue'] = str(struct_seq_features.seq)
all_info.update(struct_info)
# Warn if residue differs from sequence
if seq_features.seq != struct_seq_features.seq:
log.warning('Sequence residue ({}{}) does not match structure residue ({}{}). '
'This may simply be due to differences in the structure'.format(seq_features.seq,
seq_resnum,
struct_seq_features.seq,
struct_resnum))
return all_info | python | def get_residue_annotations(self, seq_resnum, seqprop=None, structprop=None, chain_id=None,
use_representatives=False):
"""Get all residue-level annotations stored in the SeqProp ``letter_annotations`` field for a given residue number.
Uses the representative sequence, structure, and chain ID stored by default. If other properties from other
structures are desired, input the proper IDs. An alignment for the given sequence to the structure must
be present in the sequence_alignments list.
Args:
seq_resnum (int): Residue number in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
Returns:
dict: All available letter_annotations for this residue number
"""
if use_representatives:
if seqprop and structprop and chain_id:
raise ValueError('Overriding sequence, structure, and chain IDs with representatives. '
'Set use_representatives to False if custom IDs are to be used.')
else:
if not seqprop or not structprop or not chain_id:
raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives '
'to True.')
if use_representatives:
seqprop = self.representative_sequence
structprop = self.representative_structure
chain_id = self.representative_chain
# log.debug('Using sequence: {}, structure: {}, chain: {}'.format(seqprop.id, structprop.id, chain_id))
# Create a new SeqFeature
f = SeqFeature(FeatureLocation(seq_resnum-1, seq_resnum))
# Get sequence properties
seq_features = f.extract(seqprop)
# Store in dictionary to return, clean it up
all_info = ssbio.utils.clean_single_dict(indict=seq_features.letter_annotations,
prepend_to_keys='seq_',
remove_keys_containing='_chain_index')
all_info['seq_resnum'] = seq_resnum
all_info['seq_residue'] = str(seq_features.seq)
if structprop:
chain = structprop.chains.get_by_id(chain_id)
# Get structure properties
mapping_to_structure_resnum = self.map_seqprop_resnums_to_structprop_resnums(resnums=seq_resnum,
seqprop=seqprop,
structprop=structprop,
chain_id=chain_id,
use_representatives=use_representatives)
# Try finding the residue in the structure
if f.location.end.position in mapping_to_structure_resnum:
struct_resnum = mapping_to_structure_resnum[f.location.end.position]
struct_f = SeqFeature(FeatureLocation(struct_resnum-1, struct_resnum))
struct_seq_features = struct_f.extract(chain.seq_record)
struct_info = ssbio.utils.clean_single_dict(indict=struct_seq_features.letter_annotations,
prepend_to_keys='struct_',
remove_keys_containing='structure_resnums')
struct_info['struct_resnum'] = struct_resnum
struct_info['struct_residue'] = str(struct_seq_features.seq)
all_info.update(struct_info)
# Warn if residue differs from sequence
if seq_features.seq != struct_seq_features.seq:
log.warning('Sequence residue ({}{}) does not match structure residue ({}{}). '
'This may simply be due to differences in the structure'.format(seq_features.seq,
seq_resnum,
struct_seq_features.seq,
struct_resnum))
return all_info | [
"def",
"get_residue_annotations",
"(",
"self",
",",
"seq_resnum",
",",
"seqprop",
"=",
"None",
",",
"structprop",
"=",
"None",
",",
"chain_id",
"=",
"None",
",",
"use_representatives",
"=",
"False",
")",
":",
"if",
"use_representatives",
":",
"if",
"seqprop",
... | Get all residue-level annotations stored in the SeqProp ``letter_annotations`` field for a given residue number.
Uses the representative sequence, structure, and chain ID stored by default. If other properties from other
structures are desired, input the proper IDs. An alignment for the given sequence to the structure must
be present in the sequence_alignments list.
Args:
seq_resnum (int): Residue number in the sequence
seqprop (SeqProp): SeqProp object
structprop (StructProp): StructProp object
chain_id (str): ID of the structure's chain to get annotation from
use_representatives (bool): If the representative sequence/structure/chain IDs should be used
Returns:
dict: All available letter_annotations for this residue number | [
"Get",
"all",
"residue",
"-",
"level",
"annotations",
"stored",
"in",
"the",
"SeqProp",
"letter_annotations",
"field",
"for",
"a",
"given",
"residue",
"number",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L2310-L2390 | train | 29,040 |
SBRG/ssbio | ssbio/core/protein.py | Protein.sequence_mutation_summary | def sequence_mutation_summary(self, alignment_ids=None, alignment_type=None):
"""Summarize all mutations found in the sequence_alignments attribute.
Returns 2 dictionaries, single_counter and fingerprint_counter.
single_counter:
Dictionary of ``{point mutation: list of genes/strains}``
Example::
{
('A', 24, 'V'): ['Strain1', 'Strain2', 'Strain4'],
('R', 33, 'T'): ['Strain2']
}
Here, we report which genes/strains have the single point mutation.
fingerprint_counter:
Dictionary of ``{mutation group: list of genes/strains}``
Example::
{
(('A', 24, 'V'), ('R', 33, 'T')): ['Strain2'],
(('A', 24, 'V')): ['Strain1', 'Strain4']
}
Here, we report which genes/strains have the specific combinations (or "fingerprints") of point mutations
Args:
alignment_ids (str, list): Specified alignment ID or IDs to use
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
Returns:
dict, dict: single_counter, fingerprint_counter
"""
if alignment_ids:
ssbio.utils.force_list(alignment_ids)
if len(self.sequence_alignments) == 0:
log.error('{}: no sequence alignments'.format(self.id))
return {}, {}
fingerprint_counter = defaultdict(list)
single_counter = defaultdict(list)
for alignment in self.sequence_alignments:
# Ignore alignments if a list of identifiers is provided
if alignment_ids:
if alignment.id not in alignment_ids:
continue
# Ignore alignments if type is specified
if alignment_type:
if alignment.annotations['ssbio_type'] != alignment_type:
continue
other_sequence = alignment.annotations['b_seq']
mutations = alignment.annotations['mutations']
if mutations:
# Turn this list of mutations into a tuple so it can be a dictionary key
mutations = tuple(tuple(x) for x in mutations)
fingerprint_counter[mutations].append(other_sequence)
for m in mutations:
single_counter[m].append(other_sequence)
return dict(single_counter), dict(fingerprint_counter) | python | def sequence_mutation_summary(self, alignment_ids=None, alignment_type=None):
"""Summarize all mutations found in the sequence_alignments attribute.
Returns 2 dictionaries, single_counter and fingerprint_counter.
single_counter:
Dictionary of ``{point mutation: list of genes/strains}``
Example::
{
('A', 24, 'V'): ['Strain1', 'Strain2', 'Strain4'],
('R', 33, 'T'): ['Strain2']
}
Here, we report which genes/strains have the single point mutation.
fingerprint_counter:
Dictionary of ``{mutation group: list of genes/strains}``
Example::
{
(('A', 24, 'V'), ('R', 33, 'T')): ['Strain2'],
(('A', 24, 'V')): ['Strain1', 'Strain4']
}
Here, we report which genes/strains have the specific combinations (or "fingerprints") of point mutations
Args:
alignment_ids (str, list): Specified alignment ID or IDs to use
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
Returns:
dict, dict: single_counter, fingerprint_counter
"""
if alignment_ids:
ssbio.utils.force_list(alignment_ids)
if len(self.sequence_alignments) == 0:
log.error('{}: no sequence alignments'.format(self.id))
return {}, {}
fingerprint_counter = defaultdict(list)
single_counter = defaultdict(list)
for alignment in self.sequence_alignments:
# Ignore alignments if a list of identifiers is provided
if alignment_ids:
if alignment.id not in alignment_ids:
continue
# Ignore alignments if type is specified
if alignment_type:
if alignment.annotations['ssbio_type'] != alignment_type:
continue
other_sequence = alignment.annotations['b_seq']
mutations = alignment.annotations['mutations']
if mutations:
# Turn this list of mutations into a tuple so it can be a dictionary key
mutations = tuple(tuple(x) for x in mutations)
fingerprint_counter[mutations].append(other_sequence)
for m in mutations:
single_counter[m].append(other_sequence)
return dict(single_counter), dict(fingerprint_counter) | [
"def",
"sequence_mutation_summary",
"(",
"self",
",",
"alignment_ids",
"=",
"None",
",",
"alignment_type",
"=",
"None",
")",
":",
"if",
"alignment_ids",
":",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"alignment_ids",
")",
"if",
"len",
"(",
"self",
".",
... | Summarize all mutations found in the sequence_alignments attribute.
Returns 2 dictionaries, single_counter and fingerprint_counter.
single_counter:
Dictionary of ``{point mutation: list of genes/strains}``
Example::
{
('A', 24, 'V'): ['Strain1', 'Strain2', 'Strain4'],
('R', 33, 'T'): ['Strain2']
}
Here, we report which genes/strains have the single point mutation.
fingerprint_counter:
Dictionary of ``{mutation group: list of genes/strains}``
Example::
{
(('A', 24, 'V'), ('R', 33, 'T')): ['Strain2'],
(('A', 24, 'V')): ['Strain1', 'Strain4']
}
Here, we report which genes/strains have the specific combinations (or "fingerprints") of point mutations
Args:
alignment_ids (str, list): Specified alignment ID or IDs to use
alignment_type (str): Specified alignment type contained in the ``annotation`` field of an alignment object,
``seqalign`` or ``structalign`` are the current types.
Returns:
dict, dict: single_counter, fingerprint_counter | [
"Summarize",
"all",
"mutations",
"found",
"in",
"the",
"sequence_alignments",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L2392-L2459 | train | 29,041 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_all_pdbflex_info | def get_all_pdbflex_info(self):
"""Gets ALL PDBFlex entries for all mapped structures, then stores the ones that match the repseq length
Ideas:
- maybe first check for quality of structure and then retrieve the pdbflex entry
- not sure which sequence is used in pdbflex
"""
# XTODO: documentation
log.debug('{}: representative sequence length'.format(self.representative_sequence.seq_len))
for s in self.get_experimental_structures():
log.debug('{};{}: chains matching protein {}'.format(s.id, s.mapped_chains, self.id))
s.download_structure_file(outdir=self.structure_dir, file_type='mmtf')
# s.parse_structure()
for c in s.mapped_chains:
# log.debug('{}: sequence length of chain {}'.format(len(s.chains.get_by_id(c).seq_record), c))
# Retrieve PDBFlex stats
stats = ssbio.databases.pdbflex.get_pdbflex_info(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
parent = stats['parentClusterID']
if parent:
# Retrieve PDBFlex representative PDBs
reps = ssbio.databases.pdbflex.get_pdbflex_representatives(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
# Store general statistics in annotations
parent_stats_key = 'structural_flexibility_stats_{}_parent-pdbflex'.format(parent)
parent_reps_key = 'structural_flexibility_representatives_{}_parent-pdbflex'.format(parent)
if parent_stats_key not in self.representative_sequence.annotations or parent_reps_key not in self.representative_sequence.annotations:
self.representative_sequence.annotations[parent_stats_key] = stats
self.representative_sequence.annotations[parent_reps_key] = reps
log.debug('{}: stored PDB Flex stats in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
else:
log.info(
'{}: existing PDB Flex stats already in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
# Retrieve PDBFlex RMSDs
rmsd = ssbio.databases.pdbflex.get_pdbflex_rmsd_profile(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
log.info('{}: sequence length reported in PDB Flex'.format(len(rmsd['profile'])))
# Store residue specific RMSDs in letter_annotations
parent_key = 'rmsd_{}_parent-pdbflex'.format(parent)
if parent_key not in self.representative_sequence.letter_annotations:
self.representative_sequence.letter_annotations[parent_key] = rmsd['profile']
log.info('{}: stored PDB Flex RMSD in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
else:
log.info(
'{}: existing PDB Flex RMSD already in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent)) | python | def get_all_pdbflex_info(self):
"""Gets ALL PDBFlex entries for all mapped structures, then stores the ones that match the repseq length
Ideas:
- maybe first check for quality of structure and then retrieve the pdbflex entry
- not sure which sequence is used in pdbflex
"""
# XTODO: documentation
log.debug('{}: representative sequence length'.format(self.representative_sequence.seq_len))
for s in self.get_experimental_structures():
log.debug('{};{}: chains matching protein {}'.format(s.id, s.mapped_chains, self.id))
s.download_structure_file(outdir=self.structure_dir, file_type='mmtf')
# s.parse_structure()
for c in s.mapped_chains:
# log.debug('{}: sequence length of chain {}'.format(len(s.chains.get_by_id(c).seq_record), c))
# Retrieve PDBFlex stats
stats = ssbio.databases.pdbflex.get_pdbflex_info(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
parent = stats['parentClusterID']
if parent:
# Retrieve PDBFlex representative PDBs
reps = ssbio.databases.pdbflex.get_pdbflex_representatives(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
# Store general statistics in annotations
parent_stats_key = 'structural_flexibility_stats_{}_parent-pdbflex'.format(parent)
parent_reps_key = 'structural_flexibility_representatives_{}_parent-pdbflex'.format(parent)
if parent_stats_key not in self.representative_sequence.annotations or parent_reps_key not in self.representative_sequence.annotations:
self.representative_sequence.annotations[parent_stats_key] = stats
self.representative_sequence.annotations[parent_reps_key] = reps
log.debug('{}: stored PDB Flex stats in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
else:
log.info(
'{}: existing PDB Flex stats already in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
# Retrieve PDBFlex RMSDs
rmsd = ssbio.databases.pdbflex.get_pdbflex_rmsd_profile(pdb_id=s.id, chain_id=c, outdir=self.structure_dir)
log.info('{}: sequence length reported in PDB Flex'.format(len(rmsd['profile'])))
# Store residue specific RMSDs in letter_annotations
parent_key = 'rmsd_{}_parent-pdbflex'.format(parent)
if parent_key not in self.representative_sequence.letter_annotations:
self.representative_sequence.letter_annotations[parent_key] = rmsd['profile']
log.info('{}: stored PDB Flex RMSD in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent))
else:
log.info(
'{}: existing PDB Flex RMSD already in representative sequence for PDB parent {}'.format(
self.representative_sequence.id,
parent)) | [
"def",
"get_all_pdbflex_info",
"(",
"self",
")",
":",
"# XTODO: documentation",
"log",
".",
"debug",
"(",
"'{}: representative sequence length'",
".",
"format",
"(",
"self",
".",
"representative_sequence",
".",
"seq_len",
")",
")",
"for",
"s",
"in",
"self",
".",
... | Gets ALL PDBFlex entries for all mapped structures, then stores the ones that match the repseq length
Ideas:
- maybe first check for quality of structure and then retrieve the pdbflex entry
- not sure which sequence is used in pdbflex | [
"Gets",
"ALL",
"PDBFlex",
"entries",
"for",
"all",
"mapped",
"structures",
"then",
"stores",
"the",
"ones",
"that",
"match",
"the",
"repseq",
"length"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L2872-L2930 | train | 29,042 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_generic_subseq_2D | def get_generic_subseq_2D(protein, cutoff, prop, condition):
"""Get a subsequence from REPSEQ based on a property stored in REPSEQ.letter_annotations"""
subseq, subseq_resnums = protein.representative_sequence.get_subsequence_from_property(property_key=prop,
property_value=cutoff,
condition=condition,
return_resnums=True) or (
None, [])
return {'subseq_len': len(subseq_resnums), 'subseq': subseq, 'subseq_resnums': subseq_resnums} | python | def get_generic_subseq_2D(protein, cutoff, prop, condition):
"""Get a subsequence from REPSEQ based on a property stored in REPSEQ.letter_annotations"""
subseq, subseq_resnums = protein.representative_sequence.get_subsequence_from_property(property_key=prop,
property_value=cutoff,
condition=condition,
return_resnums=True) or (
None, [])
return {'subseq_len': len(subseq_resnums), 'subseq': subseq, 'subseq_resnums': subseq_resnums} | [
"def",
"get_generic_subseq_2D",
"(",
"protein",
",",
"cutoff",
",",
"prop",
",",
"condition",
")",
":",
"subseq",
",",
"subseq_resnums",
"=",
"protein",
".",
"representative_sequence",
".",
"get_subsequence_from_property",
"(",
"property_key",
"=",
"prop",
",",
"p... | Get a subsequence from REPSEQ based on a property stored in REPSEQ.letter_annotations | [
"Get",
"a",
"subsequence",
"from",
"REPSEQ",
"based",
"on",
"a",
"property",
"stored",
"in",
"REPSEQ",
".",
"letter_annotations"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L2987-L2995 | train | 29,043 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_generic_subseq_3D | def get_generic_subseq_3D(protein, cutoff, prop, condition):
"""Get a subsequence from REPSEQ based on a property stored in REPSTRUCT.REPCHAIN.letter_annotations"""
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return {'subseq_len': 0, 'subseq': None, 'subseq_resnums': []}
subseq, subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(property_key=prop,
property_value=cutoff,
condition=condition,
use_representatives=True,
return_resnums=True) or (
None, [])
return {'subseq_len': len(subseq_resnums), 'subseq': subseq, 'subseq_resnums': subseq_resnums} | python | def get_generic_subseq_3D(protein, cutoff, prop, condition):
"""Get a subsequence from REPSEQ based on a property stored in REPSTRUCT.REPCHAIN.letter_annotations"""
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return {'subseq_len': 0, 'subseq': None, 'subseq_resnums': []}
subseq, subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(property_key=prop,
property_value=cutoff,
condition=condition,
use_representatives=True,
return_resnums=True) or (
None, [])
return {'subseq_len': len(subseq_resnums), 'subseq': subseq, 'subseq_resnums': subseq_resnums} | [
"def",
"get_generic_subseq_3D",
"(",
"protein",
",",
"cutoff",
",",
"prop",
",",
"condition",
")",
":",
"if",
"not",
"protein",
".",
"representative_structure",
":",
"log",
".",
"error",
"(",
"'{}: no representative structure, cannot search for subseq'",
".",
"format"... | Get a subsequence from REPSEQ based on a property stored in REPSTRUCT.REPCHAIN.letter_annotations | [
"Get",
"a",
"subsequence",
"from",
"REPSEQ",
"based",
"on",
"a",
"property",
"stored",
"in",
"REPSTRUCT",
".",
"REPCHAIN",
".",
"letter_annotations"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L2997-L3010 | train | 29,044 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_combo_subseq_within_2_5D | def get_combo_subseq_within_2_5D(protein, props, within, filter_resnums=None):
"""Get a subsequence from REPSEQ based on multiple features stored in REPSEQ and within the set distance in REPSTRUCT.REPCHAIN"""
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return {'subseq_len': 0, 'subseq': None, 'subseq_resnums': []}
all_resnums = []
for prop in props:
tmp_results = protein.get_generic_subseq_within_2_5D(prop_name=prop, within=within,
filter_resnums=filter_resnums)
all_resnums.extend(tmp_results['subseq_resnums'])
final_resnums = list(set(all_resnums))
sub_id = '{}-{}->{}_within_{}_{}_extracted'.format(protein.representative_structure.id,
protein.representative_chain,
protein.representative_sequence.id,
within, props)
new_sp = protein.representative_sequence.get_subsequence(resnums=final_resnums, new_id=sub_id,
copy_letter_annotations=False)
return {'subseq_len': len(final_resnums), 'subseq': new_sp, 'subseq_resnums': final_resnums} | python | def get_combo_subseq_within_2_5D(protein, props, within, filter_resnums=None):
"""Get a subsequence from REPSEQ based on multiple features stored in REPSEQ and within the set distance in REPSTRUCT.REPCHAIN"""
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return {'subseq_len': 0, 'subseq': None, 'subseq_resnums': []}
all_resnums = []
for prop in props:
tmp_results = protein.get_generic_subseq_within_2_5D(prop_name=prop, within=within,
filter_resnums=filter_resnums)
all_resnums.extend(tmp_results['subseq_resnums'])
final_resnums = list(set(all_resnums))
sub_id = '{}-{}->{}_within_{}_{}_extracted'.format(protein.representative_structure.id,
protein.representative_chain,
protein.representative_sequence.id,
within, props)
new_sp = protein.representative_sequence.get_subsequence(resnums=final_resnums, new_id=sub_id,
copy_letter_annotations=False)
return {'subseq_len': len(final_resnums), 'subseq': new_sp, 'subseq_resnums': final_resnums} | [
"def",
"get_combo_subseq_within_2_5D",
"(",
"protein",
",",
"props",
",",
"within",
",",
"filter_resnums",
"=",
"None",
")",
":",
"if",
"not",
"protein",
".",
"representative_structure",
":",
"log",
".",
"error",
"(",
"'{}: no representative structure, cannot search f... | Get a subsequence from REPSEQ based on multiple features stored in REPSEQ and within the set distance in REPSTRUCT.REPCHAIN | [
"Get",
"a",
"subsequence",
"from",
"REPSEQ",
"based",
"on",
"multiple",
"features",
"stored",
"in",
"REPSEQ",
"and",
"within",
"the",
"set",
"distance",
"in",
"REPSTRUCT",
".",
"REPCHAIN"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L3093-L3111 | train | 29,045 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_surface_subseq_3D | def get_surface_subseq_3D(protein,
depth_prop='RES_DEPTH-msms', depth_cutoff=2.5, depth_condition='<',
acc_prop='RSA_ALL-freesasa_het', acc_cutoff=25, acc_condition='>'):
"""SURFACE 3D = NOTDEEP + ACC"""
empty = {'surface_3D': {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []},
'notdeep_3D': {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []},
'acc_3D' : {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []}}
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return empty
notdeep_subseq, notdeep_subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(
property_key=depth_prop,
property_value=depth_cutoff,
condition=depth_condition,
use_representatives=True,
return_resnums=True) or (None, [])
acc_subseq, acc_subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(
property_key=acc_prop,
property_value=acc_cutoff,
condition=acc_condition,
use_representatives=True,
return_resnums=True) or (None, [])
surface_subseq_resnums = list(set(notdeep_subseq_resnums).intersection(acc_subseq_resnums))
surface_subseq = protein.representative_sequence.get_subsequence(surface_subseq_resnums)
all_info = {'surface_3D': {'subseq_len' : len(surface_subseq_resnums), 'subseq': surface_subseq,
'subseq_resnums': surface_subseq_resnums},
'notdeep_3D': {'subseq_len' : len(notdeep_subseq_resnums), 'subseq': notdeep_subseq,
'subseq_resnums': notdeep_subseq_resnums},
'acc_3D' : {'subseq_len' : len(acc_subseq_resnums), 'subseq': acc_subseq,
'subseq_resnums': acc_subseq_resnums}}
return all_info | python | def get_surface_subseq_3D(protein,
depth_prop='RES_DEPTH-msms', depth_cutoff=2.5, depth_condition='<',
acc_prop='RSA_ALL-freesasa_het', acc_cutoff=25, acc_condition='>'):
"""SURFACE 3D = NOTDEEP + ACC"""
empty = {'surface_3D': {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []},
'notdeep_3D': {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []},
'acc_3D' : {'subseq_len' : 0, 'subseq': None,
'subseq_resnums': []}}
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return empty
notdeep_subseq, notdeep_subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(
property_key=depth_prop,
property_value=depth_cutoff,
condition=depth_condition,
use_representatives=True,
return_resnums=True) or (None, [])
acc_subseq, acc_subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(
property_key=acc_prop,
property_value=acc_cutoff,
condition=acc_condition,
use_representatives=True,
return_resnums=True) or (None, [])
surface_subseq_resnums = list(set(notdeep_subseq_resnums).intersection(acc_subseq_resnums))
surface_subseq = protein.representative_sequence.get_subsequence(surface_subseq_resnums)
all_info = {'surface_3D': {'subseq_len' : len(surface_subseq_resnums), 'subseq': surface_subseq,
'subseq_resnums': surface_subseq_resnums},
'notdeep_3D': {'subseq_len' : len(notdeep_subseq_resnums), 'subseq': notdeep_subseq,
'subseq_resnums': notdeep_subseq_resnums},
'acc_3D' : {'subseq_len' : len(acc_subseq_resnums), 'subseq': acc_subseq,
'subseq_resnums': acc_subseq_resnums}}
return all_info | [
"def",
"get_surface_subseq_3D",
"(",
"protein",
",",
"depth_prop",
"=",
"'RES_DEPTH-msms'",
",",
"depth_cutoff",
"=",
"2.5",
",",
"depth_condition",
"=",
"'<'",
",",
"acc_prop",
"=",
"'RSA_ALL-freesasa_het'",
",",
"acc_cutoff",
"=",
"25",
",",
"acc_condition",
"="... | SURFACE 3D = NOTDEEP + ACC | [
"SURFACE",
"3D",
"=",
"NOTDEEP",
"+",
"ACC"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L3113-L3150 | train | 29,046 |
SBRG/ssbio | ssbio/core/protein.py | Protein.get_disorder_subseq_3D | def get_disorder_subseq_3D(protein,
pdbflex_keys_file,
disorder_cutoff=2,
disorder_condition='>'):
"""DISORDERED REGION 3D"""
with open(pdbflex_keys_file, 'r') as f:
pdbflex_keys = json.load(f)
if protein.id not in pdbflex_keys:
log.warning('{}: no PDBFlex info available'.format(protein.id))
final_repseq_sub, final_repseq_sub_resnums = (None, [])
else:
# Gather disordered regions for all mapped PDBFlex keys -- gets maximum disorder
# TODO: should add option to do the opposite (get consensus disorder)
repseq_sub_resnums_all = []
for disorder_prop in pdbflex_keys[protein.id]:
repseq_sub_raw, repseq_sub_resnums_raw = protein.representative_sequence.get_subsequence_from_property(
property_key=disorder_prop,
property_value=disorder_cutoff,
condition=disorder_condition,
return_resnums=True) or (None, [])
repseq_sub_resnums_all.extend(repseq_sub_resnums_raw)
final_repseq_sub_resnums = list(set(repseq_sub_resnums_all))
final_repseq_sub = protein.representative_sequence.get_subsequence(resnums=final_repseq_sub_resnums)
return {'subseq_len' : len(final_repseq_sub_resnums), 'subseq': final_repseq_sub,
'subseq_resnums': final_repseq_sub_resnums} | python | def get_disorder_subseq_3D(protein,
pdbflex_keys_file,
disorder_cutoff=2,
disorder_condition='>'):
"""DISORDERED REGION 3D"""
with open(pdbflex_keys_file, 'r') as f:
pdbflex_keys = json.load(f)
if protein.id not in pdbflex_keys:
log.warning('{}: no PDBFlex info available'.format(protein.id))
final_repseq_sub, final_repseq_sub_resnums = (None, [])
else:
# Gather disordered regions for all mapped PDBFlex keys -- gets maximum disorder
# TODO: should add option to do the opposite (get consensus disorder)
repseq_sub_resnums_all = []
for disorder_prop in pdbflex_keys[protein.id]:
repseq_sub_raw, repseq_sub_resnums_raw = protein.representative_sequence.get_subsequence_from_property(
property_key=disorder_prop,
property_value=disorder_cutoff,
condition=disorder_condition,
return_resnums=True) or (None, [])
repseq_sub_resnums_all.extend(repseq_sub_resnums_raw)
final_repseq_sub_resnums = list(set(repseq_sub_resnums_all))
final_repseq_sub = protein.representative_sequence.get_subsequence(resnums=final_repseq_sub_resnums)
return {'subseq_len' : len(final_repseq_sub_resnums), 'subseq': final_repseq_sub,
'subseq_resnums': final_repseq_sub_resnums} | [
"def",
"get_disorder_subseq_3D",
"(",
"protein",
",",
"pdbflex_keys_file",
",",
"disorder_cutoff",
"=",
"2",
",",
"disorder_condition",
"=",
"'>'",
")",
":",
"with",
"open",
"(",
"pdbflex_keys_file",
",",
"'r'",
")",
"as",
"f",
":",
"pdbflex_keys",
"=",
"json"... | DISORDERED REGION 3D | [
"DISORDERED",
"REGION",
"3D"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L3165-L3193 | train | 29,047 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | parse_init_dat | def parse_init_dat(infile):
"""Parse the main init.dat file which contains the modeling results
The first line of the file init.dat contains stuff like::
"120 easy 40 8"
The other lines look like this::
" 161 11.051 1 1guqA MUSTER"
and getting the first 10 gives you the top 10 templates used in modeling
Args:
infile (stt): Path to init.dat
Returns:
dict: Dictionary of parsed information
"""
# TODO: would be nice to get top 10 templates instead of just the top
init_dict = {}
log.debug('{}: reading file...'.format(infile))
with open(infile, 'r') as f:
# Get first 2 lines of file
head = [next(f).strip() for x in range(2)]
summary = head[0].split()
difficulty = summary[1]
top_template_info = head[1].split()
top_template_pdbchain = top_template_info[3]
top_template_pdb = top_template_pdbchain[:4]
top_template_chain = top_template_pdbchain[4:]
init_dict['difficulty'] = difficulty
init_dict['top_template_pdb'] = top_template_pdb
init_dict['top_template_chain'] = top_template_chain
return init_dict | python | def parse_init_dat(infile):
"""Parse the main init.dat file which contains the modeling results
The first line of the file init.dat contains stuff like::
"120 easy 40 8"
The other lines look like this::
" 161 11.051 1 1guqA MUSTER"
and getting the first 10 gives you the top 10 templates used in modeling
Args:
infile (stt): Path to init.dat
Returns:
dict: Dictionary of parsed information
"""
# TODO: would be nice to get top 10 templates instead of just the top
init_dict = {}
log.debug('{}: reading file...'.format(infile))
with open(infile, 'r') as f:
# Get first 2 lines of file
head = [next(f).strip() for x in range(2)]
summary = head[0].split()
difficulty = summary[1]
top_template_info = head[1].split()
top_template_pdbchain = top_template_info[3]
top_template_pdb = top_template_pdbchain[:4]
top_template_chain = top_template_pdbchain[4:]
init_dict['difficulty'] = difficulty
init_dict['top_template_pdb'] = top_template_pdb
init_dict['top_template_chain'] = top_template_chain
return init_dict | [
"def",
"parse_init_dat",
"(",
"infile",
")",
":",
"# TODO: would be nice to get top 10 templates instead of just the top",
"init_dict",
"=",
"{",
"}",
"log",
".",
"debug",
"(",
"'{}: reading file...'",
".",
"format",
"(",
"infile",
")",
")",
"with",
"open",
"(",
"in... | Parse the main init.dat file which contains the modeling results
The first line of the file init.dat contains stuff like::
"120 easy 40 8"
The other lines look like this::
" 161 11.051 1 1guqA MUSTER"
and getting the first 10 gives you the top 10 templates used in modeling
Args:
infile (stt): Path to init.dat
Returns:
dict: Dictionary of parsed information | [
"Parse",
"the",
"main",
"init",
".",
"dat",
"file",
"which",
"contains",
"the",
"modeling",
"results"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L289-L330 | train | 29,048 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | parse_cscore | def parse_cscore(infile):
"""Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores
"""
cscore_dict = {}
with open(infile, 'r') as f:
for ll in f.readlines():
# Look for the first line that starts with model1
if ll.lower().startswith('model1'):
l = ll.split()
cscore = l[1]
tmscore_full = l[2].split('+-')
tmscore = tmscore_full[0]
tmscore_err = tmscore_full[1]
rmsd_full = l[3].split('+-')
rmsd = rmsd_full[0]
rmsd_err = rmsd_full[1]
cscore_dict['c_score'] = float(cscore)
cscore_dict['tm_score'] = float(tmscore)
cscore_dict['tm_score_err'] = float(tmscore_err)
cscore_dict['rmsd'] = float(rmsd)
cscore_dict['rmsd_err'] = float(rmsd_err)
return cscore_dict | python | def parse_cscore(infile):
"""Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores
"""
cscore_dict = {}
with open(infile, 'r') as f:
for ll in f.readlines():
# Look for the first line that starts with model1
if ll.lower().startswith('model1'):
l = ll.split()
cscore = l[1]
tmscore_full = l[2].split('+-')
tmscore = tmscore_full[0]
tmscore_err = tmscore_full[1]
rmsd_full = l[3].split('+-')
rmsd = rmsd_full[0]
rmsd_err = rmsd_full[1]
cscore_dict['c_score'] = float(cscore)
cscore_dict['tm_score'] = float(tmscore)
cscore_dict['tm_score_err'] = float(tmscore_err)
cscore_dict['rmsd'] = float(rmsd)
cscore_dict['rmsd_err'] = float(rmsd_err)
return cscore_dict | [
"def",
"parse_cscore",
"(",
"infile",
")",
":",
"cscore_dict",
"=",
"{",
"}",
"with",
"open",
"(",
"infile",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"ll",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"# Look for the first line that starts with model1",
"if"... | Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores | [
"Parse",
"the",
"cscore",
"file",
"to",
"return",
"a",
"dictionary",
"of",
"scores",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L381-L414 | train | 29,049 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | parse_coach_bsites_inf | def parse_coach_bsites_inf(infile):
"""Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions
Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished
For each site (cluster), there are three lines:
- Line 1: site number, c-score of coach prediction, cluster size
- Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates),
c-score of the algorithm's prediction, binding residues from single template
- Line 3: Statistics of ligands in the cluster
C-score information:
- "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below
0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf)
Args:
infile (str): Path to Bsites.inf
Returns:
list: Ranked list of dictionaries, keys defined below
- ``site_num``: cluster which is the consensus binding site
- ``c_score``: confidence score of the cluster prediction
- ``cluster_size``: number of predictions within this cluster
- ``algorithm``: main? algorithm used to make the prediction
- ``pdb_template_id``: PDB ID of the template used to make the prediction
- ``pdb_template_chain``: chain of the PDB which has the ligand
- ``pdb_ligand``: predicted ligand to bind
- ``binding_location_coords``: centroid of the predicted ligand position in the homology model
- ``c_score_method``: confidence score for the main algorithm
- ``binding_residues``: predicted residues to bind the ligand
- ``ligand_cluster_counts``: number of predictions per ligand
"""
bsites_results = []
with open(infile) as pp:
lines = list(filter(None, (line.rstrip() for line in pp)))
for i in range(len(lines) // 3):
bsites_site_dict = {}
line1 = lines[i * 3].split('\t')
line2 = lines[i * 3 + 1].split('\t')
line3 = lines[i * 3 + 2]
bsites_site_dict['site_num'] = line1[0]
bsites_site_dict['c_score'] = float(line1[1])
bsites_site_dict['cluster_size'] = line1[2]
bsites_site_dict['algorithm'] = line2[0]
bsites_site_dict['pdb_template_id'] = line2[1][:4]
bsites_site_dict['pdb_template_chain'] = line2[1][4]
bsites_site_dict['pdb_ligand'] = line2[2]
bsites_site_dict['binding_location_coords'] = tuple(float(x) for x in line2[3].split())
# TODO: what's the difference between this c-score and the cluster's c-score?
# how is the cluster's c-score computed? it's not the average c-score of all methods
# also why are some COFACTOR c-scores >1?
# 160411 - seems like the COFACTOR "BS-score" is being reported here, not its c-score...
tmp_split = line2[4].split(' :')
bsites_site_dict['c_score_method'] = tmp_split[0]
bsites_site_dict['binding_residues'] = tmp_split[1]
bsites_site_dict['ligand_cluster_counts'] = line3
bsites_results.append(bsites_site_dict)
return bsites_results | python | def parse_coach_bsites_inf(infile):
"""Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions
Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished
For each site (cluster), there are three lines:
- Line 1: site number, c-score of coach prediction, cluster size
- Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates),
c-score of the algorithm's prediction, binding residues from single template
- Line 3: Statistics of ligands in the cluster
C-score information:
- "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below
0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf)
Args:
infile (str): Path to Bsites.inf
Returns:
list: Ranked list of dictionaries, keys defined below
- ``site_num``: cluster which is the consensus binding site
- ``c_score``: confidence score of the cluster prediction
- ``cluster_size``: number of predictions within this cluster
- ``algorithm``: main? algorithm used to make the prediction
- ``pdb_template_id``: PDB ID of the template used to make the prediction
- ``pdb_template_chain``: chain of the PDB which has the ligand
- ``pdb_ligand``: predicted ligand to bind
- ``binding_location_coords``: centroid of the predicted ligand position in the homology model
- ``c_score_method``: confidence score for the main algorithm
- ``binding_residues``: predicted residues to bind the ligand
- ``ligand_cluster_counts``: number of predictions per ligand
"""
bsites_results = []
with open(infile) as pp:
lines = list(filter(None, (line.rstrip() for line in pp)))
for i in range(len(lines) // 3):
bsites_site_dict = {}
line1 = lines[i * 3].split('\t')
line2 = lines[i * 3 + 1].split('\t')
line3 = lines[i * 3 + 2]
bsites_site_dict['site_num'] = line1[0]
bsites_site_dict['c_score'] = float(line1[1])
bsites_site_dict['cluster_size'] = line1[2]
bsites_site_dict['algorithm'] = line2[0]
bsites_site_dict['pdb_template_id'] = line2[1][:4]
bsites_site_dict['pdb_template_chain'] = line2[1][4]
bsites_site_dict['pdb_ligand'] = line2[2]
bsites_site_dict['binding_location_coords'] = tuple(float(x) for x in line2[3].split())
# TODO: what's the difference between this c-score and the cluster's c-score?
# how is the cluster's c-score computed? it's not the average c-score of all methods
# also why are some COFACTOR c-scores >1?
# 160411 - seems like the COFACTOR "BS-score" is being reported here, not its c-score...
tmp_split = line2[4].split(' :')
bsites_site_dict['c_score_method'] = tmp_split[0]
bsites_site_dict['binding_residues'] = tmp_split[1]
bsites_site_dict['ligand_cluster_counts'] = line3
bsites_results.append(bsites_site_dict)
return bsites_results | [
"def",
"parse_coach_bsites_inf",
"(",
"infile",
")",
":",
"bsites_results",
"=",
"[",
"]",
"with",
"open",
"(",
"infile",
")",
"as",
"pp",
":",
"lines",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"(",
"line",
".",
"rstrip",
"(",
")",
"for",
"line"... | Parse the Bsites.inf output file of COACH and return a list of rank-ordered binding site predictions
Bsites.inf contains the summary of COACH clustering results after all other prediction algorithms have finished
For each site (cluster), there are three lines:
- Line 1: site number, c-score of coach prediction, cluster size
- Line 2: algorithm, PDB ID, ligand ID, center of binding site (cartesian coordinates),
c-score of the algorithm's prediction, binding residues from single template
- Line 3: Statistics of ligands in the cluster
C-score information:
- "In our training data, a prediction with C-score>0.35 has average false positive and false negative rates below
0.16 and 0.13, respectively." (https://zhanglab.ccmb.med.umich.edu/COACH/COACH.pdf)
Args:
infile (str): Path to Bsites.inf
Returns:
list: Ranked list of dictionaries, keys defined below
- ``site_num``: cluster which is the consensus binding site
- ``c_score``: confidence score of the cluster prediction
- ``cluster_size``: number of predictions within this cluster
- ``algorithm``: main? algorithm used to make the prediction
- ``pdb_template_id``: PDB ID of the template used to make the prediction
- ``pdb_template_chain``: chain of the PDB which has the ligand
- ``pdb_ligand``: predicted ligand to bind
- ``binding_location_coords``: centroid of the predicted ligand position in the homology model
- ``c_score_method``: confidence score for the main algorithm
- ``binding_residues``: predicted residues to bind the ligand
- ``ligand_cluster_counts``: number of predictions per ligand | [
"Parse",
"the",
"Bsites",
".",
"inf",
"output",
"file",
"of",
"COACH",
"and",
"return",
"a",
"list",
"of",
"rank",
"-",
"ordered",
"binding",
"site",
"predictions"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L417-L487 | train | 29,050 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | parse_coach_ec_df | def parse_coach_ec_df(infile):
"""Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
"""
ec_df = pd.read_table(infile, delim_whitespace=True,
names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',
'c_score', 'ec_number', 'binding_residues'])
ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]
ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
return ec_df | python | def parse_coach_ec_df(infile):
"""Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
"""
ec_df = pd.read_table(infile, delim_whitespace=True,
names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',
'c_score', 'ec_number', 'binding_residues'])
ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]
ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
return ec_df | [
"def",
"parse_coach_ec_df",
"(",
"infile",
")",
":",
"ec_df",
"=",
"pd",
".",
"read_table",
"(",
"infile",
",",
"delim_whitespace",
"=",
"True",
",",
"names",
"=",
"[",
"'pdb_template'",
",",
"'tm_score'",
",",
"'rmsd'",
",",
"'seq_ident'",
",",
"'seq_covera... | Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions | [
"Parse",
"the",
"EC",
".",
"dat",
"output",
"file",
"of",
"COACH",
"and",
"return",
"a",
"dataframe",
"of",
"results"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L490-L516 | train | 29,051 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | parse_coach_go | def parse_coach_go(infile):
"""Parse a GO output file from COACH and return a rank-ordered list of GO term predictions
The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:
- GO_MF.dat - GO terms in 'molecular function'
- GO_BP.dat - GO terms in 'biological process'
- GO_CC.dat - GO terms in 'cellular component'
Args:
infile (str): Path to any COACH GO prediction file
Returns:
Pandas DataFrame: Organized dataframe of results, columns defined below
- ``go_id``: GO term ID
- ``go_term``: GO term text
- ``c_score``: confidence score of the GO prediction
"""
go_list = []
with open(infile) as go_file:
for line in go_file.readlines():
go_dict = {}
go_split = line.split()
go_dict['go_id'] = go_split[0]
go_dict['c_score'] = go_split[1]
go_dict['go_term'] = ' '.join(go_split[2:])
go_list.append(go_dict)
return go_list | python | def parse_coach_go(infile):
"""Parse a GO output file from COACH and return a rank-ordered list of GO term predictions
The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:
- GO_MF.dat - GO terms in 'molecular function'
- GO_BP.dat - GO terms in 'biological process'
- GO_CC.dat - GO terms in 'cellular component'
Args:
infile (str): Path to any COACH GO prediction file
Returns:
Pandas DataFrame: Organized dataframe of results, columns defined below
- ``go_id``: GO term ID
- ``go_term``: GO term text
- ``c_score``: confidence score of the GO prediction
"""
go_list = []
with open(infile) as go_file:
for line in go_file.readlines():
go_dict = {}
go_split = line.split()
go_dict['go_id'] = go_split[0]
go_dict['c_score'] = go_split[1]
go_dict['go_term'] = ' '.join(go_split[2:])
go_list.append(go_dict)
return go_list | [
"def",
"parse_coach_go",
"(",
"infile",
")",
":",
"go_list",
"=",
"[",
"]",
"with",
"open",
"(",
"infile",
")",
"as",
"go_file",
":",
"for",
"line",
"in",
"go_file",
".",
"readlines",
"(",
")",
":",
"go_dict",
"=",
"{",
"}",
"go_split",
"=",
"line",
... | Parse a GO output file from COACH and return a rank-ordered list of GO term predictions
The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:
- GO_MF.dat - GO terms in 'molecular function'
- GO_BP.dat - GO terms in 'biological process'
- GO_CC.dat - GO terms in 'cellular component'
Args:
infile (str): Path to any COACH GO prediction file
Returns:
Pandas DataFrame: Organized dataframe of results, columns defined below
- ``go_id``: GO term ID
- ``go_term``: GO term text
- ``c_score``: confidence score of the GO prediction | [
"Parse",
"a",
"GO",
"output",
"file",
"from",
"COACH",
"and",
"return",
"a",
"rank",
"-",
"ordered",
"list",
"of",
"GO",
"term",
"predictions"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L546-L579 | train | 29,052 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | ITASSERProp.copy_results | def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
"""Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten.
"""
# Save path to the structure and copy it if specified
if not rename_model_to:
rename_model_to = self.model_to_use
new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
if self.structure_path:
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
# Clean and save it
custom_clean = CleanPDB()
my_pdb = StructureIO(self.structure_path)
new_model_path = my_pdb.write_pdb(custom_selection=custom_clean,
custom_name=rename_model_to,
out_dir=copy_to_dir,
force_rerun=force_rerun)
# Update the structure_path to be the new clean file
self.load_structure_path(structure_path=new_model_path, file_type='pdb')
# Other modeling results - store in a new folder
dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
if not op.exists(dest_itasser_dir):
os.mkdir(dest_itasser_dir)
for attr in self._attrs_to_copy:
old_file_path = getattr(self, attr)
new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
shutil.copy2(old_file_path, new_file_path)
log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
else:
log.debug('{}: file already exists'.format(new_file_path))
setattr(self, attr, new_file_path) | python | def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
"""Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten.
"""
# Save path to the structure and copy it if specified
if not rename_model_to:
rename_model_to = self.model_to_use
new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
if self.structure_path:
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
# Clean and save it
custom_clean = CleanPDB()
my_pdb = StructureIO(self.structure_path)
new_model_path = my_pdb.write_pdb(custom_selection=custom_clean,
custom_name=rename_model_to,
out_dir=copy_to_dir,
force_rerun=force_rerun)
# Update the structure_path to be the new clean file
self.load_structure_path(structure_path=new_model_path, file_type='pdb')
# Other modeling results - store in a new folder
dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
if not op.exists(dest_itasser_dir):
os.mkdir(dest_itasser_dir)
for attr in self._attrs_to_copy:
old_file_path = getattr(self, attr)
new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
shutil.copy2(old_file_path, new_file_path)
log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
else:
log.debug('{}: file already exists'.format(new_file_path))
setattr(self, attr, new_file_path) | [
"def",
"copy_results",
"(",
"self",
",",
"copy_to_dir",
",",
"rename_model_to",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"# Save path to the structure and copy it if specified",
"if",
"not",
"rename_model_to",
":",
"rename_model_to",
"=",
"self",
".",
... | Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten. | [
"Copy",
"the",
"raw",
"information",
"from",
"I",
"-",
"TASSER",
"modeling",
"to",
"a",
"new",
"folder",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L153-L196 | train | 29,053 |
SBRG/ssbio | ssbio/protein/structure/homology/itasser/itasserprop.py | ITASSERProp.get_dict | def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
to_exclude = ['coach_bsites', 'coach_ec', 'coach_go_mf', 'coach_go_bp', 'coach_go_cc']
if not exclude_attributes:
excluder = to_exclude
else:
excluder = ssbio.utils.force_list(exclude_attributes)
excluder.extend(to_exclude)
summary_dict = StructProp.get_dict(self, only_attributes=only_attributes,
exclude_attributes=excluder,
df_format=df_format)
if self.coach_bsites:
tmp = {'top_bsite_' + k:v for k, v in self.coach_bsites[0].items()}
summary_dict.update(tmp)
if self.coach_ec:
tmp = {'top_ec_' + k: v for k, v in self.coach_ec[0].items()}
summary_dict.update(tmp)
if self.coach_go_mf:
tmp = {'top_go_mf_' + k: v for k, v in self.coach_go_mf[0].items()}
summary_dict.update(tmp)
if self.coach_go_bp:
tmp = {'top_go_bp_' + k: v for k, v in self.coach_go_bp[0].items()}
summary_dict.update(tmp)
if self.coach_go_cc:
tmp = {'top_go_cc_' + k: v for k, v in self.coach_go_cc[0].items()}
summary_dict.update(tmp)
return summary_dict | python | def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
to_exclude = ['coach_bsites', 'coach_ec', 'coach_go_mf', 'coach_go_bp', 'coach_go_cc']
if not exclude_attributes:
excluder = to_exclude
else:
excluder = ssbio.utils.force_list(exclude_attributes)
excluder.extend(to_exclude)
summary_dict = StructProp.get_dict(self, only_attributes=only_attributes,
exclude_attributes=excluder,
df_format=df_format)
if self.coach_bsites:
tmp = {'top_bsite_' + k:v for k, v in self.coach_bsites[0].items()}
summary_dict.update(tmp)
if self.coach_ec:
tmp = {'top_ec_' + k: v for k, v in self.coach_ec[0].items()}
summary_dict.update(tmp)
if self.coach_go_mf:
tmp = {'top_go_mf_' + k: v for k, v in self.coach_go_mf[0].items()}
summary_dict.update(tmp)
if self.coach_go_bp:
tmp = {'top_go_bp_' + k: v for k, v in self.coach_go_bp[0].items()}
summary_dict.update(tmp)
if self.coach_go_cc:
tmp = {'top_go_cc_' + k: v for k, v in self.coach_go_cc[0].items()}
summary_dict.update(tmp)
return summary_dict | [
"def",
"get_dict",
"(",
"self",
",",
"only_attributes",
"=",
"None",
",",
"exclude_attributes",
"=",
"None",
",",
"df_format",
"=",
"False",
")",
":",
"to_exclude",
"=",
"[",
"'coach_bsites'",
",",
"'coach_ec'",
",",
"'coach_go_mf'",
",",
"'coach_go_bp'",
",",... | Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes | [
"Summarize",
"the",
"I",
"-",
"TASSER",
"run",
"in",
"a",
"dictionary",
"containing",
"modeling",
"results",
"and",
"top",
"predictions",
"from",
"COACH"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L240-L286 | train | 29,054 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.load_cobra_model | def load_cobra_model(self, model):
"""Load a COBRApy Model object into the GEM-PRO project.
Args:
model (Model): COBRApy ``Model`` object
"""
self.model = ModelPro(model)
for g in self.model.genes:
if self.genes_dir:
g.root_dir = self.genes_dir
g.protein.pdb_file_type = self.pdb_file_type
self.genes = self.model.genes
log.info('{}: loaded model'.format(model.id))
log.info('{}: number of reactions'.format(len(self.model.reactions)))
log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))
log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,
custom_spont_id=self.custom_spont_id)))
log.info('{}: number of metabolites'.format(len(self.model.metabolites)))
log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '
'objects, and will be for any new ones') | python | def load_cobra_model(self, model):
"""Load a COBRApy Model object into the GEM-PRO project.
Args:
model (Model): COBRApy ``Model`` object
"""
self.model = ModelPro(model)
for g in self.model.genes:
if self.genes_dir:
g.root_dir = self.genes_dir
g.protein.pdb_file_type = self.pdb_file_type
self.genes = self.model.genes
log.info('{}: loaded model'.format(model.id))
log.info('{}: number of reactions'.format(len(self.model.reactions)))
log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))
log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,
custom_spont_id=self.custom_spont_id)))
log.info('{}: number of metabolites'.format(len(self.model.metabolites)))
log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '
'objects, and will be for any new ones') | [
"def",
"load_cobra_model",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"model",
"=",
"ModelPro",
"(",
"model",
")",
"for",
"g",
"in",
"self",
".",
"model",
".",
"genes",
":",
"if",
"self",
".",
"genes_dir",
":",
"g",
".",
"root_dir",
"=",
"se... | Load a COBRApy Model object into the GEM-PRO project.
Args:
model (Model): COBRApy ``Model`` object | [
"Load",
"a",
"COBRApy",
"Model",
"object",
"into",
"the",
"GEM",
"-",
"PRO",
"project",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L239-L260 | train | 29,055 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.add_gene_ids | def add_gene_ids(self, genes_list):
"""Add gene IDs manually into the GEM-PRO project.
Args:
genes_list (list): List of gene IDs as strings.
"""
orig_num_genes = len(self.genes)
for g in list(set(genes_list)):
if not self.genes.has_id(g):
new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
if self.model:
self.model.genes.append(new_gene)
else:
self.genes.append(new_gene)
log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes)) | python | def add_gene_ids(self, genes_list):
"""Add gene IDs manually into the GEM-PRO project.
Args:
genes_list (list): List of gene IDs as strings.
"""
orig_num_genes = len(self.genes)
for g in list(set(genes_list)):
if not self.genes.has_id(g):
new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
if self.model:
self.model.genes.append(new_gene)
else:
self.genes.append(new_gene)
log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes)) | [
"def",
"add_gene_ids",
"(",
"self",
",",
"genes_list",
")",
":",
"orig_num_genes",
"=",
"len",
"(",
"self",
".",
"genes",
")",
"for",
"g",
"in",
"list",
"(",
"set",
"(",
"genes_list",
")",
")",
":",
"if",
"not",
"self",
".",
"genes",
".",
"has_id",
... | Add gene IDs manually into the GEM-PRO project.
Args:
genes_list (list): List of gene IDs as strings. | [
"Add",
"gene",
"IDs",
"manually",
"into",
"the",
"GEM",
"-",
"PRO",
"project",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L320-L337 | train | 29,056 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.uniprot_mapping_and_metadata | def uniprot_mapping_and_metadata(self, model_gene_source, custom_gene_mapping=None, outdir=None,
set_as_representative=False, force_rerun=False):
"""Map all genes in the model to UniProt IDs using the UniProt mapping service.
Also download all metadata and sequences.
Args:
model_gene_source (str): the database source of your model gene IDs.
See: http://www.uniprot.org/help/api_idmapping
Common model gene sources are:
* Ensembl Genomes - ``ENSEMBLGENOME_ID`` (i.e. E. coli b-numbers)
* Entrez Gene (GeneID) - ``P_ENTREZGENEID``
* RefSeq Protein - ``P_REFSEQ_AC``
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model genes.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files
"""
# Allow model gene --> custom ID mapping ({'TM_1012':'TM1012'})
if custom_gene_mapping:
genes_to_map = list(custom_gene_mapping.values())
else:
genes_to_map = [x.id for x in self.genes]
# Map all IDs first to available UniProts
genes_to_uniprots = bs_unip.mapping(fr=model_gene_source, to='ACC', query=genes_to_map)
successfully_mapped_counter = 0
for g in tqdm(self.genes):
if custom_gene_mapping and g.id in custom_gene_mapping.keys():
uniprot_gene = custom_gene_mapping[g.id]
else:
uniprot_gene = g.id
if uniprot_gene not in genes_to_uniprots:
log.debug('{}: unable to map to UniProt'.format(g.id))
continue
for mapped_uniprot in genes_to_uniprots[uniprot_gene]:
try:
uniprot_prop = g.protein.load_uniprot(uniprot_id=mapped_uniprot, download=True, outdir=outdir,
set_as_representative=set_as_representative,
force_rerun=force_rerun)
except HTTPError as e:
log.error('{}, {}: unable to complete web request'.format(g.id, mapped_uniprot))
print(e)
continue
if uniprot_prop.sequence_file or uniprot_prop.metadata_file:
successfully_mapped_counter += 1
log.info('{}/{}: number of genes mapped to UniProt'.format(successfully_mapped_counter, len(self.genes)))
log.info('Completed ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.') | python | def uniprot_mapping_and_metadata(self, model_gene_source, custom_gene_mapping=None, outdir=None,
set_as_representative=False, force_rerun=False):
"""Map all genes in the model to UniProt IDs using the UniProt mapping service.
Also download all metadata and sequences.
Args:
model_gene_source (str): the database source of your model gene IDs.
See: http://www.uniprot.org/help/api_idmapping
Common model gene sources are:
* Ensembl Genomes - ``ENSEMBLGENOME_ID`` (i.e. E. coli b-numbers)
* Entrez Gene (GeneID) - ``P_ENTREZGENEID``
* RefSeq Protein - ``P_REFSEQ_AC``
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model genes.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files
"""
# Allow model gene --> custom ID mapping ({'TM_1012':'TM1012'})
if custom_gene_mapping:
genes_to_map = list(custom_gene_mapping.values())
else:
genes_to_map = [x.id for x in self.genes]
# Map all IDs first to available UniProts
genes_to_uniprots = bs_unip.mapping(fr=model_gene_source, to='ACC', query=genes_to_map)
successfully_mapped_counter = 0
for g in tqdm(self.genes):
if custom_gene_mapping and g.id in custom_gene_mapping.keys():
uniprot_gene = custom_gene_mapping[g.id]
else:
uniprot_gene = g.id
if uniprot_gene not in genes_to_uniprots:
log.debug('{}: unable to map to UniProt'.format(g.id))
continue
for mapped_uniprot in genes_to_uniprots[uniprot_gene]:
try:
uniprot_prop = g.protein.load_uniprot(uniprot_id=mapped_uniprot, download=True, outdir=outdir,
set_as_representative=set_as_representative,
force_rerun=force_rerun)
except HTTPError as e:
log.error('{}, {}: unable to complete web request'.format(g.id, mapped_uniprot))
print(e)
continue
if uniprot_prop.sequence_file or uniprot_prop.metadata_file:
successfully_mapped_counter += 1
log.info('{}/{}: number of genes mapped to UniProt'.format(successfully_mapped_counter, len(self.genes)))
log.info('Completed ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.') | [
"def",
"uniprot_mapping_and_metadata",
"(",
"self",
",",
"model_gene_source",
",",
"custom_gene_mapping",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"set_as_representative",
"=",
"False",
",",
"force_rerun",
"=",
"False",
")",
":",
"# Allow model gene --> custom ID... | Map all genes in the model to UniProt IDs using the UniProt mapping service.
Also download all metadata and sequences.
Args:
model_gene_source (str): the database source of your model gene IDs.
See: http://www.uniprot.org/help/api_idmapping
Common model gene sources are:
* Ensembl Genomes - ``ENSEMBLGENOME_ID`` (i.e. E. coli b-numbers)
* Entrez Gene (GeneID) - ``P_ENTREZGENEID``
* RefSeq Protein - ``P_REFSEQ_AC``
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model genes.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files | [
"Map",
"all",
"genes",
"in",
"the",
"model",
"to",
"UniProt",
"IDs",
"using",
"the",
"UniProt",
"mapping",
"service",
".",
"Also",
"download",
"all",
"metadata",
"and",
"sequences",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L505-L563 | train | 29,057 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.write_representative_sequences_file | def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):
"""Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID
"""
if not outdir:
outdir = self.data_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, outname + '.faa')
tmp = []
for x in self.genes_with_a_representative_sequence:
repseq = x.protein.representative_sequence
copied_seq_record = copy(repseq)
if set_ids_from_model:
copied_seq_record.id = x.id
tmp.append(copied_seq_record)
SeqIO.write(tmp, outfile, "fasta")
log.info('{}: wrote all representative sequences to file'.format(outfile))
self.genome_path = outfile
return self.genome_path | python | def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):
"""Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID
"""
if not outdir:
outdir = self.data_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, outname + '.faa')
tmp = []
for x in self.genes_with_a_representative_sequence:
repseq = x.protein.representative_sequence
copied_seq_record = copy(repseq)
if set_ids_from_model:
copied_seq_record.id = x.id
tmp.append(copied_seq_record)
SeqIO.write(tmp, outfile, "fasta")
log.info('{}: wrote all representative sequences to file'.format(outfile))
self.genome_path = outfile
return self.genome_path | [
"def",
"write_representative_sequences_file",
"(",
"self",
",",
"outname",
",",
"outdir",
"=",
"None",
",",
"set_ids_from_model",
"=",
"True",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"self",
".",
"data_dir",
"if",
"not",
"outdir",
":",
"raise",
... | Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID | [
"Write",
"all",
"the",
"model",
"s",
"sequences",
"as",
"a",
"single",
"FASTA",
"file",
".",
"By",
"default",
"sets",
"IDs",
"to",
"model",
"gene",
"IDs",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L720-L750 | train | 29,058 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.get_tmhmm_predictions | def get_tmhmm_predictions(self, tmhmm_results, custom_gene_mapping=None):
"""Parse TMHMM results and store in the representative sequences.
This is a basic function to parse pre-run TMHMM results. Run TMHMM from the
web service (http://www.cbs.dtu.dk/services/TMHMM/) by doing the following:
1. Write all representative sequences in the GEM-PRO using the function ``write_representative_sequences_file``
2. Upload the file to http://www.cbs.dtu.dk/services/TMHMM/ and choose "Extensive, no graphics" as the output
3. Copy and paste the results (ignoring the top header and above "HELP with output formats") into a file and save it
4. Run this function on that file
Args:
tmhmm_results (str): Path to TMHMM results (long format)
custom_gene_mapping (dict): Default parsing of TMHMM output is to look for the model gene IDs. If
your output file contains IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
# TODO: refactor to Protein class?
tmhmm_dict = ssbio.protein.sequence.properties.tmhmm.parse_tmhmm_long(tmhmm_results)
counter = 0
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in tmhmm_dict:
log.debug('{}: loading TMHMM results'.format(g.id))
if not tmhmm_dict[g_id]:
log.error("{}: missing TMHMM results".format(g.id))
g.protein.representative_sequence.annotations['num_tm_helix-tmhmm'] = tmhmm_dict[g_id]['num_tm_helices']
try:
g.protein.representative_sequence.letter_annotations['TM-tmhmm'] = tmhmm_dict[g_id]['sequence']
counter += 1
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between TMHMM results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
else:
log.error("{}: missing TMHMM results".format(g.id))
log.info('{}/{}: number of genes with TMHMM predictions loaded'.format(counter, len(self.genes))) | python | def get_tmhmm_predictions(self, tmhmm_results, custom_gene_mapping=None):
"""Parse TMHMM results and store in the representative sequences.
This is a basic function to parse pre-run TMHMM results. Run TMHMM from the
web service (http://www.cbs.dtu.dk/services/TMHMM/) by doing the following:
1. Write all representative sequences in the GEM-PRO using the function ``write_representative_sequences_file``
2. Upload the file to http://www.cbs.dtu.dk/services/TMHMM/ and choose "Extensive, no graphics" as the output
3. Copy and paste the results (ignoring the top header and above "HELP with output formats") into a file and save it
4. Run this function on that file
Args:
tmhmm_results (str): Path to TMHMM results (long format)
custom_gene_mapping (dict): Default parsing of TMHMM output is to look for the model gene IDs. If
your output file contains IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
# TODO: refactor to Protein class?
tmhmm_dict = ssbio.protein.sequence.properties.tmhmm.parse_tmhmm_long(tmhmm_results)
counter = 0
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in tmhmm_dict:
log.debug('{}: loading TMHMM results'.format(g.id))
if not tmhmm_dict[g_id]:
log.error("{}: missing TMHMM results".format(g.id))
g.protein.representative_sequence.annotations['num_tm_helix-tmhmm'] = tmhmm_dict[g_id]['num_tm_helices']
try:
g.protein.representative_sequence.letter_annotations['TM-tmhmm'] = tmhmm_dict[g_id]['sequence']
counter += 1
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between TMHMM results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
else:
log.error("{}: missing TMHMM results".format(g.id))
log.info('{}/{}: number of genes with TMHMM predictions loaded'.format(counter, len(self.genes))) | [
"def",
"get_tmhmm_predictions",
"(",
"self",
",",
"tmhmm_results",
",",
"custom_gene_mapping",
"=",
"None",
")",
":",
"# TODO: refactor to Protein class?",
"tmhmm_dict",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"properties",
".",
"tmhmm",
".",
"parse_tmhm... | Parse TMHMM results and store in the representative sequences.
This is a basic function to parse pre-run TMHMM results. Run TMHMM from the
web service (http://www.cbs.dtu.dk/services/TMHMM/) by doing the following:
1. Write all representative sequences in the GEM-PRO using the function ``write_representative_sequences_file``
2. Upload the file to http://www.cbs.dtu.dk/services/TMHMM/ and choose "Extensive, no graphics" as the output
3. Copy and paste the results (ignoring the top header and above "HELP with output formats") into a file and save it
4. Run this function on that file
Args:
tmhmm_results (str): Path to TMHMM results (long format)
custom_gene_mapping (dict): Default parsing of TMHMM output is to look for the model gene IDs. If
your output file contains IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes. | [
"Parse",
"TMHMM",
"results",
"and",
"store",
"in",
"the",
"representative",
"sequences",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L846-L888 | train | 29,059 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.map_uniprot_to_pdb | def map_uniprot_to_pdb(self, seq_ident_cutoff=0.0, outdir=None, force_rerun=False):
"""Map all representative sequences' UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to each protein's ``sequences`` folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID
"""
# First get all UniProt IDs and check if they have PDBs
all_representative_uniprots = []
for g in self.genes_with_a_representative_sequence:
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
# TODO: add warning or something for isoform ids?
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
all_representative_uniprots.append(uniprot_id)
log.info('Mapping UniProt IDs --> PDB IDs...')
uniprots_to_pdbs = bs_unip.mapping(fr='ACC', to='PDB_ID', query=all_representative_uniprots)
counter = 0
# Now run the best_structures API for all genes
for g in tqdm(self.genes_with_a_representative_sequence):
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
if uniprot_id in uniprots_to_pdbs:
best_structures = g.protein.map_uniprot_to_pdb(seq_ident_cutoff=seq_ident_cutoff, outdir=outdir, force_rerun=force_rerun)
if best_structures:
counter += 1
log.debug('{}: {} PDBs mapped'.format(g.id, len(best_structures)))
else:
log.debug('{}, {}: no PDBs available'.format(g.id, uniprot_id))
log.info('{}/{}: number of genes with at least one experimental structure'.format(len(self.genes_with_experimental_structures),
len(self.genes)))
log.info('Completed UniProt --> best PDB mapping. See the "df_pdb_ranking" attribute for a summary dataframe.') | python | def map_uniprot_to_pdb(self, seq_ident_cutoff=0.0, outdir=None, force_rerun=False):
"""Map all representative sequences' UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to each protein's ``sequences`` folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID
"""
# First get all UniProt IDs and check if they have PDBs
all_representative_uniprots = []
for g in self.genes_with_a_representative_sequence:
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
# TODO: add warning or something for isoform ids?
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
all_representative_uniprots.append(uniprot_id)
log.info('Mapping UniProt IDs --> PDB IDs...')
uniprots_to_pdbs = bs_unip.mapping(fr='ACC', to='PDB_ID', query=all_representative_uniprots)
counter = 0
# Now run the best_structures API for all genes
for g in tqdm(self.genes_with_a_representative_sequence):
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
if uniprot_id in uniprots_to_pdbs:
best_structures = g.protein.map_uniprot_to_pdb(seq_ident_cutoff=seq_ident_cutoff, outdir=outdir, force_rerun=force_rerun)
if best_structures:
counter += 1
log.debug('{}: {} PDBs mapped'.format(g.id, len(best_structures)))
else:
log.debug('{}, {}: no PDBs available'.format(g.id, uniprot_id))
log.info('{}/{}: number of genes with at least one experimental structure'.format(len(self.genes_with_experimental_structures),
len(self.genes)))
log.info('Completed UniProt --> best PDB mapping. See the "df_pdb_ranking" attribute for a summary dataframe.') | [
"def",
"map_uniprot_to_pdb",
"(",
"self",
",",
"seq_ident_cutoff",
"=",
"0.0",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"# First get all UniProt IDs and check if they have PDBs",
"all_representative_uniprots",
"=",
"[",
"]",
"for",
"g",
... | Map all representative sequences' UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to each protein's ``sequences`` folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID | [
"Map",
"all",
"representative",
"sequences",
"UniProt",
"ID",
"to",
"PDB",
"IDs",
"using",
"the",
"PDBe",
"Best",
"Structures",
"API",
".",
"Will",
"save",
"a",
"JSON",
"file",
"of",
"the",
"results",
"to",
"each",
"protein",
"s",
"sequences",
"folder",
".... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L952-L999 | train | 29,060 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.get_manual_homology_models | def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):
"""Copy homology models to the GEM-PRO project.
Requires an input of a dictionary formatted like so::
{
model_gene: {
homology_model_id1: {
'model_file': '/path/to/homology/model.pdb',
'file_type': 'pdb'
'additional_info': info_value
},
homology_model_id2: {
'model_file': '/path/to/homology/model.pdb'
'file_type': 'pdb'
}
}
}
Args:
input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
clean (bool): If homology files should be cleaned and saved as a new PDB file
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
if outdir:
outdir_set = True
else:
outdir_set = False
counter = 0
for g in tqdm(self.genes):
if g.id not in input_dict:
continue
if not outdir_set:
outdir = g.protein.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
for hid, hdict in input_dict[g.id].items():
if 'model_file' not in hdict or 'file_type' not in hdict:
raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.')
new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'],
file_type=hdict['file_type'], is_experimental=False)
if clean:
new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun),
hdict['file_type'])
else:
copy_to = op.join(outdir, op.basename(hdict['model_file']))
if ssbio.utils.force_rerun(force_rerun, copy_to):
# Just copy the file to the structure directory and store the file name
log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))
shutil.copy2(hdict['model_file'], outdir)
new_homology.load_structure_path(copy_to, hdict['file_type'])
else:
log.debug('{}: homology model already copied to directory'.format(copy_to))
new_homology.load_structure_path(copy_to, hdict['file_type'])
# TODO: need to better handle other info in the provided dictionary, if any
new_homology.update(hdict)
log.debug('{}: updated homology model information and copied model file.'.format(g.id))
counter += 1
log.info('Updated homology model information for {} genes.'.format(counter)) | python | def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):
"""Copy homology models to the GEM-PRO project.
Requires an input of a dictionary formatted like so::
{
model_gene: {
homology_model_id1: {
'model_file': '/path/to/homology/model.pdb',
'file_type': 'pdb'
'additional_info': info_value
},
homology_model_id2: {
'model_file': '/path/to/homology/model.pdb'
'file_type': 'pdb'
}
}
}
Args:
input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
clean (bool): If homology files should be cleaned and saved as a new PDB file
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
if outdir:
outdir_set = True
else:
outdir_set = False
counter = 0
for g in tqdm(self.genes):
if g.id not in input_dict:
continue
if not outdir_set:
outdir = g.protein.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
for hid, hdict in input_dict[g.id].items():
if 'model_file' not in hdict or 'file_type' not in hdict:
raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.')
new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'],
file_type=hdict['file_type'], is_experimental=False)
if clean:
new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun),
hdict['file_type'])
else:
copy_to = op.join(outdir, op.basename(hdict['model_file']))
if ssbio.utils.force_rerun(force_rerun, copy_to):
# Just copy the file to the structure directory and store the file name
log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))
shutil.copy2(hdict['model_file'], outdir)
new_homology.load_structure_path(copy_to, hdict['file_type'])
else:
log.debug('{}: homology model already copied to directory'.format(copy_to))
new_homology.load_structure_path(copy_to, hdict['file_type'])
# TODO: need to better handle other info in the provided dictionary, if any
new_homology.update(hdict)
log.debug('{}: updated homology model information and copied model file.'.format(g.id))
counter += 1
log.info('Updated homology model information for {} genes.'.format(counter)) | [
"def",
"get_manual_homology_models",
"(",
"self",
",",
"input_dict",
",",
"outdir",
"=",
"None",
",",
"clean",
"=",
"True",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"outdir",
":",
"outdir_set",
"=",
"True",
"else",
":",
"outdir_set",
"=",
"False",... | Copy homology models to the GEM-PRO project.
Requires an input of a dictionary formatted like so::
{
model_gene: {
homology_model_id1: {
'model_file': '/path/to/homology/model.pdb',
'file_type': 'pdb'
'additional_info': info_value
},
homology_model_id2: {
'model_file': '/path/to/homology/model.pdb'
'file_type': 'pdb'
}
}
}
Args:
input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
clean (bool): If homology files should be cleaned and saved as a new PDB file
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory | [
"Copy",
"homology",
"models",
"to",
"the",
"GEM",
"-",
"PRO",
"project",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1021-L1090 | train | 29,061 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.get_itasser_models | def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):
"""Copy generated I-TASSER models from a directory to the GEM-PRO directory.
Args:
homology_raw_dir (str): Root directory of I-TASSER folders.
custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
Input a dict of {model_gene: ITASSER_folder}.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
counter = 0
for g in tqdm(self.genes):
if custom_itasser_name_mapping and g.id in custom_itasser_name_mapping:
hom_id = custom_itasser_name_mapping[g.id]
if not op.exists(op.join(homology_raw_dir, hom_id)):
hom_id = g.id
else:
hom_id = g.id
# The name of the actual pdb file will be $GENEID_model1.pdb
new_itasser_name = hom_id + '_model1'
orig_itasser_dir = op.join(homology_raw_dir, hom_id)
try:
itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir,
organize=True, outdir=outdir,
organize_name=new_itasser_name,
force_rerun=force_rerun)
except OSError:
log.debug('{}: homology model folder unavailable'.format(g.id))
continue
except IOError:
log.debug('{}: homology model unavailable'.format(g.id))
continue
if itasser_prop.structure_file:
counter += 1
else:
log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))
log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the "df_homology_models" attribute for a summary dataframe.'.format(counter)) | python | def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):
"""Copy generated I-TASSER models from a directory to the GEM-PRO directory.
Args:
homology_raw_dir (str): Root directory of I-TASSER folders.
custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
Input a dict of {model_gene: ITASSER_folder}.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
counter = 0
for g in tqdm(self.genes):
if custom_itasser_name_mapping and g.id in custom_itasser_name_mapping:
hom_id = custom_itasser_name_mapping[g.id]
if not op.exists(op.join(homology_raw_dir, hom_id)):
hom_id = g.id
else:
hom_id = g.id
# The name of the actual pdb file will be $GENEID_model1.pdb
new_itasser_name = hom_id + '_model1'
orig_itasser_dir = op.join(homology_raw_dir, hom_id)
try:
itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir,
organize=True, outdir=outdir,
organize_name=new_itasser_name,
force_rerun=force_rerun)
except OSError:
log.debug('{}: homology model folder unavailable'.format(g.id))
continue
except IOError:
log.debug('{}: homology model unavailable'.format(g.id))
continue
if itasser_prop.structure_file:
counter += 1
else:
log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))
log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the "df_homology_models" attribute for a summary dataframe.'.format(counter)) | [
"def",
"get_itasser_models",
"(",
"self",
",",
"homology_raw_dir",
",",
"custom_itasser_name_mapping",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"counter",
"=",
"0",
"for",
"g",
"in",
"tqdm",
"(",
"self",
".",
"ge... | Copy generated I-TASSER models from a directory to the GEM-PRO directory.
Args:
homology_raw_dir (str): Root directory of I-TASSER folders.
custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
Input a dict of {model_gene: ITASSER_folder}.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory | [
"Copy",
"generated",
"I",
"-",
"TASSER",
"models",
"from",
"a",
"directory",
"to",
"the",
"GEM",
"-",
"PRO",
"directory",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1092-L1135 | train | 29,062 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.set_representative_structure | def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
engine='needle', always_use_homology=False, rez_cutoff=0.0,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
clean=True, force_rerun=False):
"""Set all representative structure for proteins from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if GEM-PRO directories
were not created initially
struct_outdir (str): Path to output directory of structure files, must be set if GEM-PRO directories
were not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
clean (bool): If structures should be cleaned
force_rerun (bool): If sequence to structure alignment should be rerun
Todo:
- Remedy large structure representative setting
"""
for g in tqdm(self.genes):
repstruct = g.protein.set_representative_structure(seq_outdir=seq_outdir,
struct_outdir=struct_outdir,
pdb_file_type=pdb_file_type,
engine=engine,
rez_cutoff=rez_cutoff,
seq_ident_cutoff=seq_ident_cutoff,
always_use_homology=always_use_homology,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved,
skip_large_structures=skip_large_structures,
clean=clean,
force_rerun=force_rerun)
log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
len(self.genes)))
log.info('See the "df_representative_structures" attribute for a summary dataframe.') | python | def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
engine='needle', always_use_homology=False, rez_cutoff=0.0,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
clean=True, force_rerun=False):
"""Set all representative structure for proteins from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if GEM-PRO directories
were not created initially
struct_outdir (str): Path to output directory of structure files, must be set if GEM-PRO directories
were not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
clean (bool): If structures should be cleaned
force_rerun (bool): If sequence to structure alignment should be rerun
Todo:
- Remedy large structure representative setting
"""
for g in tqdm(self.genes):
repstruct = g.protein.set_representative_structure(seq_outdir=seq_outdir,
struct_outdir=struct_outdir,
pdb_file_type=pdb_file_type,
engine=engine,
rez_cutoff=rez_cutoff,
seq_ident_cutoff=seq_ident_cutoff,
always_use_homology=always_use_homology,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved,
skip_large_structures=skip_large_structures,
clean=clean,
force_rerun=force_rerun)
log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
len(self.genes)))
log.info('See the "df_representative_structures" attribute for a summary dataframe.') | [
"def",
"set_representative_structure",
"(",
"self",
",",
"seq_outdir",
"=",
"None",
",",
"struct_outdir",
"=",
"None",
",",
"pdb_file_type",
"=",
"None",
",",
"engine",
"=",
"'needle'",
",",
"always_use_homology",
"=",
"False",
",",
"rez_cutoff",
"=",
"0.0",
"... | Set all representative structure for proteins from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if GEM-PRO directories
were not created initially
struct_outdir (str): Path to output directory of structure files, must be set if GEM-PRO directories
were not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
clean (bool): If structures should be cleaned
force_rerun (bool): If sequence to structure alignment should be rerun
Todo:
- Remedy large structure representative setting | [
"Set",
"all",
"representative",
"structure",
"for",
"proteins",
"from",
"a",
"structure",
"in",
"the",
"structures",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1157-L1223 | train | 29,063 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.prep_itasser_modeling | def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
"""Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
"""
if not create_in_dir:
if not self.data_dir:
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.data_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if not execute_from_dir:
execute_from_dir = self.homology_models_dir
counter = 0
for g in self.genes_with_a_representative_sequence:
repstruct = g.protein.representative_structure
if repstruct and not all_genes:
log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
continue
g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
itlib_folder=itlib_folder, runtype=runtype,
create_in_dir=self.homology_models_dir,
execute_from_dir=execute_from_dir,
print_exec=print_exec, **kwargs)
counter += 1
log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
self.homology_models_dir)) | python | def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
"""Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
"""
if not create_in_dir:
if not self.data_dir:
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.data_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if not execute_from_dir:
execute_from_dir = self.homology_models_dir
counter = 0
for g in self.genes_with_a_representative_sequence:
repstruct = g.protein.representative_structure
if repstruct and not all_genes:
log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
continue
g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
itlib_folder=itlib_folder, runtype=runtype,
create_in_dir=self.homology_models_dir,
execute_from_dir=execute_from_dir,
print_exec=print_exec, **kwargs)
counter += 1
log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
self.homology_models_dir)) | [
"def",
"prep_itasser_modeling",
"(",
"self",
",",
"itasser_installation",
",",
"itlib_folder",
",",
"runtype",
",",
"create_in_dir",
"=",
"None",
",",
"execute_from_dir",
"=",
"None",
",",
"all_genes",
"=",
"False",
",",
"print_exec",
"=",
"False",
",",
"*",
"... | Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp? | [
"Prepare",
"to",
"run",
"I",
"-",
"TASSER",
"homology",
"modeling",
"for",
"genes",
"without",
"structures",
"or",
"all",
"genes",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1287-L1334 | train | 29,064 |
SBRG/ssbio | ssbio/pipeline/gempro.py | GEMPRO.pdb_downloader_and_metadata | def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
"""Download ALL mapped experimental structures to each protein's structures directory.
Args:
outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
"""
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
counter = 0
for g in tqdm(self.genes):
pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)
if pdbs:
counter += len(pdbs)
log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
log.info('Saved {} structures total'.format(counter)) | python | def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
"""Download ALL mapped experimental structures to each protein's structures directory.
Args:
outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
"""
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
counter = 0
for g in tqdm(self.genes):
pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)
if pdbs:
counter += len(pdbs)
log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
log.info('Saved {} structures total'.format(counter)) | [
"def",
"pdb_downloader_and_metadata",
"(",
"self",
",",
"outdir",
"=",
"None",
",",
"pdb_file_type",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"pdb_file_type",
":",
"pdb_file_type",
"=",
"self",
".",
"pdb_file_type",
"counter",
"=",... | Download ALL mapped experimental structures to each protein's structures directory.
Args:
outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist | [
"Download",
"ALL",
"mapped",
"experimental",
"structures",
"to",
"each",
"protein",
"s",
"structures",
"directory",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1336-L1358 | train | 29,065 |
SBRG/ssbio | ssbio/databases/swissmodel.py | get_oligomeric_state | def get_oligomeric_state(swiss_model_path):
"""Parse the oligomeric prediction in a SWISS-MODEL repository file
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
swiss_model_path (str): Path to SWISS-MODEL PDB file
Returns:
dict: Information parsed about the oligomeric state
"""
oligo_info = {}
with open(swiss_model_path, 'r') as f:
for line in f:
if line.startswith('REMARK 3 MODEL INFORMATION'):
break
for i in range(10):
line = f.readline()
if 'ENGIN' in line:
oligo_info['ENGIN'] = line.rstrip().split(' ')[-1]
elif 'OSTAT' in line:
oligo_info['OSTAT'] = line.rstrip().split(' ')[-1]
elif 'OSRSN' in line:
oligo_info['OSRSN'] = line.rstrip().split(' ')[-1]
elif 'QSPRD' in line:
oligo_info['QSPRD'] = line.rstrip().split(' ')[-1]
elif 'GMQE' in line:
oligo_info['GMQE'] = line.rstrip().split(' ')[-1]
elif 'QMN4' in line:
oligo_info['QMN4'] = line.rstrip().split(' ')[-1]
elif 'MODT' in line:
oligo_info['MODT'] = line.rstrip().split(' ')[-1]
return oligo_info | python | def get_oligomeric_state(swiss_model_path):
"""Parse the oligomeric prediction in a SWISS-MODEL repository file
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
swiss_model_path (str): Path to SWISS-MODEL PDB file
Returns:
dict: Information parsed about the oligomeric state
"""
oligo_info = {}
with open(swiss_model_path, 'r') as f:
for line in f:
if line.startswith('REMARK 3 MODEL INFORMATION'):
break
for i in range(10):
line = f.readline()
if 'ENGIN' in line:
oligo_info['ENGIN'] = line.rstrip().split(' ')[-1]
elif 'OSTAT' in line:
oligo_info['OSTAT'] = line.rstrip().split(' ')[-1]
elif 'OSRSN' in line:
oligo_info['OSRSN'] = line.rstrip().split(' ')[-1]
elif 'QSPRD' in line:
oligo_info['QSPRD'] = line.rstrip().split(' ')[-1]
elif 'GMQE' in line:
oligo_info['GMQE'] = line.rstrip().split(' ')[-1]
elif 'QMN4' in line:
oligo_info['QMN4'] = line.rstrip().split(' ')[-1]
elif 'MODT' in line:
oligo_info['MODT'] = line.rstrip().split(' ')[-1]
return oligo_info | [
"def",
"get_oligomeric_state",
"(",
"swiss_model_path",
")",
":",
"oligo_info",
"=",
"{",
"}",
"with",
"open",
"(",
"swiss_model_path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"'REMARK 3 MODEL... | Parse the oligomeric prediction in a SWISS-MODEL repository file
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
swiss_model_path (str): Path to SWISS-MODEL PDB file
Returns:
dict: Information parsed about the oligomeric state | [
"Parse",
"the",
"oligomeric",
"prediction",
"in",
"a",
"SWISS",
"-",
"MODEL",
"repository",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L168-L201 | train | 29,066 |
SBRG/ssbio | ssbio/databases/swissmodel.py | translate_ostat | def translate_ostat(ostat):
"""Translate the OSTAT field to an integer.
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
ostat (str): Predicted oligomeric state of the PDB file
Returns:
int: Translated string to integer
"""
ostat_lower = ostat.strip().lower()
if ostat_lower == 'monomer':
return 1
elif ostat_lower == 'homo-dimer':
return 2
elif ostat_lower == 'homo-trimer':
return 3
elif ostat_lower == 'homo-tetramer':
return 4
elif ostat_lower == 'homo-pentamer':
return 5
elif ostat_lower == 'homo-hexamer':
return 6
elif ostat_lower == 'homo-heptamer':
return 7
elif ostat_lower == 'homo-octamer':
return 8
else:
num = int(ostat_lower.split('-')[1])
return num | python | def translate_ostat(ostat):
"""Translate the OSTAT field to an integer.
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
ostat (str): Predicted oligomeric state of the PDB file
Returns:
int: Translated string to integer
"""
ostat_lower = ostat.strip().lower()
if ostat_lower == 'monomer':
return 1
elif ostat_lower == 'homo-dimer':
return 2
elif ostat_lower == 'homo-trimer':
return 3
elif ostat_lower == 'homo-tetramer':
return 4
elif ostat_lower == 'homo-pentamer':
return 5
elif ostat_lower == 'homo-hexamer':
return 6
elif ostat_lower == 'homo-heptamer':
return 7
elif ostat_lower == 'homo-octamer':
return 8
else:
num = int(ostat_lower.split('-')[1])
return num | [
"def",
"translate_ostat",
"(",
"ostat",
")",
":",
"ostat_lower",
"=",
"ostat",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"ostat_lower",
"==",
"'monomer'",
":",
"return",
"1",
"elif",
"ostat_lower",
"==",
"'homo-dimer'",
":",
"return",
"2",
"e... | Translate the OSTAT field to an integer.
As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models.
Args:
ostat (str): Predicted oligomeric state of the PDB file
Returns:
int: Translated string to integer | [
"Translate",
"the",
"OSTAT",
"field",
"to",
"an",
"integer",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L204-L235 | train | 29,067 |
SBRG/ssbio | ssbio/databases/swissmodel.py | SWISSMODEL.parse_metadata | def parse_metadata(self):
"""Parse the INDEX_JSON file and reorganize it as a dictionary of lists."""
all_models = defaultdict(list)
with open(self.metadata_index_json) as f:
loaded = json.load(f)
for m in loaded['index']:
all_models[m['uniprot_ac']].append(m)
self.all_models = dict(all_models) | python | def parse_metadata(self):
"""Parse the INDEX_JSON file and reorganize it as a dictionary of lists."""
all_models = defaultdict(list)
with open(self.metadata_index_json) as f:
loaded = json.load(f)
for m in loaded['index']:
all_models[m['uniprot_ac']].append(m)
self.all_models = dict(all_models) | [
"def",
"parse_metadata",
"(",
"self",
")",
":",
"all_models",
"=",
"defaultdict",
"(",
"list",
")",
"with",
"open",
"(",
"self",
".",
"metadata_index_json",
")",
"as",
"f",
":",
"loaded",
"=",
"json",
".",
"load",
"(",
"f",
")",
"for",
"m",
"in",
"lo... | Parse the INDEX_JSON file and reorganize it as a dictionary of lists. | [
"Parse",
"the",
"INDEX_JSON",
"file",
"and",
"reorganize",
"it",
"as",
"a",
"dictionary",
"of",
"lists",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L50-L61 | train | 29,068 |
SBRG/ssbio | ssbio/databases/swissmodel.py | SWISSMODEL.get_models | def get_models(self, uniprot_acc):
"""Return all available models for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
Returns:
dict: All available models in SWISS-MODEL for this UniProt entry
"""
if uniprot_acc in self.all_models:
return self.all_models[uniprot_acc]
else:
log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))
return None | python | def get_models(self, uniprot_acc):
"""Return all available models for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
Returns:
dict: All available models in SWISS-MODEL for this UniProt entry
"""
if uniprot_acc in self.all_models:
return self.all_models[uniprot_acc]
else:
log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))
return None | [
"def",
"get_models",
"(",
"self",
",",
"uniprot_acc",
")",
":",
"if",
"uniprot_acc",
"in",
"self",
".",
"all_models",
":",
"return",
"self",
".",
"all_models",
"[",
"uniprot_acc",
"]",
"else",
":",
"log",
".",
"error",
"(",
"'{}: no SWISS-MODELs available'",
... | Return all available models for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
Returns:
dict: All available models in SWISS-MODEL for this UniProt entry | [
"Return",
"all",
"available",
"models",
"for",
"a",
"UniProt",
"accession",
"number",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L63-L77 | train | 29,069 |
SBRG/ssbio | ssbio/databases/swissmodel.py | SWISSMODEL.get_model_filepath | def get_model_filepath(self, infodict):
"""Get the path to the homology model using information from the index dictionary for a single model.
Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.
Use one of those dictionaries as input to this function to get the filepath to the model itself.
Args:
infodict (dict): Information about a model from get_models
Returns:
str: Path to homology model
"""
u = infodict['uniprot_ac']
original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'],
infodict['template'], infodict['coordinate_id'])
file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6],
'swissmodel', '{}.pdb'.format(original_filename))
if op.exists(file_path):
return file_path
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return None | python | def get_model_filepath(self, infodict):
"""Get the path to the homology model using information from the index dictionary for a single model.
Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.
Use one of those dictionaries as input to this function to get the filepath to the model itself.
Args:
infodict (dict): Information about a model from get_models
Returns:
str: Path to homology model
"""
u = infodict['uniprot_ac']
original_filename = '{}_{}_{}_{}'.format(infodict['from'], infodict['to'],
infodict['template'], infodict['coordinate_id'])
file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:6],
'swissmodel', '{}.pdb'.format(original_filename))
if op.exists(file_path):
return file_path
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return None | [
"def",
"get_model_filepath",
"(",
"self",
",",
"infodict",
")",
":",
"u",
"=",
"infodict",
"[",
"'uniprot_ac'",
"]",
"original_filename",
"=",
"'{}_{}_{}_{}'",
".",
"format",
"(",
"infodict",
"[",
"'from'",
"]",
",",
"infodict",
"[",
"'to'",
"]",
",",
"inf... | Get the path to the homology model using information from the index dictionary for a single model.
Example: use self.get_models(UNIPROT_ID) to get all the models, which returns a list of dictionaries.
Use one of those dictionaries as input to this function to get the filepath to the model itself.
Args:
infodict (dict): Information about a model from get_models
Returns:
str: Path to homology model | [
"Get",
"the",
"path",
"to",
"the",
"homology",
"model",
"using",
"information",
"from",
"the",
"index",
"dictionary",
"for",
"a",
"single",
"model",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L79-L103 | train | 29,070 |
SBRG/ssbio | ssbio/databases/swissmodel.py | SWISSMODEL.download_models | def download_models(self, uniprot_acc, outdir='', force_rerun=False):
"""Download all models available for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
outdir (str): Path to output directory, uses working directory if not set
force_rerun (bool): Force a redownload the models if they already exist
Returns:
list: Paths to the downloaded models
"""
downloaded = []
subset = self.get_models(uniprot_acc)
for entry in subset:
ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])
outfile = op.join(outdir, ident + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = requests.get(entry['url'])
if response.status_code == 404:
log.error('{}: 404 returned, no model available.'.format(ident))
else:
with open(outfile, 'w') as f:
f.write(response.text)
log.debug('{}: downloaded homology model'.format(ident))
downloaded.append(outfile)
else:
downloaded.append(outfile)
return downloaded | python | def download_models(self, uniprot_acc, outdir='', force_rerun=False):
"""Download all models available for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
outdir (str): Path to output directory, uses working directory if not set
force_rerun (bool): Force a redownload the models if they already exist
Returns:
list: Paths to the downloaded models
"""
downloaded = []
subset = self.get_models(uniprot_acc)
for entry in subset:
ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])
outfile = op.join(outdir, ident + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = requests.get(entry['url'])
if response.status_code == 404:
log.error('{}: 404 returned, no model available.'.format(ident))
else:
with open(outfile, 'w') as f:
f.write(response.text)
log.debug('{}: downloaded homology model'.format(ident))
downloaded.append(outfile)
else:
downloaded.append(outfile)
return downloaded | [
"def",
"download_models",
"(",
"self",
",",
"uniprot_acc",
",",
"outdir",
"=",
"''",
",",
"force_rerun",
"=",
"False",
")",
":",
"downloaded",
"=",
"[",
"]",
"subset",
"=",
"self",
".",
"get_models",
"(",
"uniprot_acc",
")",
"for",
"entry",
"in",
"subset... | Download all models available for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
outdir (str): Path to output directory, uses working directory if not set
force_rerun (bool): Force a redownload the models if they already exist
Returns:
list: Paths to the downloaded models | [
"Download",
"all",
"models",
"available",
"for",
"a",
"UniProt",
"accession",
"number",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L105-L139 | train | 29,071 |
SBRG/ssbio | ssbio/databases/swissmodel.py | SWISSMODEL.organize_models | def organize_models(self, outdir, force_rerun=False):
"""Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
"""
uniprot_to_swissmodel = defaultdict(list)
for u, models in self.all_models.items():
for m in models:
original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])
file_path = op.join(self.metadata_dir,
u[:2], u[2:4], u[4:], 'swissmodel',
'{}.pdb'.format(original_filename))
if op.exists(file_path):
new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])
shutil.copy(file_path, op.join(outdir, new_filename))
uniprot_to_swissmodel[u].append(new_filename)
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return uniprot_to_swissmodel | python | def organize_models(self, outdir, force_rerun=False):
"""Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
"""
uniprot_to_swissmodel = defaultdict(list)
for u, models in self.all_models.items():
for m in models:
original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])
file_path = op.join(self.metadata_dir,
u[:2], u[2:4], u[4:], 'swissmodel',
'{}.pdb'.format(original_filename))
if op.exists(file_path):
new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])
shutil.copy(file_path, op.join(outdir, new_filename))
uniprot_to_swissmodel[u].append(new_filename)
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return uniprot_to_swissmodel | [
"def",
"organize_models",
"(",
"self",
",",
"outdir",
",",
"force_rerun",
"=",
"False",
")",
":",
"uniprot_to_swissmodel",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"u",
",",
"models",
"in",
"self",
".",
"all_models",
".",
"items",
"(",
")",
":",
"for... | Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values | [
"Organize",
"and",
"rename",
"SWISS",
"-",
"MODEL",
"models",
"to",
"a",
"single",
"folder",
"with",
"a",
"name",
"containing",
"template",
"information",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/swissmodel.py#L141-L166 | train | 29,072 |
SBRG/ssbio | ssbio/protein/sequence/properties/thermostability.py | get_dG_at_T | def get_dG_at_T(seq, temp):
"""Predict dG at temperature T, using best predictions from Dill or Oobatake methods.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
(tuple): tuple containing:
dG (float) Free energy of unfolding dG (cal/mol)
keq (float): Equilibrium constant Keq
method (str): Method used to calculate
"""
# R (molar gas constant) in calories
r_cal = scipy.constants.R / scipy.constants.calorie
seq = ssbio.protein.sequence.utils.cast_to_str(seq)
oobatake = {}
for t in range(20, 51):
oobatake[t] = calculate_oobatake_dG(seq, t)
stable = [i for i in oobatake.values() if i > 0]
if len(stable) == 0:
# If oobatake dG < 0 for all tempertures [20,50], use Dill dG
# and convert the number from J/mol to cal/mol
dG = 0.238846 * calculate_dill_dG(len(seq), temp)
method='Dill'
else:
dG = oobatake[temp]
method='Oobatake'
keq = math.exp(-1 * dG / (r_cal * (temp + 273.15)))
return dG, keq, method | python | def get_dG_at_T(seq, temp):
"""Predict dG at temperature T, using best predictions from Dill or Oobatake methods.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
(tuple): tuple containing:
dG (float) Free energy of unfolding dG (cal/mol)
keq (float): Equilibrium constant Keq
method (str): Method used to calculate
"""
# R (molar gas constant) in calories
r_cal = scipy.constants.R / scipy.constants.calorie
seq = ssbio.protein.sequence.utils.cast_to_str(seq)
oobatake = {}
for t in range(20, 51):
oobatake[t] = calculate_oobatake_dG(seq, t)
stable = [i for i in oobatake.values() if i > 0]
if len(stable) == 0:
# If oobatake dG < 0 for all tempertures [20,50], use Dill dG
# and convert the number from J/mol to cal/mol
dG = 0.238846 * calculate_dill_dG(len(seq), temp)
method='Dill'
else:
dG = oobatake[temp]
method='Oobatake'
keq = math.exp(-1 * dG / (r_cal * (temp + 273.15)))
return dG, keq, method | [
"def",
"get_dG_at_T",
"(",
"seq",
",",
"temp",
")",
":",
"# R (molar gas constant) in calories",
"r_cal",
"=",
"scipy",
".",
"constants",
".",
"R",
"/",
"scipy",
".",
"constants",
".",
"calorie",
"seq",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"... | Predict dG at temperature T, using best predictions from Dill or Oobatake methods.
Args:
seq (str, Seq, SeqRecord): Amino acid sequence
temp (float): Temperature in degrees C
Returns:
(tuple): tuple containing:
dG (float) Free energy of unfolding dG (cal/mol)
keq (float): Equilibrium constant Keq
method (str): Method used to calculate | [
"Predict",
"dG",
"at",
"temperature",
"T",
"using",
"best",
"predictions",
"from",
"Dill",
"or",
"Oobatake",
"methods",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/thermostability.py#L156-L193 | train | 29,073 |
SBRG/ssbio | ssbio/protein/structure/properties/opm.py | run_ppm_server | def run_ppm_server(pdb_file, outfile, force_rerun=False):
"""Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
url = 'http://sunshine.phar.umich.edu/upload_file.php'
files = {'userfile': open(pdb_file, 'rb')}
r = requests.post(url, files=files)
info = r.text
# Save results in raw HTML format
with open(outfile, 'w') as f:
f.write(info)
else:
# Utilize existing saved results
with open(outfile, 'r') as f:
info = f.read()
# Clean up the HTML stuff
t = info.replace('\n', '')
tt = t.replace('\r', '')
ttt = tt.replace('\t', '')
soup = BeautifulSoup(ttt, "lxml")
# Find all tables in the HTML code
tables = soup.find_all("table", attrs={"class": "data"})
info_dict = {}
# There are multiple tables with information
table_index = 0
for t in tables:
data_index = 0
# "row1" contains data
for data in t.find_all('tr', attrs={"class": "row1"}):
data_list = list(data.strings)
if table_index == 0:
info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
info_dict['deltaG_transfer'] = data_list[2]
info_dict['Tilt Angle'] = data_list[3]
if table_index == 1 and data_index == 0:
info_dict['Embedded_residues_Tilt'] = data_list[0]
info_dict['Embedded_residues'] = data_list[1]
if table_index == 1 and data_index == 1:
info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
if table_index == 2:
info_dict['Output Messages'] = data_list[1]
if table_index == 3:
baseurl = 'http://sunshine.phar.umich.edu/'
a = data.find('a', href=True)
download_url = baseurl + a['href'].replace('./', '')
info_dict['Output file download link'] = download_url
data_index += 1
table_index += 1
return info_dict | python | def run_ppm_server(pdb_file, outfile, force_rerun=False):
"""Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
url = 'http://sunshine.phar.umich.edu/upload_file.php'
files = {'userfile': open(pdb_file, 'rb')}
r = requests.post(url, files=files)
info = r.text
# Save results in raw HTML format
with open(outfile, 'w') as f:
f.write(info)
else:
# Utilize existing saved results
with open(outfile, 'r') as f:
info = f.read()
# Clean up the HTML stuff
t = info.replace('\n', '')
tt = t.replace('\r', '')
ttt = tt.replace('\t', '')
soup = BeautifulSoup(ttt, "lxml")
# Find all tables in the HTML code
tables = soup.find_all("table", attrs={"class": "data"})
info_dict = {}
# There are multiple tables with information
table_index = 0
for t in tables:
data_index = 0
# "row1" contains data
for data in t.find_all('tr', attrs={"class": "row1"}):
data_list = list(data.strings)
if table_index == 0:
info_dict['Depth/Hydrophobic Thickness'] = data_list[0]
info_dict['deltaG_transfer'] = data_list[2]
info_dict['Tilt Angle'] = data_list[3]
if table_index == 1 and data_index == 0:
info_dict['Embedded_residues_Tilt'] = data_list[0]
info_dict['Embedded_residues'] = data_list[1]
if table_index == 1 and data_index == 1:
info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]
info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]
if table_index == 2:
info_dict['Output Messages'] = data_list[1]
if table_index == 3:
baseurl = 'http://sunshine.phar.umich.edu/'
a = data.find('a', href=True)
download_url = baseurl + a['href'].replace('./', '')
info_dict['Output file download link'] = download_url
data_index += 1
table_index += 1
return info_dict | [
"def",
"run_ppm_server",
"(",
"pdb_file",
",",
"outfile",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"ssbio",
".",
"utils",
".",
"force_rerun",
"(",
"outfile",
"=",
"outfile",
",",
"flag",
"=",
"force_rerun",
")",
":",
"url",
"=",
"'http://sunshine.... | Run the PPM server from OPM to predict transmembrane residues.
Args:
pdb_file (str): Path to PDB file
outfile (str): Path to output HTML results file
force_rerun (bool): Flag to rerun PPM if HTML results file already exists
Returns:
dict: Dictionary of information from the PPM run, including a link to download the membrane protein file | [
"Run",
"the",
"PPM",
"server",
"from",
"OPM",
"to",
"predict",
"transmembrane",
"residues",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/opm.py#L44-L116 | train | 29,074 |
SBRG/ssbio | ssbio/protein/sequence/properties/cctop.py | cctop_submit | def cctop_submit(seq_str):
"""Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
dict: Job ID on the CCTOP server
"""
url = 'http://cctop.enzim.ttk.mta.hu/php/submit.php?sequence={}&tmFilter&signalPred'.format(seq_str)
r = requests.post(url)
jobid = r.text.split('ID: ')[1]
return jobid | python | def cctop_submit(seq_str):
"""Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
dict: Job ID on the CCTOP server
"""
url = 'http://cctop.enzim.ttk.mta.hu/php/submit.php?sequence={}&tmFilter&signalPred'.format(seq_str)
r = requests.post(url)
jobid = r.text.split('ID: ')[1]
return jobid | [
"def",
"cctop_submit",
"(",
"seq_str",
")",
":",
"url",
"=",
"'http://cctop.enzim.ttk.mta.hu/php/submit.php?sequence={}&tmFilter&signalPred'",
".",
"format",
"(",
"seq_str",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
")",
"jobid",
"=",
"r",
".",
"text",
... | Submit a protein sequence string to CCTOP and return the job ID.
Args:
seq_str (str): Protein sequence as a string
Returns:
dict: Job ID on the CCTOP server | [
"Submit",
"a",
"protein",
"sequence",
"string",
"to",
"CCTOP",
"and",
"return",
"the",
"job",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/cctop.py#L6-L20 | train | 29,075 |
SBRG/ssbio | ssbio/protein/sequence/properties/cctop.py | cctop_check_status | def cctop_check_status(jobid):
"""Check the status of a CCTOP job ID.
Args:
jobid (str): Job ID obtained when job was submitted
Returns:
str: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress,
'Invalid' for any errors.
"""
status = 'http://cctop.enzim.ttk.mta.hu/php/poll.php?jobId={}'.format(jobid)
status_text = requests.post(status)
return status_text.text | python | def cctop_check_status(jobid):
"""Check the status of a CCTOP job ID.
Args:
jobid (str): Job ID obtained when job was submitted
Returns:
str: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress,
'Invalid' for any errors.
"""
status = 'http://cctop.enzim.ttk.mta.hu/php/poll.php?jobId={}'.format(jobid)
status_text = requests.post(status)
return status_text.text | [
"def",
"cctop_check_status",
"(",
"jobid",
")",
":",
"status",
"=",
"'http://cctop.enzim.ttk.mta.hu/php/poll.php?jobId={}'",
".",
"format",
"(",
"jobid",
")",
"status_text",
"=",
"requests",
".",
"post",
"(",
"status",
")",
"return",
"status_text",
".",
"text"
] | Check the status of a CCTOP job ID.
Args:
jobid (str): Job ID obtained when job was submitted
Returns:
str: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress,
'Invalid' for any errors. | [
"Check",
"the",
"status",
"of",
"a",
"CCTOP",
"job",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/cctop.py#L23-L36 | train | 29,076 |
SBRG/ssbio | ssbio/protein/sequence/properties/cctop.py | cctop_save_xml | def cctop_save_xml(jobid, outpath):
"""Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
"""
status = cctop_check_status(jobid=jobid)
if status == 'Finished':
result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status)) | python | def cctop_save_xml(jobid, outpath):
"""Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
"""
status = cctop_check_status(jobid=jobid)
if status == 'Finished':
result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status)) | [
"def",
"cctop_save_xml",
"(",
"jobid",
",",
"outpath",
")",
":",
"status",
"=",
"cctop_check_status",
"(",
"jobid",
"=",
"jobid",
")",
"if",
"status",
"==",
"'Finished'",
":",
"result",
"=",
"'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'",
".",
"format",
... | Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename | [
"Save",
"the",
"CCTOP",
"results",
"file",
"in",
"XML",
"format",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/cctop.py#L39-L58 | train | 29,077 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | load_feather | def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
"""Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
"""
protein_df = pd.read_feather(protein_feather).set_index('index')
# Combine counts for residue groups
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {
'aa_count_bulk' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],
'subseqs' : ['metal_2_5D', 'metal_3D']},
'aa_count_carb' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_chrg' : {'residues': _aa_property_dict_one['Charged'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',
'surface_3D']},
'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_tmstab' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_dis' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']},
'aa_count_ord' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']}}
# Do combination counts for all types of subsequences
for suffix, info in aggregators.items():
agg_residues = info['residues']
for prefix in info['subseqs']:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = prefix + '_aa_count_' + agg_res
if to_add_idx in protein_df.index:
to_add_idxes.append(to_add_idx)
subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() # Add each residue series
protein_df.loc[prefix + '_' + suffix] = subseq_agged_col # Append to df
## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length
## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length
if length_filter_pid:
keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
protein_df = protein_df[keep_cols]
# Multiply by proteomics copy number?
if copynum_scale:
if not isinstance(copynum_df, pd.DataFrame):
raise ValueError('Please supply copy numbers')
protein_id = op.basename(protein_feather).split('_protein')[0]
if protein_id in copynum_df.index:
copynum = copynum_df.at[protein_id, 'copynum']
if copynum > 0: # TODO: currently keeping one copy of proteins with 0, is that ok?
protein_df = protein_df * copynum
return protein_df | python | def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
"""Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together
"""
protein_df = pd.read_feather(protein_feather).set_index('index')
# Combine counts for residue groups
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {
'aa_count_bulk' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],
'subseqs' : ['metal_2_5D', 'metal_3D']},
'aa_count_carb' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_chrg' : {'residues': _aa_property_dict_one['Charged'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',
'surface_3D']},
'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],
'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},
'aa_count_tmstab' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],
'subseqs' : ['tm_2D', 'tm_3D']},
'aa_count_dis' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']},
'aa_count_ord' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],
'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',
'dna_2_5D']}}
# Do combination counts for all types of subsequences
for suffix, info in aggregators.items():
agg_residues = info['residues']
for prefix in info['subseqs']:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = prefix + '_aa_count_' + agg_res
if to_add_idx in protein_df.index:
to_add_idxes.append(to_add_idx)
subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() # Add each residue series
protein_df.loc[prefix + '_' + suffix] = subseq_agged_col # Append to df
## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length
## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length
if length_filter_pid:
keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index
protein_df = protein_df[keep_cols]
# Multiply by proteomics copy number?
if copynum_scale:
if not isinstance(copynum_df, pd.DataFrame):
raise ValueError('Please supply copy numbers')
protein_id = op.basename(protein_feather).split('_protein')[0]
if protein_id in copynum_df.index:
copynum = copynum_df.at[protein_id, 'copynum']
if copynum > 0: # TODO: currently keeping one copy of proteins with 0, is that ok?
protein_df = protein_df * copynum
return protein_df | [
"def",
"load_feather",
"(",
"protein_feather",
",",
"length_filter_pid",
"=",
"None",
",",
"copynum_scale",
"=",
"False",
",",
"copynum_df",
"=",
"None",
")",
":",
"protein_df",
"=",
"pd",
".",
"read_feather",
"(",
"protein_feather",
")",
".",
"set_index",
"("... | Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together | [
"Load",
"a",
"feather",
"of",
"amino",
"acid",
"counts",
"for",
"a",
"protein",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L128-L195 | train | 29,078 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | get_proteome_counts_impute_missing | def get_proteome_counts_impute_missing(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts, uses the mean feature vector to fill in missing proteins for a strain"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(index=_all_counts, columns=loaded.columns)
first = False
new_columns = list(set(loaded.columns.tolist()).difference(big_strain_counts_df.columns))
if new_columns:
for col in new_columns:
big_strain_counts_df[col] = big_strain_counts_df.mean(axis=1)
not_in_loaded = list(set(big_strain_counts_df.columns).difference(loaded.columns.tolist()))
if not_in_loaded:
for col in not_in_loaded:
big_strain_counts_df[col] = big_strain_counts_df[col] + loaded.mean(axis=1)
big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index') | python | def get_proteome_counts_impute_missing(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts, uses the mean feature vector to fill in missing proteins for a strain"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(index=_all_counts, columns=loaded.columns)
first = False
new_columns = list(set(loaded.columns.tolist()).difference(big_strain_counts_df.columns))
if new_columns:
for col in new_columns:
big_strain_counts_df[col] = big_strain_counts_df.mean(axis=1)
not_in_loaded = list(set(big_strain_counts_df.columns).difference(loaded.columns.tolist()))
if not_in_loaded:
for col in not_in_loaded:
big_strain_counts_df[col] = big_strain_counts_df[col] + loaded.mean(axis=1)
big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index') | [
"def",
"get_proteome_counts_impute_missing",
"(",
"prots_filtered_feathers",
",",
"outpath",
",",
"length_filter_pid",
"=",
"None",
",",
"copynum_scale",
"=",
"False",
",",
"copynum_df",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"ssbio",
".",
... | Get counts, uses the mean feature vector to fill in missing proteins for a strain | [
"Get",
"counts",
"uses",
"the",
"mean",
"feature",
"vector",
"to",
"fill",
"in",
"missing",
"proteins",
"for",
"a",
"strain"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L235-L266 | train | 29,079 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | get_proteome_correct_percentages | def get_proteome_correct_percentages(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts and normalize by number of proteins, providing percentages"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
prot_tracker = defaultdict(int)
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(columns=loaded.columns)
first = False
tmp_df = pd.DataFrame(columns=loaded.columns)
for strain in loaded.columns:
prot_tracker[strain] += 1
totals = list(filter(lambda x: x.endswith('total'), loaded[strain].index))
for t in totals:
counts = t.rsplit('_', 1)[0]
aa_counts = list(
filter(lambda x: (x.startswith(counts) and x not in totals), loaded[strain].index))
for aa_count in aa_counts:
tmp_df.at[aa_count.replace('count', '%'), strain] = loaded[strain][aa_count] / \
loaded[strain][t]
big_strain_counts_df = big_strain_counts_df.add(tmp_df, fill_value=0)
for c, total in prot_tracker.items():
big_strain_counts_df.loc[:, c] /= total
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index') | python | def get_proteome_correct_percentages(prots_filtered_feathers, outpath, length_filter_pid=None,
copynum_scale=False, copynum_df=None,
force_rerun=False):
"""Get counts and normalize by number of proteins, providing percentages"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
prot_tracker = defaultdict(int)
big_strain_counts_df = pd.DataFrame()
first = True
for feather in prots_filtered_feathers:
loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
copynum_scale=copynum_scale,
copynum_df=copynum_df)
if first:
big_strain_counts_df = pd.DataFrame(columns=loaded.columns)
first = False
tmp_df = pd.DataFrame(columns=loaded.columns)
for strain in loaded.columns:
prot_tracker[strain] += 1
totals = list(filter(lambda x: x.endswith('total'), loaded[strain].index))
for t in totals:
counts = t.rsplit('_', 1)[0]
aa_counts = list(
filter(lambda x: (x.startswith(counts) and x not in totals), loaded[strain].index))
for aa_count in aa_counts:
tmp_df.at[aa_count.replace('count', '%'), strain] = loaded[strain][aa_count] / \
loaded[strain][t]
big_strain_counts_df = big_strain_counts_df.add(tmp_df, fill_value=0)
for c, total in prot_tracker.items():
big_strain_counts_df.loc[:, c] /= total
if len(big_strain_counts_df) > 0:
big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
return big_strain_counts_df
else:
return pd.read_feather(outpath).set_index('index') | [
"def",
"get_proteome_correct_percentages",
"(",
"prots_filtered_feathers",
",",
"outpath",
",",
"length_filter_pid",
"=",
"None",
",",
"copynum_scale",
"=",
"False",
",",
"copynum_df",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"ssbio",
".",
... | Get counts and normalize by number of proteins, providing percentages | [
"Get",
"counts",
"and",
"normalize",
"by",
"number",
"of",
"proteins",
"providing",
"percentages"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L288-L324 | train | 29,080 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | run_all2 | def run_all2(protgroup, memornot, subsequences, base_outdir,
protgroup_dict, protein_feathers_dir, date, errfile, impute_counts=True,
cutoff_num_proteins=0, core_only_genes=None,
length_filter_pid=.8, remove_correlated_feats=True,
force_rerun_counts=False, force_rerun_percentages=False, force_rerun_pca=False):
"""run_all but ignoring observations before pca"""
import ssbio.utils
# Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything
import os
os.environ['OMP_NUM_THREADS'] = '1'
# First, filter down the protein group to the membrane/nonmembrane definition
prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
protgroup_dict=protgroup_dict,
protein_feathers_dir=protein_feathers_dir,
core_only_genes=core_only_genes)
num_proteins = len(prots_filtered_feathers)
if num_proteins <= cutoff_num_proteins:
return
# Make output directories
protscale = 'proteome_unscaled'
outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))
outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))
outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))
if impute_counts:
big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_IMP.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_counts)
big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_IMP.fthr'.format(
date)),
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))
# Divide by totals to get percentages in a new dataframe
else:
try:
big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_AVG.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))
except:
with open(errfile, "a") as myfile:
myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):
# Then, get filters for rows of the loaded feathers for interested subsequences
keep_subsequences = get_interested_subsequences(subsequences=subsequences)
# Some numbers: number of features
num_feats = len(big_strain_percents_df)
# Make an unwieldy title
big_title = 'LOC={0}; PROTGROUP={1};\n' \
'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot),
'-'.join(protgroup),
num_proteins,
num_feats)
# Run PCA and make plots
runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(), plot_title=big_title)
try:
runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)
except:
with open(errfile, "a") as myfile:
myfile.write(
'CLEAN ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
# try:
runner.run_pca()
# except:
# with open(errfile, "a") as myfile:
# myfile.write(
# 'PCA ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
# return
with open(pca_pickle, 'wb') as f:
pickle.dump(runner, f)
else:
with open(pca_pickle, 'rb') as f:
runner = pickle.load(f) | python | def run_all2(protgroup, memornot, subsequences, base_outdir,
protgroup_dict, protein_feathers_dir, date, errfile, impute_counts=True,
cutoff_num_proteins=0, core_only_genes=None,
length_filter_pid=.8, remove_correlated_feats=True,
force_rerun_counts=False, force_rerun_percentages=False, force_rerun_pca=False):
"""run_all but ignoring observations before pca"""
import ssbio.utils
# Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything
import os
os.environ['OMP_NUM_THREADS'] = '1'
# First, filter down the protein group to the membrane/nonmembrane definition
prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,
protgroup_dict=protgroup_dict,
protein_feathers_dir=protein_feathers_dir,
core_only_genes=core_only_genes)
num_proteins = len(prots_filtered_feathers)
if num_proteins <= cutoff_num_proteins:
return
# Make output directories
protscale = 'proteome_unscaled'
outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))
outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))
outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))
if impute_counts:
big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_IMP.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_counts)
big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_IMP.fthr'.format(
date)),
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))
# Divide by totals to get percentages in a new dataframe
else:
try:
big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,
outpath=op.join(outdir_final,
'{}-subsequence_proteome_perc_AVG.fthr'.format(
date)),
length_filter_pid=length_filter_pid,
force_rerun=force_rerun_percentages)
pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))
except:
with open(errfile, "a") as myfile:
myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):
# Then, get filters for rows of the loaded feathers for interested subsequences
keep_subsequences = get_interested_subsequences(subsequences=subsequences)
# Some numbers: number of features
num_feats = len(big_strain_percents_df)
# Make an unwieldy title
big_title = 'LOC={0}; PROTGROUP={1};\n' \
'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot),
'-'.join(protgroup),
num_proteins,
num_feats)
# Run PCA and make plots
runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(), plot_title=big_title)
try:
runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)
except:
with open(errfile, "a") as myfile:
myfile.write(
'CLEAN ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
return
# try:
runner.run_pca()
# except:
# with open(errfile, "a") as myfile:
# myfile.write(
# 'PCA ERR: ' + '-'.join(memornot) + '\t' + '-'.join(protgroup) + "\n")
# return
with open(pca_pickle, 'wb') as f:
pickle.dump(runner, f)
else:
with open(pca_pickle, 'rb') as f:
runner = pickle.load(f) | [
"def",
"run_all2",
"(",
"protgroup",
",",
"memornot",
",",
"subsequences",
",",
"base_outdir",
",",
"protgroup_dict",
",",
"protein_feathers_dir",
",",
"date",
",",
"errfile",
",",
"impute_counts",
"=",
"True",
",",
"cutoff_num_proteins",
"=",
"0",
",",
"core_on... | run_all but ignoring observations before pca | [
"run_all",
"but",
"ignoring",
"observations",
"before",
"pca"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L1000-L1093 | train | 29,081 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | PCAMultiROS.make_contribplot | def make_contribplot(self, pc_to_look_at=1, sigadder=0.01, outpath=None, dpi=150, return_top_contribs=False):
"""Make a plot showing contributions of properties to a PC"""
cont = pd.DataFrame(self.pca.components_, columns=self.features_df.index, index=self.pc_names_list)
tmp_df = pd.DataFrame(cont.iloc[pc_to_look_at - 1]).reset_index().rename(columns={'index': 'Property'})
tmp_df['Contribution'] = tmp_df.iloc[:, 1] ** 2
tmp_df = tmp_df[tmp_df['Contribution'] > 1 / len(
cont.iloc[0]) + sigadder] # Alter sigadder to just plot more/less significant contributors
tmp_df['Sign'] = np.where(tmp_df.iloc[:, 1] >= 0, 'Positive', 'Negative')
tmp_df = tmp_df.sort_values(by='Contribution', ascending=False)
fig, ax = plt.subplots(figsize=(30, 10))
sns.barplot(data=tmp_df, y='Property', x='Contribution', hue='Sign', dodge=False, ax=ax, hue_order=['Positive', 'Negative'],
palette=sns.color_palette("coolwarm", 2))
# Random formatting crap
self._change_height(ax, .6) # Make bars thinner
ax.set_title('{} contributors'.format(self.pc_names_list[pc_to_look_at - 1]))
legend = plt.legend(loc=8, bbox_to_anchor=(1.2, .8), ncol=1, title='Sign', fontsize=10)
plt.setp(legend.get_title(), fontsize=12)
plt.gcf().subplots_adjust(left=.5, right=.65)
if outpath:
fig.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
if return_top_contribs:
return tmp_df.Property.values.tolist() | python | def make_contribplot(self, pc_to_look_at=1, sigadder=0.01, outpath=None, dpi=150, return_top_contribs=False):
"""Make a plot showing contributions of properties to a PC"""
cont = pd.DataFrame(self.pca.components_, columns=self.features_df.index, index=self.pc_names_list)
tmp_df = pd.DataFrame(cont.iloc[pc_to_look_at - 1]).reset_index().rename(columns={'index': 'Property'})
tmp_df['Contribution'] = tmp_df.iloc[:, 1] ** 2
tmp_df = tmp_df[tmp_df['Contribution'] > 1 / len(
cont.iloc[0]) + sigadder] # Alter sigadder to just plot more/less significant contributors
tmp_df['Sign'] = np.where(tmp_df.iloc[:, 1] >= 0, 'Positive', 'Negative')
tmp_df = tmp_df.sort_values(by='Contribution', ascending=False)
fig, ax = plt.subplots(figsize=(30, 10))
sns.barplot(data=tmp_df, y='Property', x='Contribution', hue='Sign', dodge=False, ax=ax, hue_order=['Positive', 'Negative'],
palette=sns.color_palette("coolwarm", 2))
# Random formatting crap
self._change_height(ax, .6) # Make bars thinner
ax.set_title('{} contributors'.format(self.pc_names_list[pc_to_look_at - 1]))
legend = plt.legend(loc=8, bbox_to_anchor=(1.2, .8), ncol=1, title='Sign', fontsize=10)
plt.setp(legend.get_title(), fontsize=12)
plt.gcf().subplots_adjust(left=.5, right=.65)
if outpath:
fig.savefig(outpath, dpi=dpi)
else:
plt.show()
plt.close()
if return_top_contribs:
return tmp_df.Property.values.tolist() | [
"def",
"make_contribplot",
"(",
"self",
",",
"pc_to_look_at",
"=",
"1",
",",
"sigadder",
"=",
"0.01",
",",
"outpath",
"=",
"None",
",",
"dpi",
"=",
"150",
",",
"return_top_contribs",
"=",
"False",
")",
":",
"cont",
"=",
"pd",
".",
"DataFrame",
"(",
"se... | Make a plot showing contributions of properties to a PC | [
"Make",
"a",
"plot",
"showing",
"contributions",
"of",
"properties",
"to",
"a",
"PC"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L605-L632 | train | 29,082 |
SBRG/ssbio | ssbio/pipeline/atlas3.py | PCAMultiROS._change_height | def _change_height(self, ax, new_value):
"""Make bars in horizontal bar chart thinner"""
for patch in ax.patches:
current_height = patch.get_height()
diff = current_height - new_value
# we change the bar height
patch.set_height(new_value)
# we recenter the bar
patch.set_y(patch.get_y() + diff * .5) | python | def _change_height(self, ax, new_value):
"""Make bars in horizontal bar chart thinner"""
for patch in ax.patches:
current_height = patch.get_height()
diff = current_height - new_value
# we change the bar height
patch.set_height(new_value)
# we recenter the bar
patch.set_y(patch.get_y() + diff * .5) | [
"def",
"_change_height",
"(",
"self",
",",
"ax",
",",
"new_value",
")",
":",
"for",
"patch",
"in",
"ax",
".",
"patches",
":",
"current_height",
"=",
"patch",
".",
"get_height",
"(",
")",
"diff",
"=",
"current_height",
"-",
"new_value",
"# we change the bar h... | Make bars in horizontal bar chart thinner | [
"Make",
"bars",
"in",
"horizontal",
"bar",
"chart",
"thinner"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas3.py#L634-L644 | train | 29,083 |
SBRG/ssbio | ssbio/complex/oligomer.py | write_merged_bioassembly | def write_merged_bioassembly(inpath, outdir, outname, force_rerun=False):
"""Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model.
Args:
infile (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure.
outdir (str): Path to output directory
outname (str): New filename of structure file
force_rerun (bool): If a new PDB should be written if the file exists
Returns:
str: Path to newly written PDB file.
"""
outpath = outfile=op.join(outdir, outname + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=op.join(outdir, outname + '.pdb')):
s = StructProp('Model merging', structure_path=inpath, file_type='pdb')
ss = s.parse_structure()
merge_all_models_into_first_model(ss.structure)
outpath = ss.write_pdb(custom_name=outname, out_dir=outdir, force_rerun=force_rerun)
else:
return outpath | python | def write_merged_bioassembly(inpath, outdir, outname, force_rerun=False):
"""Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model.
Args:
infile (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure.
outdir (str): Path to output directory
outname (str): New filename of structure file
force_rerun (bool): If a new PDB should be written if the file exists
Returns:
str: Path to newly written PDB file.
"""
outpath = outfile=op.join(outdir, outname + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=op.join(outdir, outname + '.pdb')):
s = StructProp('Model merging', structure_path=inpath, file_type='pdb')
ss = s.parse_structure()
merge_all_models_into_first_model(ss.structure)
outpath = ss.write_pdb(custom_name=outname, out_dir=outdir, force_rerun=force_rerun)
else:
return outpath | [
"def",
"write_merged_bioassembly",
"(",
"inpath",
",",
"outdir",
",",
"outname",
",",
"force_rerun",
"=",
"False",
")",
":",
"outpath",
"=",
"outfile",
"=",
"op",
".",
"join",
"(",
"outdir",
",",
"outname",
"+",
"'.pdb'",
")",
"if",
"ssbio",
".",
"utils"... | Utility to take as input a bioassembly file and merge all its models into multiple chains in a single model.
Args:
infile (str): Path to input PDB file with multiple models that represent an oligomeric form of a structure.
outdir (str): Path to output directory
outname (str): New filename of structure file
force_rerun (bool): If a new PDB should be written if the file exists
Returns:
str: Path to newly written PDB file. | [
"Utility",
"to",
"take",
"as",
"input",
"a",
"bioassembly",
"file",
"and",
"merge",
"all",
"its",
"models",
"into",
"multiple",
"chains",
"in",
"a",
"single",
"model",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/complex/oligomer.py#L96-L117 | train | 29,084 |
SBRG/ssbio | ssbio/io/__init__.py | save_json | def save_json(obj, outfile, allow_nan=True, compression=False):
"""Save an ssbio object as a JSON file using json_tricks"""
if compression:
with open(outfile, 'wb') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
else:
with open(outfile, 'w') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile)) | python | def save_json(obj, outfile, allow_nan=True, compression=False):
"""Save an ssbio object as a JSON file using json_tricks"""
if compression:
with open(outfile, 'wb') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
else:
with open(outfile, 'w') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile)) | [
"def",
"save_json",
"(",
"obj",
",",
"outfile",
",",
"allow_nan",
"=",
"True",
",",
"compression",
"=",
"False",
")",
":",
"if",
"compression",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"f",
":",
"dump",
"(",
"obj",
",",
"f",
","... | Save an ssbio object as a JSON file using json_tricks | [
"Save",
"an",
"ssbio",
"object",
"as",
"a",
"JSON",
"file",
"using",
"json_tricks"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L9-L17 | train | 29,085 |
SBRG/ssbio | ssbio/io/__init__.py | load_json | def load_json(file, new_root_dir=None, decompression=False):
"""Load a JSON file using json_tricks"""
if decompression:
with open(file, 'rb') as f:
my_object = load(f, decompression=decompression)
else:
with open(file, 'r') as f:
my_object = load(f, decompression=decompression)
if new_root_dir:
my_object.root_dir = new_root_dir
return my_object | python | def load_json(file, new_root_dir=None, decompression=False):
"""Load a JSON file using json_tricks"""
if decompression:
with open(file, 'rb') as f:
my_object = load(f, decompression=decompression)
else:
with open(file, 'r') as f:
my_object = load(f, decompression=decompression)
if new_root_dir:
my_object.root_dir = new_root_dir
return my_object | [
"def",
"load_json",
"(",
"file",
",",
"new_root_dir",
"=",
"None",
",",
"decompression",
"=",
"False",
")",
":",
"if",
"decompression",
":",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"f",
":",
"my_object",
"=",
"load",
"(",
"f",
",",
"deco... | Load a JSON file using json_tricks | [
"Load",
"a",
"JSON",
"file",
"using",
"json_tricks"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L20-L31 | train | 29,086 |
SBRG/ssbio | ssbio/io/__init__.py | save_pickle | def save_pickle(obj, outfile, protocol=2):
"""Save the object as a pickle file
Args:
outfile (str): Filename
protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2
Returns:
str: Path to pickle file
"""
with open(outfile, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
return outfile | python | def save_pickle(obj, outfile, protocol=2):
"""Save the object as a pickle file
Args:
outfile (str): Filename
protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2
Returns:
str: Path to pickle file
"""
with open(outfile, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
return outfile | [
"def",
"save_pickle",
"(",
"obj",
",",
"outfile",
",",
"protocol",
"=",
"2",
")",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"f",
",",
"protocol",
"=",
"protocol",
")",
"return",
... | Save the object as a pickle file
Args:
outfile (str): Filename
protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2
Returns:
str: Path to pickle file | [
"Save",
"the",
"object",
"as",
"a",
"pickle",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L34-L48 | train | 29,087 |
SBRG/ssbio | ssbio/io/__init__.py | load_pickle | def load_pickle(file, encoding=None):
"""Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
"""
# TODO: test set encoding='latin1' for 2/3 incompatibility
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f) | python | def load_pickle(file, encoding=None):
"""Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file
"""
# TODO: test set encoding='latin1' for 2/3 incompatibility
if encoding:
with open(file, 'rb') as f:
return pickle.load(f, encoding=encoding)
with open(file, 'rb') as f:
return pickle.load(f) | [
"def",
"load_pickle",
"(",
"file",
",",
"encoding",
"=",
"None",
")",
":",
"# TODO: test set encoding='latin1' for 2/3 incompatibility",
"if",
"encoding",
":",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"pickle",
".",
"load",
"(",... | Load a pickle file.
Args:
file (str): Path to pickle file
Returns:
object: Loaded object from pickle file | [
"Load",
"a",
"pickle",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L51-L67 | train | 29,088 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/__init__.py | read | def read( handle, id=None ):
"""
Reads a structure via PDBParser.
Simplifies life..
"""
from Bio.PDB import PDBParser
if not id:
id = os.path.basename(handle).split('.')[0] # Get from filename
p = PDBParser()
s = p.get_structure(id, handle)
return s | python | def read( handle, id=None ):
"""
Reads a structure via PDBParser.
Simplifies life..
"""
from Bio.PDB import PDBParser
if not id:
id = os.path.basename(handle).split('.')[0] # Get from filename
p = PDBParser()
s = p.get_structure(id, handle)
return s | [
"def",
"read",
"(",
"handle",
",",
"id",
"=",
"None",
")",
":",
"from",
"Bio",
".",
"PDB",
"import",
"PDBParser",
"if",
"not",
"id",
":",
"id",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"handle",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0"... | Reads a structure via PDBParser.
Simplifies life.. | [
"Reads",
"a",
"structure",
"via",
"PDBParser",
".",
"Simplifies",
"life",
".."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/__init__.py#L11-L25 | train | 29,089 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/__init__.py | write | def write( structure, name=None ):
"""
Writes a Structure in PDB format through PDBIO.
Simplifies life..
"""
from Bio.PDB import PDBIO
io = PDBIO()
io.set_structure(structure)
if not name:
s_name = structure.id
else:
s_name = name
name = "%s.pdb" %s_name
seed = 0
while 1:
if os.path.exists(name):
name = "%s_%s.pdb" %(s_name, seed)
seed +=1
else:
break
io.save(name)
return name | python | def write( structure, name=None ):
"""
Writes a Structure in PDB format through PDBIO.
Simplifies life..
"""
from Bio.PDB import PDBIO
io = PDBIO()
io.set_structure(structure)
if not name:
s_name = structure.id
else:
s_name = name
name = "%s.pdb" %s_name
seed = 0
while 1:
if os.path.exists(name):
name = "%s_%s.pdb" %(s_name, seed)
seed +=1
else:
break
io.save(name)
return name | [
"def",
"write",
"(",
"structure",
",",
"name",
"=",
"None",
")",
":",
"from",
"Bio",
".",
"PDB",
"import",
"PDBIO",
"io",
"=",
"PDBIO",
"(",
")",
"io",
".",
"set_structure",
"(",
"structure",
")",
"if",
"not",
"name",
":",
"s_name",
"=",
"structure",... | Writes a Structure in PDB format through PDBIO.
Simplifies life.. | [
"Writes",
"a",
"Structure",
"in",
"PDB",
"format",
"through",
"PDBIO",
".",
"Simplifies",
"life",
".."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/__init__.py#L27-L56 | train | 29,090 |
SBRG/ssbio | ssbio/databases/pisa.py | download_pisa_multimers_xml | def download_pisa_multimers_xml(pdb_ids, save_single_xml_files=True, outdir=None, force_rerun=False):
"""Download the PISA XML file for multimers.
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pdb_ids (str, list): PDB ID or list of IDs
save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
provided, then a single, combined XML output file is downloaded
outdir (str): Directory to output PISA XML files
force_rerun (bool): Redownload files if they already exist
Returns:
list: of files downloaded
"""
if not outdir:
outdir = os.getcwd()
files = {}
pdb_ids = ssbio.utils.force_lower_list(sorted(pdb_ids))
# If we want to save single PISA XML files per PDB ID...
if save_single_xml_files:
# Check for existing PISA XML files
if not force_rerun:
existing_files = [op.basename(x) for x in glob.glob(op.join(outdir, '*_multimers.pisa.xml'))]
# Store the paths to these files to return
files = {v.split('_')[0]: op.join(outdir, v) for v in existing_files}
log.debug('Already downloaded PISA files for {}'.format(list(files.keys())))
else:
existing_files = []
# Filter PDB IDs based on existing file
pdb_ids = [x for x in pdb_ids if '{}_multimers.pisa.xml'.format(x) not in existing_files]
# Split the list into 50 to limit requests
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
# Download PISA files
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
r = requests.get(all_pisa_link)
# Parse PISA file and save individual XML files
parser = etree.XMLParser(ns_clean=True)
tree = etree.fromstring(r.text, parser)
for pdb in tree.findall('pdb_entry'):
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdb.find('pdb_code').text))
add_root = etree.Element('pisa_multimers')
add_root.append(pdb)
with open(filename, 'wb') as f:
f.write(etree.tostring(add_root))
files[pdb.find('pdb_code').text] = filename
log.debug('{}: downloaded PISA results'.format(pdb))
else:
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdbs))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
r = requests.get(all_pisa_link)
with open(filename, 'w') as f:
f.write(r.text)
log.debug('Downloaded PISA results')
else:
log.debug('PISA results already downloaded')
for x in l:
files[x] = filename
return files | python | def download_pisa_multimers_xml(pdb_ids, save_single_xml_files=True, outdir=None, force_rerun=False):
"""Download the PISA XML file for multimers.
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pdb_ids (str, list): PDB ID or list of IDs
save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
provided, then a single, combined XML output file is downloaded
outdir (str): Directory to output PISA XML files
force_rerun (bool): Redownload files if they already exist
Returns:
list: of files downloaded
"""
if not outdir:
outdir = os.getcwd()
files = {}
pdb_ids = ssbio.utils.force_lower_list(sorted(pdb_ids))
# If we want to save single PISA XML files per PDB ID...
if save_single_xml_files:
# Check for existing PISA XML files
if not force_rerun:
existing_files = [op.basename(x) for x in glob.glob(op.join(outdir, '*_multimers.pisa.xml'))]
# Store the paths to these files to return
files = {v.split('_')[0]: op.join(outdir, v) for v in existing_files}
log.debug('Already downloaded PISA files for {}'.format(list(files.keys())))
else:
existing_files = []
# Filter PDB IDs based on existing file
pdb_ids = [x for x in pdb_ids if '{}_multimers.pisa.xml'.format(x) not in existing_files]
# Split the list into 50 to limit requests
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
# Download PISA files
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
r = requests.get(all_pisa_link)
# Parse PISA file and save individual XML files
parser = etree.XMLParser(ns_clean=True)
tree = etree.fromstring(r.text, parser)
for pdb in tree.findall('pdb_entry'):
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdb.find('pdb_code').text))
add_root = etree.Element('pisa_multimers')
add_root.append(pdb)
with open(filename, 'wb') as f:
f.write(etree.tostring(add_root))
files[pdb.find('pdb_code').text] = filename
log.debug('{}: downloaded PISA results'.format(pdb))
else:
split_list = ssbio.utils.split_list_by_n(pdb_ids, 40)
for l in split_list:
pdbs = ','.join(l)
all_pisa_link = 'http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?{}'.format(pdbs)
filename = op.join(outdir, '{}_multimers.pisa.xml'.format(pdbs))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=filename):
r = requests.get(all_pisa_link)
with open(filename, 'w') as f:
f.write(r.text)
log.debug('Downloaded PISA results')
else:
log.debug('PISA results already downloaded')
for x in l:
files[x] = filename
return files | [
"def",
"download_pisa_multimers_xml",
"(",
"pdb_ids",
",",
"save_single_xml_files",
"=",
"True",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"os",
".",
"getcwd",
"(",
")",
"files",
"=",... | Download the PISA XML file for multimers.
See: http://www.ebi.ac.uk/pdbe/pisa/pi_download.html for more info
XML description of macromolecular assemblies:
http://www.ebi.ac.uk/pdbe/pisa/cgi-bin/multimers.pisa?pdbcodelist
where "pdbcodelist" is a comma-separated (strictly no spaces) list of PDB codes. The resulting file contain XML
output of assembly data, equivalent to that displayed in PISA assembly pages, for each of the specified PDB
entries. NOTE: If a mass-download is intended, please minimize the number of retrievals by specifying as many
PDB codes in the URL as feasible (20-50 is a good range), and never send another URL request until the previous
one has been completed (meaning that the multimers.pisa file has been downloaded). Excessive requests will
silently die in the server queue.
Args:
pdb_ids (str, list): PDB ID or list of IDs
save_single_xml_files (bool): If single XML files should be saved per PDB ID. If False, if multiple PDB IDs are
provided, then a single, combined XML output file is downloaded
outdir (str): Directory to output PISA XML files
force_rerun (bool): Redownload files if they already exist
Returns:
list: of files downloaded | [
"Download",
"the",
"PISA",
"XML",
"file",
"for",
"multimers",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/pisa.py#L19-L105 | train | 29,091 |
SBRG/ssbio | ssbio/core/genepro.py | GenePro.copy_modified_gene | def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):
"""Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID.
Args:
modified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over.
ignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.
"""
ignore = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']
for attr in filter(lambda a: not a.startswith('__') and not isinstance(getattr(type(self), a, None), property) and not callable(getattr(self, a)),
dir(modified_gene)):
if attr not in ignore and ignore_model_attributes:
setattr(self, attr, getattr(modified_gene, attr)) | python | def copy_modified_gene(self, modified_gene, ignore_model_attributes=True):
"""Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID.
Args:
modified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over.
ignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models.
"""
ignore = ['_model', '_reaction', '_functional', 'model', 'reaction', 'functional']
for attr in filter(lambda a: not a.startswith('__') and not isinstance(getattr(type(self), a, None), property) and not callable(getattr(self, a)),
dir(modified_gene)):
if attr not in ignore and ignore_model_attributes:
setattr(self, attr, getattr(modified_gene, attr)) | [
"def",
"copy_modified_gene",
"(",
"self",
",",
"modified_gene",
",",
"ignore_model_attributes",
"=",
"True",
")",
":",
"ignore",
"=",
"[",
"'_model'",
",",
"'_reaction'",
",",
"'_functional'",
",",
"'model'",
",",
"'reaction'",
",",
"'functional'",
"]",
"for",
... | Copy attributes of a Gene object over to this Gene, given that the modified gene has the same ID.
Args:
modified_gene (Gene, GenePro): Gene with modified attributes that you want to copy over.
ignore_model_attributes (bool): If you want to ignore copying over attributes related to metabolic models. | [
"Copy",
"attributes",
"of",
"a",
"Gene",
"object",
"over",
"to",
"this",
"Gene",
"given",
"that",
"the",
"modified",
"gene",
"has",
"the",
"same",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/genepro.py#L71-L83 | train | 29,092 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.load_structure_path | def load_structure_path(self, structure_path, file_type):
"""Load a structure file and provide pointers to its location
Args:
structure_path (str): Path to structure file
file_type (str): Type of structure file
"""
if not file_type:
raise ValueError('File type must be specified')
self.file_type = file_type
self.structure_dir = op.dirname(structure_path)
self.structure_file = op.basename(structure_path) | python | def load_structure_path(self, structure_path, file_type):
"""Load a structure file and provide pointers to its location
Args:
structure_path (str): Path to structure file
file_type (str): Type of structure file
"""
if not file_type:
raise ValueError('File type must be specified')
self.file_type = file_type
self.structure_dir = op.dirname(structure_path)
self.structure_file = op.basename(structure_path) | [
"def",
"load_structure_path",
"(",
"self",
",",
"structure_path",
",",
"file_type",
")",
":",
"if",
"not",
"file_type",
":",
"raise",
"ValueError",
"(",
"'File type must be specified'",
")",
"self",
".",
"file_type",
"=",
"file_type",
"self",
".",
"structure_dir",... | Load a structure file and provide pointers to its location
Args:
structure_path (str): Path to structure file
file_type (str): Type of structure file | [
"Load",
"a",
"structure",
"file",
"and",
"provide",
"pointers",
"to",
"its",
"location"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L114-L128 | train | 29,093 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.parse_structure | def parse_structure(self, store_in_memory=False):
"""Read the 3D coordinates of a structure file and return it as a Biopython Structure object.
Also create ChainProp objects in the chains attribute for each chain in the first model.
Args:
store_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.
Returns:
Structure: Biopython Structure object
"""
# TODO: perhaps add option to parse into ProDy object?
if not self.structure_file:
log.error('{}: no structure file, unable to parse'.format(self.id))
return None
else:
# Add Biopython structure object
structure = StructureIO(self.structure_path, self.file_type)
# Add all chains to self.chains as ChainProp objects
structure_chains = [x.id for x in structure.first_model.child_list]
self.add_chain_ids(structure_chains)
self.get_structure_seqs(structure.first_model)
# Also add all chains to self.mapped_chains ONLY if there are none specified
if not self.mapped_chains:
self.add_mapped_chain_ids(structure_chains)
if store_in_memory:
self.parsed = True
self.structure = structure
return structure | python | def parse_structure(self, store_in_memory=False):
"""Read the 3D coordinates of a structure file and return it as a Biopython Structure object.
Also create ChainProp objects in the chains attribute for each chain in the first model.
Args:
store_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.
Returns:
Structure: Biopython Structure object
"""
# TODO: perhaps add option to parse into ProDy object?
if not self.structure_file:
log.error('{}: no structure file, unable to parse'.format(self.id))
return None
else:
# Add Biopython structure object
structure = StructureIO(self.structure_path, self.file_type)
# Add all chains to self.chains as ChainProp objects
structure_chains = [x.id for x in structure.first_model.child_list]
self.add_chain_ids(structure_chains)
self.get_structure_seqs(structure.first_model)
# Also add all chains to self.mapped_chains ONLY if there are none specified
if not self.mapped_chains:
self.add_mapped_chain_ids(structure_chains)
if store_in_memory:
self.parsed = True
self.structure = structure
return structure | [
"def",
"parse_structure",
"(",
"self",
",",
"store_in_memory",
"=",
"False",
")",
":",
"# TODO: perhaps add option to parse into ProDy object?",
"if",
"not",
"self",
".",
"structure_file",
":",
"log",
".",
"error",
"(",
"'{}: no structure file, unable to parse'",
".",
"... | Read the 3D coordinates of a structure file and return it as a Biopython Structure object.
Also create ChainProp objects in the chains attribute for each chain in the first model.
Args:
store_in_memory (bool): If the Biopython Structure object should be stored in the attribute ``structure``.
Returns:
Structure: Biopython Structure object | [
"Read",
"the",
"3D",
"coordinates",
"of",
"a",
"structure",
"file",
"and",
"return",
"it",
"as",
"a",
"Biopython",
"Structure",
"object",
".",
"Also",
"create",
"ChainProp",
"objects",
"in",
"the",
"chains",
"attribute",
"for",
"each",
"chain",
"in",
"the",
... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L130-L162 | train | 29,094 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.clean_structure | def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A',remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
if not self.structure_file:
log.error('{}: no structure file, unable to clean'.format(self.id))
return None
clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix,
outdir=outdir, force_rerun=force_rerun,
remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_chemicals=keep_chemicals,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains)
return clean_pdb_file | python | def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A',remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
if not self.structure_file:
log.error('{}: no structure file, unable to clean'.format(self.id))
return None
clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix,
outdir=outdir, force_rerun=force_rerun,
remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_chemicals=keep_chemicals,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains)
return clean_pdb_file | [
"def",
"clean_structure",
"(",
"self",
",",
"out_suffix",
"=",
"'_clean'",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
",",
"remove_atom_alt",
"=",
"True",
",",
"keep_atom_alt_id",
"=",
"'A'",
",",
"remove_atom_hydrogen",
"=",
"True",
",",
... | Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file | [
"Clean",
"the",
"structure",
"file",
"associated",
"with",
"this",
"structure",
"and",
"save",
"it",
"as",
"a",
"new",
"file",
".",
"Returns",
"the",
"file",
"path",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L164-L205 | train | 29,095 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.add_mapped_chain_ids | def add_mapped_chain_ids(self, mapped_chains):
"""Add chains by ID into the mapped_chains attribute
Args:
mapped_chains (str, list): Chain ID or list of IDs
"""
mapped_chains = ssbio.utils.force_list(mapped_chains)
for c in mapped_chains:
if c not in self.mapped_chains:
self.mapped_chains.append(c)
log.debug('{}: added to list of mapped chains'.format(c))
else:
log.debug('{}: chain already in list of mapped chains, not adding'.format(c)) | python | def add_mapped_chain_ids(self, mapped_chains):
"""Add chains by ID into the mapped_chains attribute
Args:
mapped_chains (str, list): Chain ID or list of IDs
"""
mapped_chains = ssbio.utils.force_list(mapped_chains)
for c in mapped_chains:
if c not in self.mapped_chains:
self.mapped_chains.append(c)
log.debug('{}: added to list of mapped chains'.format(c))
else:
log.debug('{}: chain already in list of mapped chains, not adding'.format(c)) | [
"def",
"add_mapped_chain_ids",
"(",
"self",
",",
"mapped_chains",
")",
":",
"mapped_chains",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"mapped_chains",
")",
"for",
"c",
"in",
"mapped_chains",
":",
"if",
"c",
"not",
"in",
"self",
".",
"mapped_chains... | Add chains by ID into the mapped_chains attribute
Args:
mapped_chains (str, list): Chain ID or list of IDs | [
"Add",
"chains",
"by",
"ID",
"into",
"the",
"mapped_chains",
"attribute"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L207-L221 | train | 29,096 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.add_chain_ids | def add_chain_ids(self, chains):
"""Add chains by ID into the chains attribute
Args:
chains (str, list): Chain ID or list of IDs
"""
chains = ssbio.utils.force_list(chains)
for c in chains:
if self.chains.has_id(c):
log.debug('{}: chain already present'.format(c))
else:
chain_prop = ChainProp(ident=c, pdb_parent=self.id)
self.chains.append(chain_prop)
log.debug('{}: added to chains list'.format(c)) | python | def add_chain_ids(self, chains):
"""Add chains by ID into the chains attribute
Args:
chains (str, list): Chain ID or list of IDs
"""
chains = ssbio.utils.force_list(chains)
for c in chains:
if self.chains.has_id(c):
log.debug('{}: chain already present'.format(c))
else:
chain_prop = ChainProp(ident=c, pdb_parent=self.id)
self.chains.append(chain_prop)
log.debug('{}: added to chains list'.format(c)) | [
"def",
"add_chain_ids",
"(",
"self",
",",
"chains",
")",
":",
"chains",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"chains",
")",
"for",
"c",
"in",
"chains",
":",
"if",
"self",
".",
"chains",
".",
"has_id",
"(",
"c",
")",
":",
"log",
".",
... | Add chains by ID into the chains attribute
Args:
chains (str, list): Chain ID or list of IDs | [
"Add",
"chains",
"by",
"ID",
"into",
"the",
"chains",
"attribute"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L223-L238 | train | 29,097 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.get_structure_seqs | def get_structure_seqs(self, model):
"""Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.
Args:
model (Model): Biopython Model object of the structure you would like to parse
"""
# Don't overwrite existing ChainProp objects
dont_overwrite = []
chains = list(model.get_chains())
for x in chains:
if self.chains.has_id(x.id):
if self.chains.get_by_id(x.id).seq_record:
dont_overwrite.append(x.id)
if len(dont_overwrite) == len(chains):
log.debug('Not writing structure sequences, already stored')
return
# Returns the structures sequences with Xs added
structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)
log.debug('{}: gathered chain sequences'.format(self.id))
# Associate with ChainProps
for seq_record in structure_seqs:
log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))
my_chain = self.chains.get_by_id(seq_record.id)
my_chain.seq_record = seq_record | python | def get_structure_seqs(self, model):
"""Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.
Args:
model (Model): Biopython Model object of the structure you would like to parse
"""
# Don't overwrite existing ChainProp objects
dont_overwrite = []
chains = list(model.get_chains())
for x in chains:
if self.chains.has_id(x.id):
if self.chains.get_by_id(x.id).seq_record:
dont_overwrite.append(x.id)
if len(dont_overwrite) == len(chains):
log.debug('Not writing structure sequences, already stored')
return
# Returns the structures sequences with Xs added
structure_seqs = ssbio.protein.structure.properties.residues.get_structure_seqrecords(model)
log.debug('{}: gathered chain sequences'.format(self.id))
# Associate with ChainProps
for seq_record in structure_seqs:
log.debug('{}: adding chain sequence to ChainProp'.format(seq_record.id))
my_chain = self.chains.get_by_id(seq_record.id)
my_chain.seq_record = seq_record | [
"def",
"get_structure_seqs",
"(",
"self",
",",
"model",
")",
":",
"# Don't overwrite existing ChainProp objects",
"dont_overwrite",
"=",
"[",
"]",
"chains",
"=",
"list",
"(",
"model",
".",
"get_chains",
"(",
")",
")",
"for",
"x",
"in",
"chains",
":",
"if",
"... | Gather chain sequences and store in their corresponding ``ChainProp`` objects in the ``chains`` attribute.
Args:
model (Model): Biopython Model object of the structure you would like to parse | [
"Gather",
"chain",
"sequences",
"and",
"store",
"in",
"their",
"corresponding",
"ChainProp",
"objects",
"in",
"the",
"chains",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L240-L267 | train | 29,098 |
SBRG/ssbio | ssbio/protein/structure/structprop.py | StructProp.get_dict_with_chain | def get_dict_with_chain(self, chain, only_keys=None, chain_keys=None, exclude_attributes=None, df_format=False):
"""get_dict method which incorporates attributes found in a specific chain. Does not overwrite any attributes
in the original StructProp.
Args:
chain:
only_keys:
chain_keys:
exclude_attributes:
df_format:
Returns:
dict: attributes of StructProp + the chain specified
"""
# Choose attributes to return, return everything in the object if a list is not specified
if not only_keys:
keys = list(self.__dict__.keys())
else:
keys = ssbio.utils.force_list(only_keys)
# Remove keys you don't want returned
if exclude_attributes:
exclude_attributes = ssbio.utils.force_list(exclude_attributes)
for x in exclude_attributes:
if x in keys:
keys.remove(x)
else:
exclude_attributes = []
exclude_attributes.extend(['mapped_chains', 'chains'])
final_dict = {k: v for k, v in Object.get_dict(self, only_attributes=keys, exclude_attributes=exclude_attributes,
df_format=df_format).items()}
chain_prop = self.chains.get_by_id(chain)
# Filter out keys that show up in StructProp
if not chain_keys:
chain_keys = [x for x in chain_prop.get_dict().keys() if x not in final_dict]
chain_dict = chain_prop.get_dict(only_attributes=chain_keys, df_format=df_format)
final_dict.update(chain_dict)
return final_dict | python | def get_dict_with_chain(self, chain, only_keys=None, chain_keys=None, exclude_attributes=None, df_format=False):
"""get_dict method which incorporates attributes found in a specific chain. Does not overwrite any attributes
in the original StructProp.
Args:
chain:
only_keys:
chain_keys:
exclude_attributes:
df_format:
Returns:
dict: attributes of StructProp + the chain specified
"""
# Choose attributes to return, return everything in the object if a list is not specified
if not only_keys:
keys = list(self.__dict__.keys())
else:
keys = ssbio.utils.force_list(only_keys)
# Remove keys you don't want returned
if exclude_attributes:
exclude_attributes = ssbio.utils.force_list(exclude_attributes)
for x in exclude_attributes:
if x in keys:
keys.remove(x)
else:
exclude_attributes = []
exclude_attributes.extend(['mapped_chains', 'chains'])
final_dict = {k: v for k, v in Object.get_dict(self, only_attributes=keys, exclude_attributes=exclude_attributes,
df_format=df_format).items()}
chain_prop = self.chains.get_by_id(chain)
# Filter out keys that show up in StructProp
if not chain_keys:
chain_keys = [x for x in chain_prop.get_dict().keys() if x not in final_dict]
chain_dict = chain_prop.get_dict(only_attributes=chain_keys, df_format=df_format)
final_dict.update(chain_dict)
return final_dict | [
"def",
"get_dict_with_chain",
"(",
"self",
",",
"chain",
",",
"only_keys",
"=",
"None",
",",
"chain_keys",
"=",
"None",
",",
"exclude_attributes",
"=",
"None",
",",
"df_format",
"=",
"False",
")",
":",
"# Choose attributes to return, return everything in the object if... | get_dict method which incorporates attributes found in a specific chain. Does not overwrite any attributes
in the original StructProp.
Args:
chain:
only_keys:
chain_keys:
exclude_attributes:
df_format:
Returns:
dict: attributes of StructProp + the chain specified | [
"get_dict",
"method",
"which",
"incorporates",
"attributes",
"found",
"in",
"a",
"specific",
"chain",
".",
"Does",
"not",
"overwrite",
"any",
"attributes",
"in",
"the",
"original",
"StructProp",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L273-L317 | train | 29,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.