repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
SBRG/ssbio | ssbio/protein/structure/properties/residues.py | resname_in_proximity | def resname_in_proximity(resname, model, chains, resnums, threshold=5):
"""Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
"""
residues = [r for r in model.get_residues() if r.get_resname() == resname]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[-1]
for rz in residues:
distance = rz.child_list[-1] - my_residue_last_atom
if distance < threshold:
# print(resnum, rz, distance)
return True
return False | python | def resname_in_proximity(resname, model, chains, resnums, threshold=5):
"""Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
"""
residues = [r for r in model.get_residues() if r.get_resname() == resname]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[-1]
for rz in residues:
distance = rz.child_list[-1] - my_residue_last_atom
if distance < threshold:
# print(resnum, rz, distance)
return True
return False | [
"def",
"resname_in_proximity",
"(",
"resname",
",",
"model",
",",
"chains",
",",
"resnums",
",",
"threshold",
"=",
"5",
")",
":",
"residues",
"=",
"[",
"r",
"for",
"r",
"in",
"model",
".",
"get_residues",
"(",
")",
"if",
"r",
".",
"get_resname",
"(",
... | Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff | [
"Search",
"within",
"the",
"proximity",
"of",
"a",
"defined",
"list",
"of",
"residue",
"numbers",
"and",
"their",
"chains",
"for",
"any",
"specifed",
"residue",
"name",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/residues.py#L92-L120 | train | 28,900 |
SBRG/ssbio | ssbio/protein/structure/properties/residues.py | match_structure_sequence | def match_structure_sequence(orig_seq, new_seq, match='X', fill_with='X', ignore_excess=False):
"""Correct a sequence to match inserted X's in a structure sequence
This is useful for mapping a sequence obtained from structural tools like MSMS or DSSP
to the sequence obtained by the get_structure_seqs method.
Examples:
>>> structure_seq = 'XXXABCDEF'
>>> prop_list = [4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list)
['X', 'X', 'X', 4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list, fill_with=float('Inf'))
[inf, inf, inf, 4, 5, 6, 7, 8, 9]
>>> structure_seq = '---ABCDEF---'
>>> prop_list = ('H','H','H','C','C','C')
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
('-', '-', '-', 'H', 'H', 'H', 'C', 'C', 'C', '-', '-', '-')
>>> structure_seq = 'ABCDEF---'
>>> prop_list = 'HHHCCC'
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
'HHHCCC---'
>>> structure_seq = 'AXBXCXDXEXF'
>>> prop_list = ['H', 'H', 'H', 'C', 'C', 'C']
>>> match_structure_sequence(structure_seq, prop_list, match='X', fill_with='X')
['H', 'X', 'H', 'X', 'H', 'X', 'C', 'X', 'C', 'X', 'C']
Args:
orig_seq (str, Seq, SeqRecord): Sequence to match to
new_seq (str, tuple, list): Sequence to fill in
match (str): What to match
fill_with: What to fill in when matches are found
ignore_excess (bool): If excess sequence on the tail end of new_seq should be ignored
Returns:
str, tuple, list: new_seq which will match the length of orig_seq
"""
if len(orig_seq) == len(new_seq):
log.debug('Lengths already equal, nothing to fill in')
return new_seq
if not ignore_excess:
if len(orig_seq) < len(new_seq):
raise ValueError('Original sequence has a length less than the sequence provided to match to')
else:
log.debug('New sequence will be truncated to length of original sequence - information may be lost!')
if not isinstance(new_seq, str) and not isinstance(new_seq, tuple) and not isinstance(new_seq, list):
raise ValueError('Invalid sequence provided, must be string, tuple, or list')
orig_seq = ssbio.protein.sequence.utils.cast_to_str(orig_seq)
new_thing = deepcopy(new_seq)
if isinstance(new_seq, tuple):
new_thing = list(new_thing)
for i, s in enumerate(orig_seq):
if s == match:
if isinstance(new_thing, str):
new_thing = new_thing[:i] + fill_with + new_thing[i:]
if isinstance(new_thing, list):
new_thing.insert(i, fill_with)
new_thing = new_thing[:len(orig_seq)]
if isinstance(new_seq, tuple):
new_thing = tuple(new_thing)
return new_thing | python | def match_structure_sequence(orig_seq, new_seq, match='X', fill_with='X', ignore_excess=False):
"""Correct a sequence to match inserted X's in a structure sequence
This is useful for mapping a sequence obtained from structural tools like MSMS or DSSP
to the sequence obtained by the get_structure_seqs method.
Examples:
>>> structure_seq = 'XXXABCDEF'
>>> prop_list = [4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list)
['X', 'X', 'X', 4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list, fill_with=float('Inf'))
[inf, inf, inf, 4, 5, 6, 7, 8, 9]
>>> structure_seq = '---ABCDEF---'
>>> prop_list = ('H','H','H','C','C','C')
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
('-', '-', '-', 'H', 'H', 'H', 'C', 'C', 'C', '-', '-', '-')
>>> structure_seq = 'ABCDEF---'
>>> prop_list = 'HHHCCC'
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
'HHHCCC---'
>>> structure_seq = 'AXBXCXDXEXF'
>>> prop_list = ['H', 'H', 'H', 'C', 'C', 'C']
>>> match_structure_sequence(structure_seq, prop_list, match='X', fill_with='X')
['H', 'X', 'H', 'X', 'H', 'X', 'C', 'X', 'C', 'X', 'C']
Args:
orig_seq (str, Seq, SeqRecord): Sequence to match to
new_seq (str, tuple, list): Sequence to fill in
match (str): What to match
fill_with: What to fill in when matches are found
ignore_excess (bool): If excess sequence on the tail end of new_seq should be ignored
Returns:
str, tuple, list: new_seq which will match the length of orig_seq
"""
if len(orig_seq) == len(new_seq):
log.debug('Lengths already equal, nothing to fill in')
return new_seq
if not ignore_excess:
if len(orig_seq) < len(new_seq):
raise ValueError('Original sequence has a length less than the sequence provided to match to')
else:
log.debug('New sequence will be truncated to length of original sequence - information may be lost!')
if not isinstance(new_seq, str) and not isinstance(new_seq, tuple) and not isinstance(new_seq, list):
raise ValueError('Invalid sequence provided, must be string, tuple, or list')
orig_seq = ssbio.protein.sequence.utils.cast_to_str(orig_seq)
new_thing = deepcopy(new_seq)
if isinstance(new_seq, tuple):
new_thing = list(new_thing)
for i, s in enumerate(orig_seq):
if s == match:
if isinstance(new_thing, str):
new_thing = new_thing[:i] + fill_with + new_thing[i:]
if isinstance(new_thing, list):
new_thing.insert(i, fill_with)
new_thing = new_thing[:len(orig_seq)]
if isinstance(new_seq, tuple):
new_thing = tuple(new_thing)
return new_thing | [
"def",
"match_structure_sequence",
"(",
"orig_seq",
",",
"new_seq",
",",
"match",
"=",
"'X'",
",",
"fill_with",
"=",
"'X'",
",",
"ignore_excess",
"=",
"False",
")",
":",
"if",
"len",
"(",
"orig_seq",
")",
"==",
"len",
"(",
"new_seq",
")",
":",
"log",
"... | Correct a sequence to match inserted X's in a structure sequence
This is useful for mapping a sequence obtained from structural tools like MSMS or DSSP
to the sequence obtained by the get_structure_seqs method.
Examples:
>>> structure_seq = 'XXXABCDEF'
>>> prop_list = [4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list)
['X', 'X', 'X', 4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list, fill_with=float('Inf'))
[inf, inf, inf, 4, 5, 6, 7, 8, 9]
>>> structure_seq = '---ABCDEF---'
>>> prop_list = ('H','H','H','C','C','C')
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
('-', '-', '-', 'H', 'H', 'H', 'C', 'C', 'C', '-', '-', '-')
>>> structure_seq = 'ABCDEF---'
>>> prop_list = 'HHHCCC'
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
'HHHCCC---'
>>> structure_seq = 'AXBXCXDXEXF'
>>> prop_list = ['H', 'H', 'H', 'C', 'C', 'C']
>>> match_structure_sequence(structure_seq, prop_list, match='X', fill_with='X')
['H', 'X', 'H', 'X', 'H', 'X', 'C', 'X', 'C', 'X', 'C']
Args:
orig_seq (str, Seq, SeqRecord): Sequence to match to
new_seq (str, tuple, list): Sequence to fill in
match (str): What to match
fill_with: What to fill in when matches are found
ignore_excess (bool): If excess sequence on the tail end of new_seq should be ignored
Returns:
str, tuple, list: new_seq which will match the length of orig_seq | [
"Correct",
"a",
"sequence",
"to",
"match",
"inserted",
"X",
"s",
"in",
"a",
"structure",
"sequence"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/residues.py#L287-L358 | train | 28,901 |
SBRG/ssbio | ssbio/databases/hmmer.py | manual_get_pfam_annotations | def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False):
"""Retrieve and download PFAM results from the HMMER search tool.
Args:
seq:
outpath:
searchtype:
force_rerun:
Returns:
Todo:
* Document and test!
"""
if op.exists(outpath):
with open(outpath, 'r') as f:
json_results = json.loads(json.load(f))
else:
fseq = '>Seq\n' + seq
if searchtype == 'phmmer':
parameters = {'seqdb': 'pdb', 'seq': fseq}
if searchtype == 'hmmscan':
parameters = {'hmmdb': 'pfam', 'seq': fseq}
enc_params = urllib.urlencode(parameters).encode('utf-8')
request = urllib2.Request('http://www.ebi.ac.uk/Tools/hmmer/search/{}'.format(searchtype), enc_params)
url = (urllib2.urlopen(request).geturl() + '?output=json')
request = str(url)
request_read = urlopen(request).read().decode("utf-8")
with open(outpath, 'w') as f:
json.dump(request_read, f)
json_results = json.loads(request_read)
return json_results['results']['hits'] | python | def manual_get_pfam_annotations(seq, outpath, searchtype='phmmer', force_rerun=False):
"""Retrieve and download PFAM results from the HMMER search tool.
Args:
seq:
outpath:
searchtype:
force_rerun:
Returns:
Todo:
* Document and test!
"""
if op.exists(outpath):
with open(outpath, 'r') as f:
json_results = json.loads(json.load(f))
else:
fseq = '>Seq\n' + seq
if searchtype == 'phmmer':
parameters = {'seqdb': 'pdb', 'seq': fseq}
if searchtype == 'hmmscan':
parameters = {'hmmdb': 'pfam', 'seq': fseq}
enc_params = urllib.urlencode(parameters).encode('utf-8')
request = urllib2.Request('http://www.ebi.ac.uk/Tools/hmmer/search/{}'.format(searchtype), enc_params)
url = (urllib2.urlopen(request).geturl() + '?output=json')
request = str(url)
request_read = urlopen(request).read().decode("utf-8")
with open(outpath, 'w') as f:
json.dump(request_read, f)
json_results = json.loads(request_read)
return json_results['results']['hits'] | [
"def",
"manual_get_pfam_annotations",
"(",
"seq",
",",
"outpath",
",",
"searchtype",
"=",
"'phmmer'",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"op",
".",
"exists",
"(",
"outpath",
")",
":",
"with",
"open",
"(",
"outpath",
",",
"'r'",
")",
"as",
... | Retrieve and download PFAM results from the HMMER search tool.
Args:
seq:
outpath:
searchtype:
force_rerun:
Returns:
Todo:
* Document and test! | [
"Retrieve",
"and",
"download",
"PFAM",
"results",
"from",
"the",
"HMMER",
"search",
"tool",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/hmmer.py#L10-L46 | train | 28,902 |
SBRG/ssbio | ssbio/utils.py | is_ipynb | def is_ipynb():
"""Return True if the module is running in IPython kernel,
False if in IPython shell or other Python shell.
Copied from: http://stackoverflow.com/a/37661854/1592810
There are other methods there too
>>> is_ipynb()
False
"""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole?
return True
elif shell == 'TerminalInteractiveShell': # Terminal running IPython?
return False
else:
return False # Other type (?)
except NameError:
return False | python | def is_ipynb():
"""Return True if the module is running in IPython kernel,
False if in IPython shell or other Python shell.
Copied from: http://stackoverflow.com/a/37661854/1592810
There are other methods there too
>>> is_ipynb()
False
"""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole?
return True
elif shell == 'TerminalInteractiveShell': # Terminal running IPython?
return False
else:
return False # Other type (?)
except NameError:
return False | [
"def",
"is_ipynb",
"(",
")",
":",
"try",
":",
"shell",
"=",
"get_ipython",
"(",
")",
".",
"__class__",
".",
"__name__",
"if",
"shell",
"==",
"'ZMQInteractiveShell'",
":",
"# Jupyter notebook or qtconsole?",
"return",
"True",
"elif",
"shell",
"==",
"'TerminalInte... | Return True if the module is running in IPython kernel,
False if in IPython shell or other Python shell.
Copied from: http://stackoverflow.com/a/37661854/1592810
There are other methods there too
>>> is_ipynb()
False | [
"Return",
"True",
"if",
"the",
"module",
"is",
"running",
"in",
"IPython",
"kernel",
"False",
"if",
"in",
"IPython",
"shell",
"or",
"other",
"Python",
"shell",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L26-L46 | train | 28,903 |
SBRG/ssbio | ssbio/utils.py | clean_single_dict | def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None):
"""Clean a dict with values that contain single item iterators to single items
Args:
indict (dict): Dictionary to be cleaned
prepend_to_keys (str): String to prepend to all keys
remove_keys_containing (str): Text to check for in keys to ignore
Returns:
dict: Cleaned dictionary
Examples:
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']})
{'test1': 1, 'test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_')
{'struct_test1': 1, 'struct_test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore')
{'struct_test1': 1}
"""
if not prepend_to_keys:
prepend_to_keys = ''
outdict = {}
for k, v in indict.items():
if remove_keys_containing:
if remove_keys_containing in k:
continue
outdict[prepend_to_keys + k] = v[0]
return outdict | python | def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None):
"""Clean a dict with values that contain single item iterators to single items
Args:
indict (dict): Dictionary to be cleaned
prepend_to_keys (str): String to prepend to all keys
remove_keys_containing (str): Text to check for in keys to ignore
Returns:
dict: Cleaned dictionary
Examples:
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']})
{'test1': 1, 'test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_')
{'struct_test1': 1, 'struct_test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore')
{'struct_test1': 1}
"""
if not prepend_to_keys:
prepend_to_keys = ''
outdict = {}
for k, v in indict.items():
if remove_keys_containing:
if remove_keys_containing in k:
continue
outdict[prepend_to_keys + k] = v[0]
return outdict | [
"def",
"clean_single_dict",
"(",
"indict",
",",
"prepend_to_keys",
"=",
"None",
",",
"remove_keys_containing",
"=",
"None",
")",
":",
"if",
"not",
"prepend_to_keys",
":",
"prepend_to_keys",
"=",
"''",
"outdict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"... | Clean a dict with values that contain single item iterators to single items
Args:
indict (dict): Dictionary to be cleaned
prepend_to_keys (str): String to prepend to all keys
remove_keys_containing (str): Text to check for in keys to ignore
Returns:
dict: Cleaned dictionary
Examples:
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']})
{'test1': 1, 'test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'test2': ['H']}, prepend_to_keys='struct_')
{'struct_test1': 1, 'struct_test2': 'H'}
>>> clean_single_dict(indict={'test1': [1], 'ignore': ['H']}, prepend_to_keys='struct_', remove_keys_containing='ignore')
{'struct_test1': 1} | [
"Clean",
"a",
"dict",
"with",
"values",
"that",
"contain",
"single",
"item",
"iterators",
"to",
"single",
"items"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L153-L185 | train | 28,904 |
SBRG/ssbio | ssbio/utils.py | double_check_attribute | def double_check_attribute(object, setter, backup_attribute, custom_error_text=None):
"""Check if a parameter to be used is None, if it is, then check the specified backup attribute and throw
an error if it is also None.
Args:
object: The original object
setter: Any input object
backup_attribute (str): Attribute in <object> to be double checked
custom_error_text (str): If a custom string for the error should be raised
Raises:
ValueError: If both setter and backup_attribute are None
"""
if not setter:
next_checker = getattr(object, backup_attribute)
if not next_checker:
if custom_error_text:
raise ValueError(custom_error_text)
else:
raise ValueError('Attribute replacing "{}" must be specified'.format(backup_attribute)) | python | def double_check_attribute(object, setter, backup_attribute, custom_error_text=None):
"""Check if a parameter to be used is None, if it is, then check the specified backup attribute and throw
an error if it is also None.
Args:
object: The original object
setter: Any input object
backup_attribute (str): Attribute in <object> to be double checked
custom_error_text (str): If a custom string for the error should be raised
Raises:
ValueError: If both setter and backup_attribute are None
"""
if not setter:
next_checker = getattr(object, backup_attribute)
if not next_checker:
if custom_error_text:
raise ValueError(custom_error_text)
else:
raise ValueError('Attribute replacing "{}" must be specified'.format(backup_attribute)) | [
"def",
"double_check_attribute",
"(",
"object",
",",
"setter",
",",
"backup_attribute",
",",
"custom_error_text",
"=",
"None",
")",
":",
"if",
"not",
"setter",
":",
"next_checker",
"=",
"getattr",
"(",
"object",
",",
"backup_attribute",
")",
"if",
"not",
"next... | Check if a parameter to be used is None, if it is, then check the specified backup attribute and throw
an error if it is also None.
Args:
object: The original object
setter: Any input object
backup_attribute (str): Attribute in <object> to be double checked
custom_error_text (str): If a custom string for the error should be raised
Raises:
ValueError: If both setter and backup_attribute are None | [
"Check",
"if",
"a",
"parameter",
"to",
"be",
"used",
"is",
"None",
"if",
"it",
"is",
"then",
"check",
"the",
"specified",
"backup",
"attribute",
"and",
"throw",
"an",
"error",
"if",
"it",
"is",
"also",
"None",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L199-L219 | train | 28,905 |
SBRG/ssbio | ssbio/utils.py | split_folder_and_path | def split_folder_and_path(filepath):
"""Split a file path into its folder, filename, and extension
Args:
path (str): Path to a file
Returns:
tuple: of (folder, filename (without extension), extension)
"""
dirname = op.dirname(filepath)
filename = op.basename(filepath)
splitext = op.splitext(filename)
filename_without_extension = splitext[0]
extension = splitext[1]
return dirname, filename_without_extension, extension | python | def split_folder_and_path(filepath):
"""Split a file path into its folder, filename, and extension
Args:
path (str): Path to a file
Returns:
tuple: of (folder, filename (without extension), extension)
"""
dirname = op.dirname(filepath)
filename = op.basename(filepath)
splitext = op.splitext(filename)
filename_without_extension = splitext[0]
extension = splitext[1]
return dirname, filename_without_extension, extension | [
"def",
"split_folder_and_path",
"(",
"filepath",
")",
":",
"dirname",
"=",
"op",
".",
"dirname",
"(",
"filepath",
")",
"filename",
"=",
"op",
".",
"basename",
"(",
"filepath",
")",
"splitext",
"=",
"op",
".",
"splitext",
"(",
"filename",
")",
"filename_wit... | Split a file path into its folder, filename, and extension
Args:
path (str): Path to a file
Returns:
tuple: of (folder, filename (without extension), extension) | [
"Split",
"a",
"file",
"path",
"into",
"its",
"folder",
"filename",
"and",
"extension"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L222-L238 | train | 28,906 |
SBRG/ssbio | ssbio/utils.py | outfile_maker | def outfile_maker(inname, outext='.out', outname='', outdir='', append_to_name=''):
"""Create a default name for an output file based on the inname name, unless a output name is specified.
Args:
inname: Path to input file
outext: Optional specified extension for output file (with the "."). Default is ".out".
outfile: Optional specified name of output file.
outdir: Optional path to output directory.
Returns:
str: Path to final output destination.
Examples:
>>> outfile_maker(inname='P00001.fasta')
'P00001.out'
>>> outfile_maker(inname='P00001')
'P00001.out'
>>> outfile_maker(inname='P00001.fasta', append_to_name='_new')
'P00001_new.out'
>>> outfile_maker(inname='P00001.fasta', outext='.mao')
'P00001.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.mao', append_to_name='_new')
'P00001_new.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.new', outname='P00001_aligned')
'P00001_aligned.new'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned')
'P00001_aligned.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', append_to_name='_new')
'P00001_aligned_new.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', append_to_name='_new')
'/test/other/dir/P00001_new.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned')
'/test/other/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out'
"""
# TODO: CHECK IF OUTNAME IS A VALID FILE NAME!
orig_dir, orig_name, orig_ext = split_folder_and_path(inname)
# If output filename not provided, default is to take name of inname
if not outname:
outname = orig_name
# Create new path in the same directory of old path if a new one isn't specified
if not outdir:
outdir = orig_dir
# Append additional stuff to the filename if specified
if append_to_name:
outname += append_to_name
# Join the output filename and output extension
final_outfile = op.join(outdir, '{}{}'.format(outname, outext))
return final_outfile | python | def outfile_maker(inname, outext='.out', outname='', outdir='', append_to_name=''):
"""Create a default name for an output file based on the inname name, unless a output name is specified.
Args:
inname: Path to input file
outext: Optional specified extension for output file (with the "."). Default is ".out".
outfile: Optional specified name of output file.
outdir: Optional path to output directory.
Returns:
str: Path to final output destination.
Examples:
>>> outfile_maker(inname='P00001.fasta')
'P00001.out'
>>> outfile_maker(inname='P00001')
'P00001.out'
>>> outfile_maker(inname='P00001.fasta', append_to_name='_new')
'P00001_new.out'
>>> outfile_maker(inname='P00001.fasta', outext='.mao')
'P00001.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.mao', append_to_name='_new')
'P00001_new.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.new', outname='P00001_aligned')
'P00001_aligned.new'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned')
'P00001_aligned.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', append_to_name='_new')
'P00001_aligned_new.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', append_to_name='_new')
'/test/other/dir/P00001_new.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned')
'/test/other/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out'
"""
# TODO: CHECK IF OUTNAME IS A VALID FILE NAME!
orig_dir, orig_name, orig_ext = split_folder_and_path(inname)
# If output filename not provided, default is to take name of inname
if not outname:
outname = orig_name
# Create new path in the same directory of old path if a new one isn't specified
if not outdir:
outdir = orig_dir
# Append additional stuff to the filename if specified
if append_to_name:
outname += append_to_name
# Join the output filename and output extension
final_outfile = op.join(outdir, '{}{}'.format(outname, outext))
return final_outfile | [
"def",
"outfile_maker",
"(",
"inname",
",",
"outext",
"=",
"'.out'",
",",
"outname",
"=",
"''",
",",
"outdir",
"=",
"''",
",",
"append_to_name",
"=",
"''",
")",
":",
"# TODO: CHECK IF OUTNAME IS A VALID FILE NAME!",
"orig_dir",
",",
"orig_name",
",",
"orig_ext",... | Create a default name for an output file based on the inname name, unless a output name is specified.
Args:
inname: Path to input file
outext: Optional specified extension for output file (with the "."). Default is ".out".
outfile: Optional specified name of output file.
outdir: Optional path to output directory.
Returns:
str: Path to final output destination.
Examples:
>>> outfile_maker(inname='P00001.fasta')
'P00001.out'
>>> outfile_maker(inname='P00001')
'P00001.out'
>>> outfile_maker(inname='P00001.fasta', append_to_name='_new')
'P00001_new.out'
>>> outfile_maker(inname='P00001.fasta', outext='.mao')
'P00001.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.mao', append_to_name='_new')
'P00001_new.mao'
>>> outfile_maker(inname='P00001.fasta', outext='.new', outname='P00001_aligned')
'P00001_aligned.new'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned')
'P00001_aligned.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', append_to_name='_new')
'P00001_aligned_new.out'
>>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', append_to_name='_new')
'/test/other/dir/P00001_new.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned')
'/test/other/dir/P00001_aligned.out'
>>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned', outdir='/my/dir/')
'/my/dir/P00001_aligned.out' | [
"Create",
"a",
"default",
"name",
"for",
"an",
"output",
"file",
"based",
"on",
"the",
"inname",
"name",
"unless",
"a",
"output",
"name",
"is",
"specified",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L254-L324 | train | 28,907 |
SBRG/ssbio | ssbio/utils.py | force_rerun | def force_rerun(flag, outfile):
"""Check if we should force rerunning of a command if an output file exists.
Args:
flag (bool): Flag to force rerun.
outfile (str): Path to output file which may already exist.
Returns:
bool: If we should force rerunning of a command
Examples:
>>> force_rerun(flag=True, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=False, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=True, outfile='./utils.py')
True
>>> force_rerun(flag=False, outfile='./utils.py')
False
"""
# If flag is True, always run
if flag:
return True
# If flag is False but file doesn't exist, also run
elif not flag and not op.exists(outfile):
return True
# If flag is False but filesize of output is 0, also run
elif not flag and not is_non_zero_file(outfile):
return True
# Otherwise, do not run
else:
return False | python | def force_rerun(flag, outfile):
"""Check if we should force rerunning of a command if an output file exists.
Args:
flag (bool): Flag to force rerun.
outfile (str): Path to output file which may already exist.
Returns:
bool: If we should force rerunning of a command
Examples:
>>> force_rerun(flag=True, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=False, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=True, outfile='./utils.py')
True
>>> force_rerun(flag=False, outfile='./utils.py')
False
"""
# If flag is True, always run
if flag:
return True
# If flag is False but file doesn't exist, also run
elif not flag and not op.exists(outfile):
return True
# If flag is False but filesize of output is 0, also run
elif not flag and not is_non_zero_file(outfile):
return True
# Otherwise, do not run
else:
return False | [
"def",
"force_rerun",
"(",
"flag",
",",
"outfile",
")",
":",
"# If flag is True, always run",
"if",
"flag",
":",
"return",
"True",
"# If flag is False but file doesn't exist, also run",
"elif",
"not",
"flag",
"and",
"not",
"op",
".",
"exists",
"(",
"outfile",
")",
... | Check if we should force rerunning of a command if an output file exists.
Args:
flag (bool): Flag to force rerun.
outfile (str): Path to output file which may already exist.
Returns:
bool: If we should force rerunning of a command
Examples:
>>> force_rerun(flag=True, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=False, outfile='/not/existing/file.txt')
True
>>> force_rerun(flag=True, outfile='./utils.py')
True
>>> force_rerun(flag=False, outfile='./utils.py')
False | [
"Check",
"if",
"we",
"should",
"force",
"rerunning",
"of",
"a",
"command",
"if",
"an",
"output",
"file",
"exists",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L327-L362 | train | 28,908 |
def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False):
    """Decompress a gzip file and optionally set output values.

    Args:
        infile: Path to .gz file
        outfile: Name of output file
        outdir: Path to output directory
        delete_original: If original .gz file should be deleted
        force_rerun_flag: If file should be decompressed if outfile already exists

    Returns:
        str: Path to decompressed file

    """
    # Default output name: the input path minus its .gz suffix
    if not outfile:
        outfile = infile.replace('.gz', '')

    # Bug fix: the previous version overwrote a caller-supplied outdir with
    # op.dirname(infile), silently ignoring the requested output directory.
    # Honor outdir when given; otherwise keep the old default (current dir).
    if not outdir:
        outdir = ''
    outfile = op.join(outdir, op.basename(outfile))

    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        # Context managers guarantee both handles are closed even if the
        # read or write raises (the old code leaked `gz` on error).
        with gzip.open(infile, "rb") as gz:
            with open(outfile, "wb") as new_file:
                new_file.write(gz.read())
        log.debug('{}: file unzipped'.format(outfile))
    else:
        log.debug('{}: file already unzipped'.format(outfile))

    if delete_original:
        os.remove(infile)

    return outfile
return outfile | python | def gunzip_file(infile, outfile=None, outdir=None, delete_original=False, force_rerun_flag=False):
"""Decompress a gzip file and optionally set output values.
Args:
infile: Path to .gz file
outfile: Name of output file
outdir: Path to output directory
delete_original: If original .gz file should be deleted
force_rerun_flag: If file should be decompressed if outfile already exists
Returns:
str: Path to decompressed file
"""
if not outfile:
outfile = infile.replace('.gz', '')
if not outdir:
outdir = ''
else:
outdir = op.dirname(infile)
outfile = op.join(outdir, op.basename(outfile))
if force_rerun(flag=force_rerun_flag, outfile=outfile):
gz = gzip.open(infile, "rb")
decoded = gz.read()
with open(outfile, "wb") as new_file:
new_file.write(decoded)
gz.close()
log.debug('{}: file unzipped'.format(outfile))
else:
log.debug('{}: file already unzipped'.format(outfile))
if delete_original:
os.remove(infile)
return outfile | [
"def",
"gunzip_file",
"(",
"infile",
",",
"outfile",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"delete_original",
"=",
"False",
",",
"force_rerun_flag",
"=",
"False",
")",
":",
"if",
"not",
"outfile",
":",
"outfile",
"=",
"infile",
".",
"replace",
"(... | Decompress a gzip file and optionally set output values.
Args:
infile: Path to .gz file
outfile: Name of output file
outdir: Path to output directory
delete_original: If original .gz file should be deleted
force_rerun_flag: If file should be decompressed if outfile already exists
Returns:
str: Path to decompressed file | [
"Decompress",
"a",
"gzip",
"file",
"and",
"optionally",
"set",
"output",
"values",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L365-L403 | train | 28,909 |
def request_file(link, outfile, force_rerun_flag=False):
    """Download a file given a URL if the outfile does not exist already.

    Args:
        link (str): Link to download file.
        outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does
            exist, unless force_rerun_flag is True.
        force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.

    Returns:
        str: Path to downloaded file.

    """
    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        response = requests.get(link)
        if response.status_code != 200:
            # Download failed -- log it, but still hand back the path
            log.error('{}: request error {}'.format(link, response.status_code))
        else:
            with open(outfile, 'w') as handle:
                handle.write(response.text)
            log.debug('Loaded and saved {} to {}'.format(link, outfile))
    return outfile
return outfile | python | def request_file(link, outfile, force_rerun_flag=False):
"""Download a file given a URL if the outfile does not exist already.
Args:
link (str): Link to download file.
outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does
exist, unless force_rerun_flag is True.
force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.
Returns:
str: Path to downloaded file.
"""
if force_rerun(flag=force_rerun_flag, outfile=outfile):
req = requests.get(link)
if req.status_code == 200:
with open(outfile, 'w') as f:
f.write(req.text)
log.debug('Loaded and saved {} to {}'.format(link, outfile))
else:
log.error('{}: request error {}'.format(link, req.status_code))
return outfile | [
"def",
"request_file",
"(",
"link",
",",
"outfile",
",",
"force_rerun_flag",
"=",
"False",
")",
":",
"if",
"force_rerun",
"(",
"flag",
"=",
"force_rerun_flag",
",",
"outfile",
"=",
"outfile",
")",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"link",
")"... | Download a file given a URL if the outfile does not exist already.
Args:
link (str): Link to download file.
outfile (str): Path to output file, will make a new file if it does not exist. Will not download if it does
exist, unless force_rerun_flag is True.
force_rerun_flag (bool): Flag to force re-downloading of the file if it exists already.
Returns:
str: Path to downloaded file. | [
"Download",
"a",
"file",
"given",
"a",
"URL",
"if",
"the",
"outfile",
"does",
"not",
"exist",
"already",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L406-L427 | train | 28,910 |
def request_json(link, outfile, force_rerun_flag, outdir=None):
    """Download a file in JSON format from a web request

    Args:
        link: Link to web request
        outfile: Name of output file
        outdir: Directory of output file
        force_rerun_flag: If true, redownload the file

    Returns:
        dict: contents of the JSON request

    """
    # Anchor the output file in outdir when one was supplied
    outfile = op.join(outdir if outdir else '', outfile)

    if force_rerun(flag=force_rerun_flag, outfile=outfile):
        # Fetch fresh data and cache it to disk as JSON
        my_dict = requests.get(link).json()
        with open(outfile, 'w') as handle:
            json.dump(my_dict, handle)
        log.debug('Loaded and saved {} to {}'.format(link, outfile))
    else:
        # Reuse the cached copy instead of hitting the network again
        with open(outfile, 'r') as handle:
            my_dict = json.load(handle)
        log.debug('Loaded {}'.format(outfile))

    return my_dict
return my_dict | python | def request_json(link, outfile, force_rerun_flag, outdir=None):
"""Download a file in JSON format from a web request
Args:
link: Link to web request
outfile: Name of output file
outdir: Directory of output file
force_rerun_flag: If true, redownload the file
Returns:
dict: contents of the JSON request
"""
if not outdir:
outdir = ''
outfile = op.join(outdir, outfile)
if force_rerun(flag=force_rerun_flag, outfile=outfile):
text_raw = requests.get(link)
my_dict = text_raw.json()
with open(outfile, 'w') as f:
json.dump(my_dict, f)
log.debug('Loaded and saved {} to {}'.format(link, outfile))
else:
with open(outfile, 'r') as f:
my_dict = json.load(f)
log.debug('Loaded {}'.format(outfile))
return my_dict | [
"def",
"request_json",
"(",
"link",
",",
"outfile",
",",
"force_rerun_flag",
",",
"outdir",
"=",
"None",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"''",
"outfile",
"=",
"op",
".",
"join",
"(",
"outdir",
",",
"outfile",
")",
"if",
"force_reru... | Download a file in JSON format from a web request
Args:
link: Link to web request
outfile: Name of output file
outdir: Directory of output file
force_rerun_flag: If true, redownload the file
Returns:
dict: contents of the JSON request | [
"Download",
"a",
"file",
"in",
"JSON",
"format",
"from",
"a",
"web",
"request"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L430-L459 | train | 28,911 |
def command_runner(shell_command, force_rerun_flag, outfile_checker, cwd=None, silent=False):
    """Run a shell command with subprocess, with additional options to check if output file exists and printing stdout.

    Args:
        shell_command (str): Command as it would be formatted in the command-line (ie. "program -i test.in -o test.out").
        force_rerun_flag: If the program should be rerun even if the output file exists.
        outfile_checker (str): Name out the output file which may have been generated. This does not specify what the outfile
            will be, that should be done in the program's args or predetermined.
        cwd (str): Path to working directory where command will be executed.
        silent (bool): If program STDOUT should be printed to the current shell.

    Returns:
        bool: If the program ran successfully.

    """
    # NOTE(review): despite the docstring, no bool is currently returned --
    # callers receive None in every branch; confirm before changing.
    tokens = shlex.split(shell_command)
    program = tokens[0]

    # Bail out early if the executable cannot be found
    if not program_exists(program):
        raise OSError('{}: program not installed'.format(program))

    if cwd:
        # TODO: should this be done, or should user explicitly define whole outfile path?
        outfile_checker = op.join(cwd, op.basename(outfile_checker))

    # Skip execution entirely when the output is already present
    if not force_rerun(flag=force_rerun_flag, outfile=outfile_checker):
        log.debug('{}: Output already exists'.format(outfile_checker))
        return

    if silent:
        proc = subprocess.Popen(tokens, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
        out, err = proc.communicate()
        ret = proc.returncode
    else:
        # Stream the program's output to the current shell as it runs
        for chunk in execute(cmd=tokens, cwd=cwd):
            print(chunk, end="")
    # TODO: check return code and log properly
    log.debug('{}: Ran program, output to {}'.format(program, outfile_checker))
log.debug('{}: Output already exists'.format(outfile_checker)) | python | def command_runner(shell_command, force_rerun_flag, outfile_checker, cwd=None, silent=False):
"""Run a shell command with subprocess, with additional options to check if output file exists and printing stdout.
Args:
shell_command (str): Command as it would be formatted in the command-line (ie. "program -i test.in -o test.out").
force_rerun_flag: If the program should be rerun even if the output file exists.
outfile_checker (str): Name out the output file which may have been generated. This does not specify what the outfile
will be, that should be done in the program's args or predetermined.
cwd (str): Path to working directory where command will be executed.
silent (bool): If program STDOUT should be printed to the current shell.
Returns:
bool: If the program ran successfully.
"""
program_and_args = shlex.split(shell_command)
# Check if program is installed
if not program_exists(program_and_args[0]):
raise OSError('{}: program not installed'.format(program_and_args[0]))
# Format outfile if working in cwd
if cwd:
# TODO: should this be done, or should user explicitly define whole outfile path?
outfile_checker = op.join(cwd, op.basename(outfile_checker))
# Check for force rerunning
if force_rerun(flag=force_rerun_flag, outfile=outfile_checker):
if silent:
command = subprocess.Popen(program_and_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
out, err = command.communicate()
ret = command.returncode
else:
# Prints output
for path in execute(cmd=program_and_args, cwd=cwd):
print(path, end="")
# TODO: check return code and log properly
log.debug('{}: Ran program, output to {}'.format(program_and_args[0], outfile_checker))
else:
log.debug('{}: Output already exists'.format(outfile_checker)) | [
"def",
"command_runner",
"(",
"shell_command",
",",
"force_rerun_flag",
",",
"outfile_checker",
",",
"cwd",
"=",
"None",
",",
"silent",
"=",
"False",
")",
":",
"program_and_args",
"=",
"shlex",
".",
"split",
"(",
"shell_command",
")",
"# Check if program is instal... | Run a shell command with subprocess, with additional options to check if output file exists and printing stdout.
Args:
shell_command (str): Command as it would be formatted in the command-line (ie. "program -i test.in -o test.out").
force_rerun_flag: If the program should be rerun even if the output file exists.
outfile_checker (str): Name out the output file which may have been generated. This does not specify what the outfile
will be, that should be done in the program's args or predetermined.
cwd (str): Path to working directory where command will be executed.
silent (bool): If program STDOUT should be printed to the current shell.
Returns:
bool: If the program ran successfully. | [
"Run",
"a",
"shell",
"command",
"with",
"subprocess",
"with",
"additional",
"options",
"to",
"check",
"if",
"output",
"file",
"exists",
"and",
"printing",
"stdout",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L478-L518 | train | 28,912 |
def dict_head(d, N=5):
    """Return the head of a dictionary. It will be random!

    Default is to return the first 5 key/value pairs in a dictionary.

    Args:
        d: Dictionary to get head.
        N: Number of elements to display.

    Returns:
        dict: the first N items of the dictionary.

    """
    from itertools import islice

    # islice stops after N keys instead of materializing the full key list
    # (the old list(d.keys())[:N] was O(len(d)) in time and memory).
    return {k: d[k] for k in islice(d, N)}
return {k: d[k] for k in list(d.keys())[:N]} | python | def dict_head(d, N=5):
"""Return the head of a dictionary. It will be random!
Default is to return the first 5 key/value pairs in a dictionary.
Args:
d: Dictionary to get head.
N: Number of elements to display.
Returns:
dict: the first N items of the dictionary.
"""
return {k: d[k] for k in list(d.keys())[:N]} | [
"def",
"dict_head",
"(",
"d",
",",
"N",
"=",
"5",
")",
":",
"return",
"{",
"k",
":",
"d",
"[",
"k",
"]",
"for",
"k",
"in",
"list",
"(",
"d",
".",
"keys",
"(",
")",
")",
"[",
":",
"N",
"]",
"}"
] | Return the head of a dictionary. It will be random!
Default is to return the first 5 key/value pairs in a dictionary.
Args:
d: Dictionary to get head.
N: Number of elements to display.
Returns:
dict: the first N items of the dictionary. | [
"Return",
"the",
"head",
"of",
"a",
"dictionary",
".",
"It",
"will",
"be",
"random!"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L551-L564 | train | 28,913 |
def rank_dated_files(pattern, dir, descending=True):
    """Search a directory for files that match a pattern. Return an ordered list of these files by filename.

    Args:
        pattern: The glob pattern to search for.
        dir: Path to directory where the files will be searched for.
        descending: Default True, will sort alphabetically by descending order.

    Returns:
        list: Rank-ordered list by filename.

    """
    # Glob inside the directory, then order the hits by name
    return sorted(glob.glob(op.join(dir, pattern)), reverse=descending)
return sorted(files, reverse=descending) | python | def rank_dated_files(pattern, dir, descending=True):
"""Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename.
"""
files = glob.glob(op.join(dir, pattern))
return sorted(files, reverse=descending) | [
"def",
"rank_dated_files",
"(",
"pattern",
",",
"dir",
",",
"descending",
"=",
"True",
")",
":",
"files",
"=",
"glob",
".",
"glob",
"(",
"op",
".",
"join",
"(",
"dir",
",",
"pattern",
")",
")",
"return",
"sorted",
"(",
"files",
",",
"reverse",
"=",
... | Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename. | [
"Search",
"a",
"directory",
"for",
"files",
"that",
"match",
"a",
"pattern",
".",
"Return",
"an",
"ordered",
"list",
"of",
"these",
"files",
"by",
"filename",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L567-L580 | train | 28,914 |
def find(lst, a, case_sensitive=True):
    """Return indices of a list which have elements that match an object or list of objects

    Args:
        lst: list of values
        a: object(s) to check equality
        case_sensitive: if the search should be case sensitive

    Returns:
        list: list of indices of lst which equal a

    """
    targets = force_list(a)
    if not case_sensitive:
        # Normalize both sides to lowercase before comparing
        lst = [item.lower() for item in lst]
        targets = [target.lower() for target in targets]
    return [idx for idx, item in enumerate(lst) if item in targets]
return [i for i, x in enumerate(lst) if x in a] | python | def find(lst, a, case_sensitive=True):
"""Return indices of a list which have elements that match an object or list of objects
Args:
lst: list of values
a: object(s) to check equality
case_sensitive: if the search should be case sensitive
Returns:
list: list of indicies of lst which equal a
"""
a = force_list(a)
if not case_sensitive:
lst = [x.lower() for x in lst]
a = [y.lower() for y in a]
return [i for i, x in enumerate(lst) if x in a] | [
"def",
"find",
"(",
"lst",
",",
"a",
",",
"case_sensitive",
"=",
"True",
")",
":",
"a",
"=",
"force_list",
"(",
"a",
")",
"if",
"not",
"case_sensitive",
":",
"lst",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"lst",
"]",
"a",
"=",
... | Return indices of a list which have elements that match an object or list of objects
Args:
lst: list of values
a: object(s) to check equality
case_sensitive: if the search should be case sensitive
Returns:
list: list of indicies of lst which equal a | [
"Return",
"indices",
"of",
"a",
"list",
"which",
"have",
"elements",
"that",
"match",
"an",
"object",
"or",
"list",
"of",
"objects"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L583-L601 | train | 28,915 |
def filter_list(lst, takeout, case_sensitive=True):
    """Return a modified list removing items specified.

    Args:
        lst: Original list of values
        takeout: Object or objects to remove from lst
        case_sensitive: if the search should be case sensitive

    Returns:
        list: Filtered list of values

    """
    removals = force_list(takeout)
    if not case_sensitive:
        # Normalize both sides to lowercase before comparing
        lst = [item.lower() for item in lst]
        removals = [removal.lower() for removal in removals]
    return [item for item in lst if item not in removals]
return [x for x in lst if x not in takeout] | python | def filter_list(lst, takeout, case_sensitive=True):
"""Return a modified list removing items specified.
Args:
lst: Original list of values
takeout: Object or objects to remove from lst
case_sensitive: if the search should be case sensitive
Returns:
list: Filtered list of values
"""
takeout = force_list(takeout)
if not case_sensitive:
lst = [x.lower() for x in lst]
takeout = [y.lower() for y in takeout]
return [x for x in lst if x not in takeout] | [
"def",
"filter_list",
"(",
"lst",
",",
"takeout",
",",
"case_sensitive",
"=",
"True",
")",
":",
"takeout",
"=",
"force_list",
"(",
"takeout",
")",
"if",
"not",
"case_sensitive",
":",
"lst",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"ls... | Return a modified list removing items specified.
Args:
lst: Original list of values
takeout: Object or objects to remove from lst
case_sensitive: if the search should be case sensitive
Returns:
list: Filtered list of values | [
"Return",
"a",
"modified",
"list",
"removing",
"items",
"specified",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L625-L643 | train | 28,916 |
def filter_list_by_indices(lst, indices):
    """Return a modified list containing only the indices indicated.

    Args:
        lst: Original list of values
        indices: List of indices to keep from the original list

    Returns:
        list: Filtered list of values

    """
    # A set gives O(1) membership tests; the old `i in indices` scanned the
    # indices list for every element of lst (O(n*m)).
    keep = set(indices)
    return [x for i, x in enumerate(lst) if i in keep]
return [x for i, x in enumerate(lst) if i in indices] | python | def filter_list_by_indices(lst, indices):
"""Return a modified list containing only the indices indicated.
Args:
lst: Original list of values
indices: List of indices to keep from the original list
Returns:
list: Filtered list of values
"""
return [x for i, x in enumerate(lst) if i in indices] | [
"def",
"filter_list_by_indices",
"(",
"lst",
",",
"indices",
")",
":",
"return",
"[",
"x",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"lst",
")",
"if",
"i",
"in",
"indices",
"]"
] | Return a modified list containing only the indices indicated.
Args:
lst: Original list of values
indices: List of indices to keep from the original list
Returns:
list: Filtered list of values | [
"Return",
"a",
"modified",
"list",
"containing",
"only",
"the",
"indices",
"indicated",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L646-L657 | train | 28,917 |
def force_string(val=None):
    """Force a string representation of an object

    Args:
        val: object to parse into a string

    Returns:
        str: String representation

    """
    if val is None:
        return ''
    if isinstance(val, list):
        # Lists render as their items' string forms, semicolon-separated
        return ';'.join(str(item) for item in val)
    return val if isinstance(val, str) else str(val)
return str(val) | python | def force_string(val=None):
"""Force a string representation of an object
Args:
val: object to parse into a string
Returns:
str: String representation
"""
if val is None:
return ''
if isinstance(val, list):
newval = [str(x) for x in val]
return ';'.join(newval)
if isinstance(val, str):
return val
else:
return str(val) | [
"def",
"force_string",
"(",
"val",
"=",
"None",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"''",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"newval",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"val",
"]",
"return",
"... | Force a string representation of an object
Args:
val: object to parse into a string
Returns:
str: String representation | [
"Force",
"a",
"string",
"representation",
"of",
"an",
"object"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L660-L678 | train | 28,918 |
def force_list(val=None):
    """Force a list representation of an object

    Args:
        val: object to parse into a list

    Returns:

    """
    if val is None:
        return []
    if isinstance(val, pd.Series):
        # pandas Series are converted to a plain Python list
        return val.tolist()
    if isinstance(val, list):
        return val
    return [val]
return val if isinstance(val, list) else [val] | python | def force_list(val=None):
"""Force a list representation of an object
Args:
val: object to parse into a list
Returns:
"""
if val is None:
return []
if isinstance(val, pd.Series):
return val.tolist()
return val if isinstance(val, list) else [val] | [
"def",
"force_list",
"(",
"val",
"=",
"None",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"[",
"]",
"if",
"isinstance",
"(",
"val",
",",
"pd",
".",
"Series",
")",
":",
"return",
"val",
".",
"tolist",
"(",
")",
"return",
"val",
"if",
"isin... | Force a list representation of an object
Args:
val: object to parse into a list
Returns: | [
"Force",
"a",
"list",
"representation",
"of",
"an",
"object"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L681-L694 | train | 28,919 |
def split_list_by_n(l, n):
    """Split a list into lists of size n.

    Args:
        l: List of stuff.
        n: Size of new lists.

    Returns:
        list: List of lists each of size n derived from l.

    """
    # Clamp the chunk size to at least 1, then slice the list in strides
    size = max(1, n)
    return [l[start:start + size] for start in range(0, len(l), size)]
return list(l[i:i+n] for i in range(0, len(l), n)) | python | def split_list_by_n(l, n):
"""Split a list into lists of size n.
Args:
l: List of stuff.
n: Size of new lists.
Returns:
list: List of lists each of size n derived from l.
"""
n = max(1, n)
return list(l[i:i+n] for i in range(0, len(l), n)) | [
"def",
"split_list_by_n",
"(",
"l",
",",
"n",
")",
":",
"n",
"=",
"max",
"(",
"1",
",",
"n",
")",
"return",
"list",
"(",
"l",
"[",
"i",
":",
"i",
"+",
"n",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"l",
")",
",",
"n",
"... | Split a list into lists of size n.
Args:
l: List of stuff.
n: Size of new lists.
Returns:
list: List of lists each of size n derived from l. | [
"Split",
"a",
"list",
"into",
"lists",
"of",
"size",
"n",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L723-L735 | train | 28,920 |
def input_list_parser(infile_list):
    """Always return a list of files with varying input.

    >>> input_list_parser(['/path/to/folder/'])
    ['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']

    >>> input_list_parser(['/path/to/file.txt'])
    ['/path/to/file.txt']

    >>> input_list_parser(['file1.txt'])
    ['file1.txt']

    Args:
        infile_list: List of arguments

    Returns:
        list: Standardized list of files

    """
    final_list_of_files = []

    for x in infile_list:
        # If the input is a folder, expand it to the files inside it.
        # Bug fix: glob within the directory instead of os.chdir()-ing into
        # it -- the old approach permanently changed the process working
        # directory and returned bare filenames rather than the full paths
        # the docstring examples promise.
        if op.isdir(x):
            final_list_of_files.extend(glob.glob(op.join(x, '*')))

        # If the input is a file
        if op.isfile(x):
            final_list_of_files.append(x)

    return final_list_of_files
return final_list_of_files | python | def input_list_parser(infile_list):
"""Always return a list of files with varying input.
>>> input_list_parser(['/path/to/folder/'])
['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']
>>> input_list_parser(['/path/to/file.txt'])
['/path/to/file.txt']
>>> input_list_parser(['file1.txt'])
['file1.txt']
Args:
infile_list: List of arguments
Returns:
list: Standardized list of files
"""
final_list_of_files = []
for x in infile_list:
# If the input is a folder
if op.isdir(x):
os.chdir(x)
final_list_of_files.extend(glob.glob('*'))
# If the input is a file
if op.isfile(x):
final_list_of_files.append(x)
return final_list_of_files | [
"def",
"input_list_parser",
"(",
"infile_list",
")",
":",
"final_list_of_files",
"=",
"[",
"]",
"for",
"x",
"in",
"infile_list",
":",
"# If the input is a folder",
"if",
"op",
".",
"isdir",
"(",
"x",
")",
":",
"os",
".",
"chdir",
"(",
"x",
")",
"final_list... | Always return a list of files with varying input.
>>> input_list_parser(['/path/to/folder/'])
['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']
>>> input_list_parser(['/path/to/file.txt'])
['/path/to/file.txt']
>>> input_list_parser(['file1.txt'])
['file1.txt']
Args:
infile_list: List of arguments
Returns:
list: Standardized list of files | [
"Always",
"return",
"a",
"list",
"of",
"files",
"with",
"varying",
"input",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L752-L785 | train | 28,921 |
def flatlist_dropdup(list_of_lists):
    """Make a single list out of a list of lists, and drop all duplicates.

    Args:
        list_of_lists: List of lists.

    Returns:
        list: List of single objects.

    """
    # Flatten, stringify every item, and deduplicate via a set comprehension
    uniques = {str(item) for sublist in list_of_lists for item in sublist}
    return list(uniques)
return list(set([str(item) for sublist in list_of_lists for item in sublist])) | python | def flatlist_dropdup(list_of_lists):
"""Make a single list out of a list of lists, and drop all duplicates.
Args:
list_of_lists: List of lists.
Returns:
list: List of single objects.
"""
return list(set([str(item) for sublist in list_of_lists for item in sublist])) | [
"def",
"flatlist_dropdup",
"(",
"list_of_lists",
")",
":",
"return",
"list",
"(",
"set",
"(",
"[",
"str",
"(",
"item",
")",
"for",
"sublist",
"in",
"list_of_lists",
"for",
"item",
"in",
"sublist",
"]",
")",
")"
] | Make a single list out of a list of lists, and drop all duplicates.
Args:
list_of_lists: List of lists.
Returns:
list: List of single objects. | [
"Make",
"a",
"single",
"list",
"out",
"of",
"a",
"list",
"of",
"lists",
"and",
"drop",
"all",
"duplicates",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L788-L798 | train | 28,922 |
def scale_calculator(multiplier, elements, rescale=None):
    """Get a dictionary of scales for each element in elements.

    Examples:
        >>> scale_calculator(1, [2,7,8])
        {8: 1, 2: 1, 7: 1}

        >>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8])
        {2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 1, 8: 1}

        >>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8], rescale=(0.5,1))
        {2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}

        >>> scale_calculator(1, {2:3, 3:1, 4:1, 5:2, 6:1, 7:1, 8:1}, rescale=(0.5,1))
        {2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}

        >>> scale_calculator(1, [(2,2,2),(3,),(4,),(5,),(5,),(6,7,8)], rescale=(0.5,1))
        {(2, 2, 2): 0.5, (3,): 0.5, (6, 7, 8): 0.5, (4,): 0.5, (5,): 1.0}

        >>> scale_calculator(1, {77:35, 80:35, 16:1}, rescale=(.99,1))
        None

    Args:
        multiplier (int, float): Base float to be multiplied
        elements (list, dict): Dictionary which contains object:count
            or list of objects that may have repeats which will be counted
        rescale (tuple): Min and max values to rescale to

    Returns:
        dict: Scaled values of multiplier for each element in elements

    """
    from collections import Counter

    # TODO: think about what happens when:
    # TODO: 1. there is only one (or n) of each element, and rescale is set to seomthing. what is the original min/max to scale from?
    # TODO: 2. can we normalize the scale based on other counts? (ie. other gene mutation frequencies)

    if isinstance(elements, list):
        # Counter tallies every unique element in one O(n) pass, replacing
        # the old per-element list.count() loop which was O(n^2).
        scales = {x: multiplier * count for x, count in Counter(elements).items()}
    elif isinstance(elements, dict):
        scales = {k: multiplier * int(count) for k, count in elements.items()}
    else:
        raise ValueError('Input list of elements or dictionary of elements & counts')

    if not rescale:
        return scales

    # Linearly remap all scales from their observed range into [rescale[0], rescale[1]]
    lo, hi = min(scales.values()), max(scales.values())
    return {k: remap(v, lo, hi, rescale[0], rescale[1]) for k, v in scales.items()}
return new_scales | python | def scale_calculator(multiplier, elements, rescale=None):
"""Get a dictionary of scales for each element in elements.
Examples:
>>> scale_calculator(1, [2,7,8])
{8: 1, 2: 1, 7: 1}
>>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8])
{2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 1, 8: 1}
>>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8], rescale=(0.5,1))
{2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}
>>> scale_calculator(1, {2:3, 3:1, 4:1, 5:2, 6:1, 7:1, 8:1}, rescale=(0.5,1))
{2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}
>>> scale_calculator(1, [(2,2,2),(3,),(4,),(5,),(5,),(6,7,8)], rescale=(0.5,1))
{(2, 2, 2): 0.5, (3,): 0.5, (6, 7, 8): 0.5, (4,): 0.5, (5,): 1.0}
>>> scale_calculator(1, {77:35, 80:35, 16:1}, rescale=(.99,1))
None
Args:
mutiplier (int, float): Base float to be multiplied
elements (list, dict): Dictionary which contains object:count
or list of objects that may have repeats which will be counted
rescale (tuple): Min and max values to rescale to
Returns:
dict: Scaled values of mutiplier for each element in elements
"""
# TODO: think about what happens when:
# TODO: 1. there is only one (or n) of each element, and rescale is set to seomthing. what is the original min/max to scale from?
# TODO: 2. can we normalize the scale based on other counts? (ie. other gene mutation frequencies)
if isinstance(elements, list):
unique_elements = list(set(elements))
scales = {}
for x in unique_elements:
count = elements.count(x)
scales[x] = multiplier * count
elif isinstance(elements, dict):
scales = {}
for k,count in elements.items():
scales[k] = multiplier * int(count)
else:
raise ValueError('Input list of elements or dictionary of elements & counts')
if not rescale:
return scales
else:
new_scales = {}
for k,v in scales.items():
new_scales[k] = remap(v, min(scales.values()), max(scales.values()), rescale[0], rescale[1])
return new_scales | [
"def",
"scale_calculator",
"(",
"multiplier",
",",
"elements",
",",
"rescale",
"=",
"None",
")",
":",
"# TODO: think about what happens when:",
"# TODO: 1. there is only one (or n) of each element, and rescale is set to seomthing. what is the original min/max to scale from?",
"# TODO: 2.... | Get a dictionary of scales for each element in elements.
Examples:
>>> scale_calculator(1, [2,7,8])
{8: 1, 2: 1, 7: 1}
>>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8])
{2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 1, 8: 1}
>>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8], rescale=(0.5,1))
{2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}
>>> scale_calculator(1, {2:3, 3:1, 4:1, 5:2, 6:1, 7:1, 8:1}, rescale=(0.5,1))
{2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5}
>>> scale_calculator(1, [(2,2,2),(3,),(4,),(5,),(5,),(6,7,8)], rescale=(0.5,1))
{(2, 2, 2): 0.5, (3,): 0.5, (6, 7, 8): 0.5, (4,): 0.5, (5,): 1.0}
>>> scale_calculator(1, {77:35, 80:35, 16:1}, rescale=(.99,1))
None
Args:
mutiplier (int, float): Base float to be multiplied
elements (list, dict): Dictionary which contains object:count
or list of objects that may have repeats which will be counted
rescale (tuple): Min and max values to rescale to
Returns:
dict: Scaled values of mutiplier for each element in elements | [
"Get",
"a",
"dictionary",
"of",
"scales",
"for",
"each",
"element",
"in",
"elements",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L929-L985 | train | 28,923 |
def label_sequential_regions(inlist):
    """Input a list of labeled tuples and return a dictionary of sequentially labeled regions.

    Args:
        inlist (list): A list of tuples with the first number representing the index and the second the index label.

    Returns:
        dict: Dictionary of labeled regions.

    Examples:
        >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])
        {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}

    """
    from itertools import groupby

    # Group indices by label, preserving the order in which labels first
    # appear. This reproduces the previous pandas + more_itertools behavior
    # using only the standard library (one less third-party dependency).
    by_label = {}
    label_order = []
    for idx, label in inlist:
        if label not in by_label:
            by_label[label] = []
            label_order.append(label)
        by_label[label].append(idx)

    labeled = {}
    for label in label_order:
        # Consecutive integers share a constant (value - position) key, so
        # groupby splits each label's indices into runs of consecutive values.
        runs = [
            [idx for _, idx in run]
            for _, run in groupby(enumerate(by_label[label]), key=lambda pair: pair[1] - pair[0])
        ]
        labeled.update({'{}{}'.format(label, i + 1): run for i, run in enumerate(runs)})

    return labeled
return labeled | python | def label_sequential_regions(inlist):
"""Input a list of labeled tuples and return a dictionary of sequentially labeled regions.
Args:
inlist (list): A list of tuples with the first number representing the index and the second the index label.
Returns:
dict: Dictionary of labeled regions.
Examples:
>>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])
{'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}
"""
import more_itertools as mit
df = pd.DataFrame(inlist).set_index(0)
labeled = {}
for label in df[1].unique():
iterable = df[df[1] == label].index.tolist()
labeled.update({'{}{}'.format(label, i + 1): items for i, items in
enumerate([list(group) for group in mit.consecutive_groups(iterable)])})
return labeled | [
"def",
"label_sequential_regions",
"(",
"inlist",
")",
":",
"import",
"more_itertools",
"as",
"mit",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"inlist",
")",
".",
"set_index",
"(",
"0",
")",
"labeled",
"=",
"{",
"}",
"for",
"label",
"in",
"df",
"[",
"1",... | Input a list of labeled tuples and return a dictionary of sequentially labeled regions.
Args:
inlist (list): A list of tuples with the first number representing the index and the second the index label.
Returns:
dict: Dictionary of labeled regions.
Examples:
>>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')])
{'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]} | [
"Input",
"a",
"list",
"of",
"labeled",
"tuples",
"and",
"return",
"a",
"dictionary",
"of",
"sequentially",
"labeled",
"regions",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L988-L1013 | train | 28,924 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.sequence_path | def sequence_path(self, fasta_path):
"""Provide pointers to the paths of the FASTA file
Args:
fasta_path: Path to FASTA file
"""
if not fasta_path:
self.sequence_dir = None
self.sequence_file = None
else:
if not op.exists(fasta_path):
raise OSError('{}: file does not exist'.format(fasta_path))
if not op.dirname(fasta_path):
self.sequence_dir = '.'
else:
self.sequence_dir = op.dirname(fasta_path)
self.sequence_file = op.basename(fasta_path)
tmp_sr = SeqIO.read(fasta_path, 'fasta')
if self.name == '<unknown name>':
self.name = tmp_sr.name
if self.description == '<unknown description>':
self.description = tmp_sr.description
if not self.dbxrefs:
self.dbxrefs = tmp_sr.dbxrefs
if not self.features:
self.features = tmp_sr.features
if not self.annotations:
self.annotations = tmp_sr.annotations
if not self.letter_annotations:
self.letter_annotations = tmp_sr.letter_annotations | python | def sequence_path(self, fasta_path):
"""Provide pointers to the paths of the FASTA file
Args:
fasta_path: Path to FASTA file
"""
if not fasta_path:
self.sequence_dir = None
self.sequence_file = None
else:
if not op.exists(fasta_path):
raise OSError('{}: file does not exist'.format(fasta_path))
if not op.dirname(fasta_path):
self.sequence_dir = '.'
else:
self.sequence_dir = op.dirname(fasta_path)
self.sequence_file = op.basename(fasta_path)
tmp_sr = SeqIO.read(fasta_path, 'fasta')
if self.name == '<unknown name>':
self.name = tmp_sr.name
if self.description == '<unknown description>':
self.description = tmp_sr.description
if not self.dbxrefs:
self.dbxrefs = tmp_sr.dbxrefs
if not self.features:
self.features = tmp_sr.features
if not self.annotations:
self.annotations = tmp_sr.annotations
if not self.letter_annotations:
self.letter_annotations = tmp_sr.letter_annotations | [
"def",
"sequence_path",
"(",
"self",
",",
"fasta_path",
")",
":",
"if",
"not",
"fasta_path",
":",
"self",
".",
"sequence_dir",
"=",
"None",
"self",
".",
"sequence_file",
"=",
"None",
"else",
":",
"if",
"not",
"op",
".",
"exists",
"(",
"fasta_path",
")",
... | Provide pointers to the paths of the FASTA file
Args:
fasta_path: Path to FASTA file | [
"Provide",
"pointers",
"to",
"the",
"paths",
"of",
"the",
"FASTA",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L253-L286 | train | 28,925 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.feature_path | def feature_path(self, gff_path):
"""Load a GFF file with information on a single sequence and store features in the ``features`` attribute
Args:
gff_path: Path to GFF file.
"""
if not gff_path:
self.feature_dir = None
self.feature_file = None
else:
if not op.exists(gff_path):
raise OSError('{}: file does not exist!'.format(gff_path))
if not op.dirname(gff_path):
self.feature_dir = '.'
else:
self.feature_dir = op.dirname(gff_path)
self.feature_file = op.basename(gff_path) | python | def feature_path(self, gff_path):
"""Load a GFF file with information on a single sequence and store features in the ``features`` attribute
Args:
gff_path: Path to GFF file.
"""
if not gff_path:
self.feature_dir = None
self.feature_file = None
else:
if not op.exists(gff_path):
raise OSError('{}: file does not exist!'.format(gff_path))
if not op.dirname(gff_path):
self.feature_dir = '.'
else:
self.feature_dir = op.dirname(gff_path)
self.feature_file = op.basename(gff_path) | [
"def",
"feature_path",
"(",
"self",
",",
"gff_path",
")",
":",
"if",
"not",
"gff_path",
":",
"self",
".",
"feature_dir",
"=",
"None",
"self",
".",
"feature_file",
"=",
"None",
"else",
":",
"if",
"not",
"op",
".",
"exists",
"(",
"gff_path",
")",
":",
... | Load a GFF file with information on a single sequence and store features in the ``features`` attribute
Args:
gff_path: Path to GFF file. | [
"Load",
"a",
"GFF",
"file",
"with",
"information",
"on",
"a",
"single",
"sequence",
"and",
"store",
"features",
"in",
"the",
"features",
"attribute"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L354-L373 | train | 28,926 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.feature_path_unset | def feature_path_unset(self):
"""Copy features to memory and remove the association of the feature file."""
if not self.feature_file:
raise IOError('No feature file to unset')
with open(self.feature_path) as handle:
feats = list(GFF.parse(handle))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
tmp = feats[0].features
self.feature_dir = None
self.feature_file = None
self.features = tmp | python | def feature_path_unset(self):
"""Copy features to memory and remove the association of the feature file."""
if not self.feature_file:
raise IOError('No feature file to unset')
with open(self.feature_path) as handle:
feats = list(GFF.parse(handle))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
tmp = feats[0].features
self.feature_dir = None
self.feature_file = None
self.features = tmp | [
"def",
"feature_path_unset",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"feature_file",
":",
"raise",
"IOError",
"(",
"'No feature file to unset'",
")",
"with",
"open",
"(",
"self",
".",
"feature_path",
")",
"as",
"handle",
":",
"feats",
"=",
"list",
... | Copy features to memory and remove the association of the feature file. | [
"Copy",
"features",
"to",
"memory",
"and",
"remove",
"the",
"association",
"of",
"the",
"feature",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L375-L389 | train | 28,927 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_dict | def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Get a dictionary of this object's attributes. Optional format for storage in a Pandas DataFrame.
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
# Choose attributes to return, return everything in the object if a list is not specified
if not only_attributes:
keys = list(self.__dict__.keys())
else:
keys = ssbio.utils.force_list(only_attributes)
# Remove keys you don't want returned
if exclude_attributes:
exclude_attributes = ssbio.utils.force_list(exclude_attributes)
for x in exclude_attributes:
if x in keys:
keys.remove(x)
# Copy attributes into a new dictionary
df_dict = {}
for k, orig_v in self.__dict__.items():
if k in keys:
v = deepcopy(orig_v)
if df_format:
if v and not isinstance(v, str) and not isinstance(v, int) and not isinstance(v,
float) and not isinstance(
v, bool):
try:
df_dict[k] = ssbio.utils.force_string(deepcopy(v))
except TypeError:
log.warning('{}: excluding attribute from dict, cannot transform into string'.format(k))
elif not v and not isinstance(v, int) and not isinstance(v, float):
df_dict[k] = None
else:
df_dict[k] = deepcopy(v)
else:
df_dict[k] = deepcopy(v)
return df_dict | python | def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
"""Get a dictionary of this object's attributes. Optional format for storage in a Pandas DataFrame.
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
"""
# Choose attributes to return, return everything in the object if a list is not specified
if not only_attributes:
keys = list(self.__dict__.keys())
else:
keys = ssbio.utils.force_list(only_attributes)
# Remove keys you don't want returned
if exclude_attributes:
exclude_attributes = ssbio.utils.force_list(exclude_attributes)
for x in exclude_attributes:
if x in keys:
keys.remove(x)
# Copy attributes into a new dictionary
df_dict = {}
for k, orig_v in self.__dict__.items():
if k in keys:
v = deepcopy(orig_v)
if df_format:
if v and not isinstance(v, str) and not isinstance(v, int) and not isinstance(v,
float) and not isinstance(
v, bool):
try:
df_dict[k] = ssbio.utils.force_string(deepcopy(v))
except TypeError:
log.warning('{}: excluding attribute from dict, cannot transform into string'.format(k))
elif not v and not isinstance(v, int) and not isinstance(v, float):
df_dict[k] = None
else:
df_dict[k] = deepcopy(v)
else:
df_dict[k] = deepcopy(v)
return df_dict | [
"def",
"get_dict",
"(",
"self",
",",
"only_attributes",
"=",
"None",
",",
"exclude_attributes",
"=",
"None",
",",
"df_format",
"=",
"False",
")",
":",
"# Choose attributes to return, return everything in the object if a list is not specified",
"if",
"not",
"only_attributes"... | Get a dictionary of this object's attributes. Optional format for storage in a Pandas DataFrame.
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes | [
"Get",
"a",
"dictionary",
"of",
"this",
"object",
"s",
"attributes",
".",
"Optional",
"format",
"for",
"storage",
"in",
"a",
"Pandas",
"DataFrame",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L419-L466 | train | 28,928 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.equal_to | def equal_to(self, seq_prop):
"""Test if the sequence is equal to another SeqProp's sequence
Args:
seq_prop: SeqProp object
Returns:
bool: If the sequences are the same
"""
if not self.seq or not seq_prop or not seq_prop.seq:
return False
return self.seq == seq_prop.seq | python | def equal_to(self, seq_prop):
"""Test if the sequence is equal to another SeqProp's sequence
Args:
seq_prop: SeqProp object
Returns:
bool: If the sequences are the same
"""
if not self.seq or not seq_prop or not seq_prop.seq:
return False
return self.seq == seq_prop.seq | [
"def",
"equal_to",
"(",
"self",
",",
"seq_prop",
")",
":",
"if",
"not",
"self",
".",
"seq",
"or",
"not",
"seq_prop",
"or",
"not",
"seq_prop",
".",
"seq",
":",
"return",
"False",
"return",
"self",
".",
"seq",
"==",
"seq_prop",
".",
"seq"
] | Test if the sequence is equal to another SeqProp's sequence
Args:
seq_prop: SeqProp object
Returns:
bool: If the sequences are the same | [
"Test",
"if",
"the",
"sequence",
"is",
"equal",
"to",
"another",
"SeqProp",
"s",
"sequence"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L485-L498 | train | 28,929 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.write_fasta_file | def write_fasta_file(self, outfile, force_rerun=False):
"""Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten
"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
SeqIO.write(self, outfile, "fasta")
# The Seq as it will now be dynamically loaded from the file
self.sequence_path = outfile | python | def write_fasta_file(self, outfile, force_rerun=False):
"""Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten
"""
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
SeqIO.write(self, outfile, "fasta")
# The Seq as it will now be dynamically loaded from the file
self.sequence_path = outfile | [
"def",
"write_fasta_file",
"(",
"self",
",",
"outfile",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"ssbio",
".",
"utils",
".",
"force_rerun",
"(",
"flag",
"=",
"force_rerun",
",",
"outfile",
"=",
"outfile",
")",
":",
"SeqIO",
".",
"write",
"(",
... | Write a FASTA file for the protein sequence, ``seq`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten | [
"Write",
"a",
"FASTA",
"file",
"for",
"the",
"protein",
"sequence",
"seq",
"will",
"now",
"load",
"directly",
"from",
"this",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L500-L512 | train | 28,930 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.write_gff_file | def write_gff_file(self, outfile, force_rerun=False):
"""Write a GFF file for the protein features, ``features`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
with open(outfile, "w") as out_handle:
GFF.write([self], out_handle)
self.feature_path = outfile | python | def write_gff_file(self, outfile, force_rerun=False):
"""Write a GFF file for the protein features, ``features`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten
"""
if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):
with open(outfile, "w") as out_handle:
GFF.write([self], out_handle)
self.feature_path = outfile | [
"def",
"write_gff_file",
"(",
"self",
",",
"outfile",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"ssbio",
".",
"utils",
".",
"force_rerun",
"(",
"outfile",
"=",
"outfile",
",",
"flag",
"=",
"force_rerun",
")",
":",
"with",
"open",
"(",
"outfile",
... | Write a GFF file for the protein features, ``features`` will now load directly from this file.
Args:
outfile (str): Path to new FASTA file to be written to
force_rerun (bool): If an existing file should be overwritten | [
"Write",
"a",
"GFF",
"file",
"for",
"the",
"protein",
"features",
"features",
"will",
"now",
"load",
"directly",
"from",
"this",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L514-L526 | train | 28,931 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.add_point_feature | def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):
"""Add a feature to the features list describing a single residue.
Args:
resnum (int): Protein sequence residue number
feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
"""
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append '
'additional features.')
if not feat_type:
feat_type = 'Manually added protein sequence single residue feature'
newfeat = SeqFeature(location=FeatureLocation(ExactPosition(resnum-1), ExactPosition(resnum)),
type=feat_type,
id=feat_id,
qualifiers=qualifiers)
self.features.append(newfeat) | python | def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):
"""Add a feature to the features list describing a single residue.
Args:
resnum (int): Protein sequence residue number
feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
"""
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append '
'additional features.')
if not feat_type:
feat_type = 'Manually added protein sequence single residue feature'
newfeat = SeqFeature(location=FeatureLocation(ExactPosition(resnum-1), ExactPosition(resnum)),
type=feat_type,
id=feat_id,
qualifiers=qualifiers)
self.features.append(newfeat) | [
"def",
"add_point_feature",
"(",
"self",
",",
"resnum",
",",
"feat_type",
"=",
"None",
",",
"feat_id",
"=",
"None",
",",
"qualifiers",
"=",
"None",
")",
":",
"if",
"self",
".",
"feature_file",
":",
"raise",
"ValueError",
"(",
"'Feature file associated with seq... | Add a feature to the features list describing a single residue.
Args:
resnum (int): Protein sequence residue number
feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1') | [
"Add",
"a",
"feature",
"to",
"the",
"features",
"list",
"describing",
"a",
"single",
"residue",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L528-L548 | train | 28,932 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.add_region_feature | def add_region_feature(self, start_resnum, end_resnum, feat_type=None, feat_id=None, qualifiers=None):
"""Add a feature to the features list describing a region of the protein sequence.
Args:
start_resnum (int): Start residue number of the protein sequence feature
end_resnum (int): End residue number of the protein sequence feature
feat_type (str, optional): Optional description of the feature type (ie. 'binding domain')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
"""
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append '
'additional features.')
if not feat_type:
feat_type = 'Manually added protein sequence region feature'
newfeat = SeqFeature(location=FeatureLocation(start_resnum-1, end_resnum),
type=feat_type,
id=feat_id,
qualifiers=qualifiers)
self.features.append(newfeat) | python | def add_region_feature(self, start_resnum, end_resnum, feat_type=None, feat_id=None, qualifiers=None):
"""Add a feature to the features list describing a region of the protein sequence.
Args:
start_resnum (int): Start residue number of the protein sequence feature
end_resnum (int): End residue number of the protein sequence feature
feat_type (str, optional): Optional description of the feature type (ie. 'binding domain')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
"""
if self.feature_file:
raise ValueError('Feature file associated with sequence, please remove file association to append '
'additional features.')
if not feat_type:
feat_type = 'Manually added protein sequence region feature'
newfeat = SeqFeature(location=FeatureLocation(start_resnum-1, end_resnum),
type=feat_type,
id=feat_id,
qualifiers=qualifiers)
self.features.append(newfeat) | [
"def",
"add_region_feature",
"(",
"self",
",",
"start_resnum",
",",
"end_resnum",
",",
"feat_type",
"=",
"None",
",",
"feat_id",
"=",
"None",
",",
"qualifiers",
"=",
"None",
")",
":",
"if",
"self",
".",
"feature_file",
":",
"raise",
"ValueError",
"(",
"'Fe... | Add a feature to the features list describing a region of the protein sequence.
Args:
start_resnum (int): Start residue number of the protein sequence feature
end_resnum (int): End residue number of the protein sequence feature
feat_type (str, optional): Optional description of the feature type (ie. 'binding domain')
feat_id (str, optional): Optional ID of the feature type (ie. 'TM1') | [
"Add",
"a",
"feature",
"to",
"the",
"features",
"list",
"describing",
"a",
"region",
"of",
"the",
"protein",
"sequence",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L550-L571 | train | 28,933 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_subsequence | def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a list of residue numbers"""
# XTODO: documentation
biop_compound_list = []
for resnum in resnums:
# XTODO can be sped up by separating into ranges based on continuous resnums
feat = FeatureLocation(resnum - 1, resnum)
biop_compound_list.append(feat)
if len(biop_compound_list) == 0:
log.debug('Zero length subsequence')
return
elif len(biop_compound_list) == 1:
log.debug('Subsequence only one residue long')
sub_feature_location = biop_compound_list[0]
else:
sub_feature_location = CompoundLocation(biop_compound_list)
try:
sub_feature = sub_feature_location.extract(self)
except TypeError:
log.critical('SeqProp {}: unknown error when trying to get subsequence - please investigate! '
'Try using a feature to extract a subsequence from the SeqProp'.format(self.id))
return
if not new_id:
new_id = '{}_subseq'.format(self.id)
new_sp = SeqProp(id=new_id, seq=sub_feature.seq)
if copy_letter_annotations:
new_sp.letter_annotations = sub_feature.letter_annotations
return new_sp | python | def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a list of residue numbers"""
# XTODO: documentation
biop_compound_list = []
for resnum in resnums:
# XTODO can be sped up by separating into ranges based on continuous resnums
feat = FeatureLocation(resnum - 1, resnum)
biop_compound_list.append(feat)
if len(biop_compound_list) == 0:
log.debug('Zero length subsequence')
return
elif len(biop_compound_list) == 1:
log.debug('Subsequence only one residue long')
sub_feature_location = biop_compound_list[0]
else:
sub_feature_location = CompoundLocation(biop_compound_list)
try:
sub_feature = sub_feature_location.extract(self)
except TypeError:
log.critical('SeqProp {}: unknown error when trying to get subsequence - please investigate! '
'Try using a feature to extract a subsequence from the SeqProp'.format(self.id))
return
if not new_id:
new_id = '{}_subseq'.format(self.id)
new_sp = SeqProp(id=new_id, seq=sub_feature.seq)
if copy_letter_annotations:
new_sp.letter_annotations = sub_feature.letter_annotations
return new_sp | [
"def",
"get_subsequence",
"(",
"self",
",",
"resnums",
",",
"new_id",
"=",
"None",
",",
"copy_letter_annotations",
"=",
"True",
")",
":",
"# XTODO: documentation",
"biop_compound_list",
"=",
"[",
"]",
"for",
"resnum",
"in",
"resnums",
":",
"# XTODO can be sped up ... | Get a subsequence as a new SeqProp object given a list of residue numbers | [
"Get",
"a",
"subsequence",
"as",
"a",
"new",
"SeqProp",
"object",
"given",
"a",
"list",
"of",
"residue",
"numbers"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L573-L604 | train | 28,934 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_subsequence_from_property | def get_subsequence_from_property(self, property_key, property_value, condition,
return_resnums=False, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a certain property you want to find in the
original SeqProp's letter_annotation
This can be used to do something like extract the subsequence of exposed residues, so you can can run
calculations on that subsequence. Useful if you have questions like "are there any predicted surface exposed
cysteines in my protein sequence?"
Example:
>>> sp = SeqProp(id='tester', seq='MQSLE')
>>> sp.letter_annotations['a_key'] = [2, 2, 3, 1, 0]
>>> pk = 'a_key'
>>> pv = 2
>>> cond = '<'
>>> new_sp = sp.get_subsequence_from_property(pk, pv, cond)
>>> new_sp.letter_annotations[pk]
[1, 0]
>>> new_sp
SeqProp(seq=Seq('LE', ExtendedIUPACProtein()), id='tester_a_key_<_2_extracted', name='<unknown name>', description='<unknown description>', dbxrefs=[])
Args:
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (str): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties
"""
if property_key not in self.letter_annotations:
log.error('{}: {} not contained in the letter annotations'.format(self.id, property_key))
return
if condition == 'in':
subfeat_indices = list(locate(self.letter_annotations[property_key],
lambda x: x in property_value))
else:
subfeat_indices = list(
locate(self.letter_annotations[property_key], lambda x: ssbio.utils.check_condition(x, condition, property_value)))
subfeat_resnums = [x+1 for x in subfeat_indices]
new_sp = self.get_subsequence(resnums=subfeat_resnums, new_id='{}_{}_{}_{}_extracted'.format(self.id,
property_key,
condition,
property_value),
copy_letter_annotations=copy_letter_annotations)
if return_resnums:
return new_sp, subfeat_resnums
else:
return new_sp | python | def get_subsequence_from_property(self, property_key, property_value, condition,
return_resnums=False, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a certain property you want to find in the
original SeqProp's letter_annotation
This can be used to do something like extract the subsequence of exposed residues, so you can can run
calculations on that subsequence. Useful if you have questions like "are there any predicted surface exposed
cysteines in my protein sequence?"
Example:
>>> sp = SeqProp(id='tester', seq='MQSLE')
>>> sp.letter_annotations['a_key'] = [2, 2, 3, 1, 0]
>>> pk = 'a_key'
>>> pv = 2
>>> cond = '<'
>>> new_sp = sp.get_subsequence_from_property(pk, pv, cond)
>>> new_sp.letter_annotations[pk]
[1, 0]
>>> new_sp
SeqProp(seq=Seq('LE', ExtendedIUPACProtein()), id='tester_a_key_<_2_extracted', name='<unknown name>', description='<unknown description>', dbxrefs=[])
Args:
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (str): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties
"""
if property_key not in self.letter_annotations:
log.error('{}: {} not contained in the letter annotations'.format(self.id, property_key))
return
if condition == 'in':
subfeat_indices = list(locate(self.letter_annotations[property_key],
lambda x: x in property_value))
else:
subfeat_indices = list(
locate(self.letter_annotations[property_key], lambda x: ssbio.utils.check_condition(x, condition, property_value)))
subfeat_resnums = [x+1 for x in subfeat_indices]
new_sp = self.get_subsequence(resnums=subfeat_resnums, new_id='{}_{}_{}_{}_extracted'.format(self.id,
property_key,
condition,
property_value),
copy_letter_annotations=copy_letter_annotations)
if return_resnums:
return new_sp, subfeat_resnums
else:
return new_sp | [
"def",
"get_subsequence_from_property",
"(",
"self",
",",
"property_key",
",",
"property_value",
",",
"condition",
",",
"return_resnums",
"=",
"False",
",",
"copy_letter_annotations",
"=",
"True",
")",
":",
"if",
"property_key",
"not",
"in",
"self",
".",
"letter_a... | Get a subsequence as a new SeqProp object given a certain property you want to find in the
original SeqProp's letter_annotation
This can be used to do something like extract the subsequence of exposed residues, so you can can run
calculations on that subsequence. Useful if you have questions like "are there any predicted surface exposed
cysteines in my protein sequence?"
Example:
>>> sp = SeqProp(id='tester', seq='MQSLE')
>>> sp.letter_annotations['a_key'] = [2, 2, 3, 1, 0]
>>> pk = 'a_key'
>>> pv = 2
>>> cond = '<'
>>> new_sp = sp.get_subsequence_from_property(pk, pv, cond)
>>> new_sp.letter_annotations[pk]
[1, 0]
>>> new_sp
SeqProp(seq=Seq('LE', ExtendedIUPACProtein()), id='tester_a_key_<_2_extracted', name='<unknown name>', description='<unknown description>', dbxrefs=[])
Args:
property_key (str): Property key in the ``letter_annotations`` attribute that you want to filter using
property_value (str): Property value that you want to filter by
condition (str): ``<``, ``=``, ``>``, ``>=``, or ``<=`` to filter the values by
Returns:
SeqProp: New SeqProp object that you can run computations on or just extract its properties | [
"Get",
"a",
"subsequence",
"as",
"a",
"new",
"SeqProp",
"object",
"given",
"a",
"certain",
"property",
"you",
"want",
"to",
"find",
"in",
"the",
"original",
"SeqProp",
"s",
"letter_annotation"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L606-L658 | train | 28,935 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_biopython_pepstats | def get_biopython_pepstats(self, clean_seq=False):
"""Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute."""
if self.seq:
if clean_seq: # TODO: can make this a property of the SeqProp class
seq = self.seq_str.replace('X', '').replace('U', '')
else:
seq = self.seq_str
try:
pepstats = ssbio.protein.sequence.properties.residues.biopython_protein_analysis(seq)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.annotations.update(pepstats)
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | python | def get_biopython_pepstats(self, clean_seq=False):
"""Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute."""
if self.seq:
if clean_seq: # TODO: can make this a property of the SeqProp class
seq = self.seq_str.replace('X', '').replace('U', '')
else:
seq = self.seq_str
try:
pepstats = ssbio.protein.sequence.properties.residues.biopython_protein_analysis(seq)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.annotations.update(pepstats)
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | [
"def",
"get_biopython_pepstats",
"(",
"self",
",",
"clean_seq",
"=",
"False",
")",
":",
"if",
"self",
".",
"seq",
":",
"if",
"clean_seq",
":",
"# TODO: can make this a property of the SeqProp class",
"seq",
"=",
"self",
".",
"seq_str",
".",
"replace",
"(",
"'X'"... | Run Biopython's built in ProteinAnalysis module and store statistics in the ``annotations`` attribute. | [
"Run",
"Biopython",
"s",
"built",
"in",
"ProteinAnalysis",
"module",
"and",
"store",
"statistics",
"in",
"the",
"annotations",
"attribute",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L660-L679 | train | 28,936 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_emboss_pepstats | def get_emboss_pepstats(self):
"""Run the EMBOSS pepstats program on the protein sequence.
Stores statistics in the ``annotations`` attribute.
Saves a ``.pepstats`` file of the results where the sequence file is located.
"""
if not self.sequence_file:
raise IOError('FASTA file needs to be written for EMBOSS pepstats to be run')
outfile = ssbio.protein.sequence.properties.residues.emboss_pepstats_on_fasta(infile=self.sequence_path)
pepstats = ssbio.protein.sequence.properties.residues.emboss_pepstats_parser(outfile)
self.annotations.update(pepstats) | python | def get_emboss_pepstats(self):
"""Run the EMBOSS pepstats program on the protein sequence.
Stores statistics in the ``annotations`` attribute.
Saves a ``.pepstats`` file of the results where the sequence file is located.
"""
if not self.sequence_file:
raise IOError('FASTA file needs to be written for EMBOSS pepstats to be run')
outfile = ssbio.protein.sequence.properties.residues.emboss_pepstats_on_fasta(infile=self.sequence_path)
pepstats = ssbio.protein.sequence.properties.residues.emboss_pepstats_parser(outfile)
self.annotations.update(pepstats) | [
"def",
"get_emboss_pepstats",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"sequence_file",
":",
"raise",
"IOError",
"(",
"'FASTA file needs to be written for EMBOSS pepstats to be run'",
")",
"outfile",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"prope... | Run the EMBOSS pepstats program on the protein sequence.
Stores statistics in the ``annotations`` attribute.
Saves a ``.pepstats`` file of the results where the sequence file is located. | [
"Run",
"the",
"EMBOSS",
"pepstats",
"program",
"on",
"the",
"protein",
"sequence",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L681-L691 | train | 28,937 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_sliding_window_properties | def get_sliding_window_properties(self, scale, window):
"""Run a property calculator given a sliding window size
Stores statistics in the ``letter_annotations`` attribute.
Todo:
- Add and document all scales available to set
"""
# XTODO: documentation
if self.seq:
# if clean_seq: # TODO: can't do this because letter_annotations will complain about differing seqlen
# seq = self.seq_str.replace('X', '').replace('U', '')
# else:
# seq = self.seq_str
try:
prop = ssbio.protein.sequence.properties.residues.biopython_protein_scale(self.seq_str,
scale=scale,
window=window)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.letter_annotations['{}-window{}-biop'.format(scale, window)] = prop
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | python | def get_sliding_window_properties(self, scale, window):
"""Run a property calculator given a sliding window size
Stores statistics in the ``letter_annotations`` attribute.
Todo:
- Add and document all scales available to set
"""
# XTODO: documentation
if self.seq:
# if clean_seq: # TODO: can't do this because letter_annotations will complain about differing seqlen
# seq = self.seq_str.replace('X', '').replace('U', '')
# else:
# seq = self.seq_str
try:
prop = ssbio.protein.sequence.properties.residues.biopython_protein_scale(self.seq_str,
scale=scale,
window=window)
except KeyError as e:
log.error('{}: unable to run ProteinAnalysis module, unknown amino acid {}'.format(self.id, e))
return
except ValueError as e:
log.error('{}: unable to run ProteinAnalysis module, {}'.format(self.id, e))
return
self.letter_annotations['{}-window{}-biop'.format(scale, window)] = prop
else:
raise ValueError('{}: no sequence available, unable to run ProteinAnalysis'.format(self.id)) | [
"def",
"get_sliding_window_properties",
"(",
"self",
",",
"scale",
",",
"window",
")",
":",
"# XTODO: documentation",
"if",
"self",
".",
"seq",
":",
"# if clean_seq: # TODO: can't do this because letter_annotations will complain about differing seqlen",
"# seq = self.seq_str.r... | Run a property calculator given a sliding window size
Stores statistics in the ``letter_annotations`` attribute.
Todo:
- Add and document all scales available to set | [
"Run",
"a",
"property",
"calculator",
"given",
"a",
"sliding",
"window",
"size"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L693-L720 | train | 28,938 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.blast_pdb | def blast_pdb(self, seq_ident_cutoff=0, evalue=0.0001, display_link=False,
outdir=None, force_rerun=False):
"""BLAST this sequence to the PDB"""
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
if not self.seq_str:
log.error('{}: no sequence loaded'.format(self.id))
return None
try:
blast_results = ssbio.databases.pdb_seq.blast_pdb(self.seq_str,
outfile='{}_blast_pdb.xml'.format(custom_slugify(self.id)),
outdir=outdir,
force_rerun=force_rerun,
evalue=evalue,
seq_ident_cutoff=seq_ident_cutoff,
link=display_link)
except requests.ConnectionError as e:
log.error('{}: BLAST request timed out'.format(self.id))
print(e)
return None
return blast_results | python | def blast_pdb(self, seq_ident_cutoff=0, evalue=0.0001, display_link=False,
outdir=None, force_rerun=False):
"""BLAST this sequence to the PDB"""
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
if not self.seq_str:
log.error('{}: no sequence loaded'.format(self.id))
return None
try:
blast_results = ssbio.databases.pdb_seq.blast_pdb(self.seq_str,
outfile='{}_blast_pdb.xml'.format(custom_slugify(self.id)),
outdir=outdir,
force_rerun=force_rerun,
evalue=evalue,
seq_ident_cutoff=seq_ident_cutoff,
link=display_link)
except requests.ConnectionError as e:
log.error('{}: BLAST request timed out'.format(self.id))
print(e)
return None
return blast_results | [
"def",
"blast_pdb",
"(",
"self",
",",
"seq_ident_cutoff",
"=",
"0",
",",
"evalue",
"=",
"0.0001",
",",
"display_link",
"=",
"False",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"sel... | BLAST this sequence to the PDB | [
"BLAST",
"this",
"sequence",
"to",
"the",
"PDB"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L722-L747 | train | 28,939 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_residue_annotations | def get_residue_annotations(self, start_resnum, end_resnum=None):
"""Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues
"""
if not end_resnum:
end_resnum = start_resnum
# Create a new SeqFeature
f = SeqFeature(FeatureLocation(start_resnum - 1, end_resnum))
# Get sequence properties
return f.extract(self).letter_annotations | python | def get_residue_annotations(self, start_resnum, end_resnum=None):
"""Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues
"""
if not end_resnum:
end_resnum = start_resnum
# Create a new SeqFeature
f = SeqFeature(FeatureLocation(start_resnum - 1, end_resnum))
# Get sequence properties
return f.extract(self).letter_annotations | [
"def",
"get_residue_annotations",
"(",
"self",
",",
"start_resnum",
",",
"end_resnum",
"=",
"None",
")",
":",
"if",
"not",
"end_resnum",
":",
"end_resnum",
"=",
"start_resnum",
"# Create a new SeqFeature",
"f",
"=",
"SeqFeature",
"(",
"FeatureLocation",
"(",
"star... | Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues | [
"Retrieve",
"letter",
"annotations",
"for",
"a",
"residue",
"or",
"a",
"range",
"of",
"residues"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L749-L767 | train | 28,940 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_aggregation_propensity | def get_aggregation_propensity(self, email, password, cutoff_v=5, cutoff_n=5, run_amylmuts=False, outdir=None):
"""Run the AMYLPRED2 web server to calculate the aggregation propensity of this protein sequence, which is
the number of aggregation-prone segments on the unfolded protein sequence.
Stores statistics in the ``annotations`` attribute, under the key `aggprop-amylpred`.
See :mod:`ssbio.protein.sequence.properties.aggregation_propensity` for instructions and details.
"""
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
import ssbio.protein.sequence.properties.aggregation_propensity as agg
agg_predictions = agg.AMYLPRED(email=email, password=password)
result = agg_predictions.get_aggregation_propensity(seq=self, outdir=outdir,
cutoff_v=cutoff_v, cutoff_n=cutoff_n,
run_amylmuts=run_amylmuts)
self.annotations['aggprop-amylpred'] = result | python | def get_aggregation_propensity(self, email, password, cutoff_v=5, cutoff_n=5, run_amylmuts=False, outdir=None):
"""Run the AMYLPRED2 web server to calculate the aggregation propensity of this protein sequence, which is
the number of aggregation-prone segments on the unfolded protein sequence.
Stores statistics in the ``annotations`` attribute, under the key `aggprop-amylpred`.
See :mod:`ssbio.protein.sequence.properties.aggregation_propensity` for instructions and details.
"""
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
import ssbio.protein.sequence.properties.aggregation_propensity as agg
agg_predictions = agg.AMYLPRED(email=email, password=password)
result = agg_predictions.get_aggregation_propensity(seq=self, outdir=outdir,
cutoff_v=cutoff_v, cutoff_n=cutoff_n,
run_amylmuts=run_amylmuts)
self.annotations['aggprop-amylpred'] = result | [
"def",
"get_aggregation_propensity",
"(",
"self",
",",
"email",
",",
"password",
",",
"cutoff_v",
"=",
"5",
",",
"cutoff_n",
"=",
"5",
",",
"run_amylmuts",
"=",
"False",
",",
"outdir",
"=",
"None",
")",
":",
"if",
"not",
"outdir",
":",
"outdir",
"=",
"... | Run the AMYLPRED2 web server to calculate the aggregation propensity of this protein sequence, which is
the number of aggregation-prone segments on the unfolded protein sequence.
Stores statistics in the ``annotations`` attribute, under the key `aggprop-amylpred`.
See :mod:`ssbio.protein.sequence.properties.aggregation_propensity` for instructions and details. | [
"Run",
"the",
"AMYLPRED2",
"web",
"server",
"to",
"calculate",
"the",
"aggregation",
"propensity",
"of",
"this",
"protein",
"sequence",
"which",
"is",
"the",
"number",
"of",
"aggregation",
"-",
"prone",
"segments",
"on",
"the",
"unfolded",
"protein",
"sequence",... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L769-L789 | train | 28,941 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.get_thermostability | def get_thermostability(self, at_temp):
"""Run the thermostability calculator using either the Dill or Oobatake methods.
Stores calculated (dG, Keq) tuple in the ``annotations`` attribute, under the key
`thermostability_<TEMP>-<METHOD_USED>`.
See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T` for instructions and details.
"""
import ssbio.protein.sequence.properties.thermostability as ts
dG = ts.get_dG_at_T(seq=self, temp=at_temp)
self.annotations['thermostability_{}_C-{}'.format(at_temp, dG[2].lower())] = (dG[0], dG[1]) | python | def get_thermostability(self, at_temp):
"""Run the thermostability calculator using either the Dill or Oobatake methods.
Stores calculated (dG, Keq) tuple in the ``annotations`` attribute, under the key
`thermostability_<TEMP>-<METHOD_USED>`.
See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T` for instructions and details.
"""
import ssbio.protein.sequence.properties.thermostability as ts
dG = ts.get_dG_at_T(seq=self, temp=at_temp)
self.annotations['thermostability_{}_C-{}'.format(at_temp, dG[2].lower())] = (dG[0], dG[1]) | [
"def",
"get_thermostability",
"(",
"self",
",",
"at_temp",
")",
":",
"import",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"properties",
".",
"thermostability",
"as",
"ts",
"dG",
"=",
"ts",
".",
"get_dG_at_T",
"(",
"seq",
"=",
"self",
",",
"temp",
"="... | Run the thermostability calculator using either the Dill or Oobatake methods.
Stores calculated (dG, Keq) tuple in the ``annotations`` attribute, under the key
`thermostability_<TEMP>-<METHOD_USED>`.
See :func:`ssbio.protein.sequence.properties.thermostability.get_dG_at_T` for instructions and details. | [
"Run",
"the",
"thermostability",
"calculator",
"using",
"either",
"the",
"Dill",
"or",
"Oobatake",
"methods",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L810-L823 | train | 28,942 |
SBRG/ssbio | ssbio/protein/sequence/seqprop.py | SeqProp.store_iupred_disorder_predictions | def store_iupred_disorder_predictions(seqprop, iupred_path,
iupred_exec, prediction_type, force_rerun=False):
"""Scores above 0.5 indicate disorder"""
os.environ['IUPred_PATH'] = iupred_path
stored_key = 'disorder-{}-iupred'.format(prediction_type)
if stored_key not in seqprop.letter_annotations or force_rerun:
if not seqprop.sequence_file:
with tempfile.NamedTemporaryFile(delete=True) as f:
SeqIO.write(seqprop, f.name, "fasta")
command = '{} {} {}'.format(iupred_exec, f.name, prediction_type)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True)
output = process.communicate()
iupred = [float(x.split()[2]) for x in output[0].decode().split('\n') if
not x.startswith('#') and len(x) > 0]
seqprop.letter_annotations[stored_key] = iupred
else:
command = '{} {} {}'.format(iupred_exec, seqprop.sequence_path, prediction_type)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True)
output = process.communicate()
iupred = [float(x.split()[2]) for x in output[0].decode().split('\n') if
not x.startswith('#') and len(x) > 0]
seqprop.letter_annotations[stored_key] = iupred | python | def store_iupred_disorder_predictions(seqprop, iupred_path,
iupred_exec, prediction_type, force_rerun=False):
"""Scores above 0.5 indicate disorder"""
os.environ['IUPred_PATH'] = iupred_path
stored_key = 'disorder-{}-iupred'.format(prediction_type)
if stored_key not in seqprop.letter_annotations or force_rerun:
if not seqprop.sequence_file:
with tempfile.NamedTemporaryFile(delete=True) as f:
SeqIO.write(seqprop, f.name, "fasta")
command = '{} {} {}'.format(iupred_exec, f.name, prediction_type)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True)
output = process.communicate()
iupred = [float(x.split()[2]) for x in output[0].decode().split('\n') if
not x.startswith('#') and len(x) > 0]
seqprop.letter_annotations[stored_key] = iupred
else:
command = '{} {} {}'.format(iupred_exec, seqprop.sequence_path, prediction_type)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None, shell=True)
output = process.communicate()
iupred = [float(x.split()[2]) for x in output[0].decode().split('\n') if
not x.startswith('#') and len(x) > 0]
seqprop.letter_annotations[stored_key] = iupred | [
"def",
"store_iupred_disorder_predictions",
"(",
"seqprop",
",",
"iupred_path",
",",
"iupred_exec",
",",
"prediction_type",
",",
"force_rerun",
"=",
"False",
")",
":",
"os",
".",
"environ",
"[",
"'IUPred_PATH'",
"]",
"=",
"iupred_path",
"stored_key",
"=",
"'disord... | Scores above 0.5 indicate disorder | [
"Scores",
"above",
"0",
".",
"5",
"indicate",
"disorder"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L836-L857 | train | 28,943 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.run_scratch | def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):
"""Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns:
"""
if not outname:
outname = self.project_name
if not outdir:
outdir = ''
outname = op.join(outdir, outname)
self.out_sspro = '{}.ss'.format(outname)
self.out_sspro8 = '{}.ss8'.format(outname)
self.out_accpro = '{}.acc'.format(outname)
self.out_accpro20 = '{}.acc20'.format(outname)
# TODO: check for multiple output files in command_runner
ssbio.utils.command_runner(
shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores),
force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname)) | python | def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):
"""Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns:
"""
if not outname:
outname = self.project_name
if not outdir:
outdir = ''
outname = op.join(outdir, outname)
self.out_sspro = '{}.ss'.format(outname)
self.out_sspro8 = '{}.ss8'.format(outname)
self.out_accpro = '{}.acc'.format(outname)
self.out_accpro20 = '{}.acc20'.format(outname)
# TODO: check for multiple output files in command_runner
ssbio.utils.command_runner(
shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores),
force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname)) | [
"def",
"run_scratch",
"(",
"self",
",",
"path_to_scratch",
",",
"num_cores",
"=",
"1",
",",
"outname",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"if",
"not",
"outname",
":",
"outname",
"=",
"self",
".",
"proje... | Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns: | [
"Run",
"SCRATCH",
"on",
"the",
"sequence_file",
"that",
"was",
"loaded",
"into",
"the",
"class",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L39-L66 | train | 28,944 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.sspro_results | def sspro_results(self):
"""Parse the SSpro output file and return a dict of secondary structure compositions.
Returns:
dict: Keys are sequence IDs, values are the lists of secondary structure predictions.
H: helix
E: strand
C: the rest
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro) | python | def sspro_results(self):
"""Parse the SSpro output file and return a dict of secondary structure compositions.
Returns:
dict: Keys are sequence IDs, values are the lists of secondary structure predictions.
H: helix
E: strand
C: the rest
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro) | [
"def",
"sspro_results",
"(",
"self",
")",
":",
"return",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"fasta",
".",
"load_fasta_file_as_dict_of_seqs",
"(",
"self",
".",
"out_sspro",
")"
] | Parse the SSpro output file and return a dict of secondary structure compositions.
Returns:
dict: Keys are sequence IDs, values are the lists of secondary structure predictions.
H: helix
E: strand
C: the rest | [
"Parse",
"the",
"SSpro",
"output",
"file",
"and",
"return",
"a",
"dict",
"of",
"secondary",
"structure",
"compositions",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L68-L78 | train | 28,945 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.sspro_summary | def sspro_summary(self):
"""Parse the SSpro output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: helix
E: strand
C: the rest
"""
summary = {}
records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro)
for r in records:
seq_summary = {}
seq_summary['percent_H-sspro'] = r.seq.count('H')/float(len(r))
seq_summary['percent_E-sspro'] = r.seq.count('E')/float(len(r))
seq_summary['percent_C-sspro'] = r.seq.count('C')/float(len(r))
summary[r.id] = seq_summary
return summary | python | def sspro_summary(self):
"""Parse the SSpro output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: helix
E: strand
C: the rest
"""
summary = {}
records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro)
for r in records:
seq_summary = {}
seq_summary['percent_H-sspro'] = r.seq.count('H')/float(len(r))
seq_summary['percent_E-sspro'] = r.seq.count('E')/float(len(r))
seq_summary['percent_C-sspro'] = r.seq.count('C')/float(len(r))
summary[r.id] = seq_summary
return summary | [
"def",
"sspro_summary",
"(",
"self",
")",
":",
"summary",
"=",
"{",
"}",
"records",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"fasta",
".",
"load_fasta_file",
"(",
"self",
".",
"out_sspro",
")",
"for",
"r",
"in",
"records",
":",... | Parse the SSpro output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: helix
E: strand
C: the rest | [
"Parse",
"the",
"SSpro",
"output",
"file",
"and",
"return",
"a",
"summary",
"of",
"secondary",
"structure",
"composition",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L80-L104 | train | 28,946 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.sspro8_results | def sspro8_results(self):
"""Parse the SSpro8 output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro8) | python | def sspro8_results(self):
"""Parse the SSpro8 output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro8) | [
"def",
"sspro8_results",
"(",
"self",
")",
":",
"return",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"fasta",
".",
"load_fasta_file_as_dict_of_seqs",
"(",
"self",
".",
"out_sspro8",
")"
] | Parse the SSpro8 output file and return a dict of secondary structure compositions. | [
"Parse",
"the",
"SSpro8",
"output",
"file",
"and",
"return",
"a",
"dict",
"of",
"secondary",
"structure",
"compositions",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L106-L109 | train | 28,947 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.sspro8_summary | def sspro8_summary(self):
"""Parse the SSpro8 output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: alpha-helix
G: 310-helix
I: pi-helix (extremely rare)
E: extended strand
B: beta-bridge
T: turn
S: bend
C: the rest
"""
summary = {}
records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro8)
for r in records:
seq_summary = {}
seq_summary['percent_H-sspro8'] = r.seq.count('H') / float(len(r))
seq_summary['percent_G-sspro8'] = r.seq.count('G') / float(len(r))
seq_summary['percent_I-sspro8'] = r.seq.count('I') / float(len(r))
seq_summary['percent_E-sspro8'] = r.seq.count('E') / float(len(r))
seq_summary['percent_B-sspro8'] = r.seq.count('B') / float(len(r))
seq_summary['percent_T-sspro8'] = r.seq.count('T') / float(len(r))
seq_summary['percent_S-sspro8'] = r.seq.count('S') / float(len(r))
seq_summary['percent_C-sspro8'] = r.seq.count('C') / float(len(r))
summary[r.id] = seq_summary
return summary | python | def sspro8_summary(self):
"""Parse the SSpro8 output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: alpha-helix
G: 310-helix
I: pi-helix (extremely rare)
E: extended strand
B: beta-bridge
T: turn
S: bend
C: the rest
"""
summary = {}
records = ssbio.protein.sequence.utils.fasta.load_fasta_file(self.out_sspro8)
for r in records:
seq_summary = {}
seq_summary['percent_H-sspro8'] = r.seq.count('H') / float(len(r))
seq_summary['percent_G-sspro8'] = r.seq.count('G') / float(len(r))
seq_summary['percent_I-sspro8'] = r.seq.count('I') / float(len(r))
seq_summary['percent_E-sspro8'] = r.seq.count('E') / float(len(r))
seq_summary['percent_B-sspro8'] = r.seq.count('B') / float(len(r))
seq_summary['percent_T-sspro8'] = r.seq.count('T') / float(len(r))
seq_summary['percent_S-sspro8'] = r.seq.count('S') / float(len(r))
seq_summary['percent_C-sspro8'] = r.seq.count('C') / float(len(r))
summary[r.id] = seq_summary
return summary | [
"def",
"sspro8_summary",
"(",
"self",
")",
":",
"summary",
"=",
"{",
"}",
"records",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"fasta",
".",
"load_fasta_file",
"(",
"self",
".",
"out_sspro8",
")",
"for",
"r",
"in",
"records",
":... | Parse the SSpro8 output file and return a summary of secondary structure composition.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Returns:
dict: Percentage of:
H: alpha-helix
G: 310-helix
I: pi-helix (extremely rare)
E: extended strand
B: beta-bridge
T: turn
S: bend
C: the rest | [
"Parse",
"the",
"SSpro8",
"output",
"file",
"and",
"return",
"a",
"summary",
"of",
"secondary",
"structure",
"composition",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L111-L145 | train | 28,948 |
SBRG/ssbio | ssbio/protein/sequence/properties/scratch.py | SCRATCH.accpro_results | def accpro_results(self):
"""Parse the ACCpro output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro) | python | def accpro_results(self):
"""Parse the ACCpro output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro) | [
"def",
"accpro_results",
"(",
"self",
")",
":",
"return",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"fasta",
".",
"load_fasta_file_as_dict_of_seqs",
"(",
"self",
".",
"out_accpro",
")"
] | Parse the ACCpro output file and return a dict of secondary structure compositions. | [
"Parse",
"the",
"ACCpro",
"output",
"file",
"and",
"return",
"a",
"dict",
"of",
"secondary",
"structure",
"compositions",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L147-L150 | train | 28,949 |
SBRG/ssbio | ssbio/core/modelpro.py | model_loader | def model_loader(gem_file_path, gem_file_type):
"""Consolidated function to load a GEM using COBRApy. Specify the file type being loaded.
Args:
gem_file_path (str): Path to model file
gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format
Returns:
COBRApy Model object.
"""
if gem_file_type.lower() == 'xml' or gem_file_type.lower() == 'sbml':
model = read_sbml_model(gem_file_path)
elif gem_file_type.lower() == 'mat':
model = load_matlab_model(gem_file_path)
elif gem_file_type.lower() == 'json':
model = load_json_model(gem_file_path)
else:
raise ValueError('File type must be "sbml", "xml", "mat", or "json".')
return model | python | def model_loader(gem_file_path, gem_file_type):
"""Consolidated function to load a GEM using COBRApy. Specify the file type being loaded.
Args:
gem_file_path (str): Path to model file
gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format
Returns:
COBRApy Model object.
"""
if gem_file_type.lower() == 'xml' or gem_file_type.lower() == 'sbml':
model = read_sbml_model(gem_file_path)
elif gem_file_type.lower() == 'mat':
model = load_matlab_model(gem_file_path)
elif gem_file_type.lower() == 'json':
model = load_json_model(gem_file_path)
else:
raise ValueError('File type must be "sbml", "xml", "mat", or "json".')
return model | [
"def",
"model_loader",
"(",
"gem_file_path",
",",
"gem_file_type",
")",
":",
"if",
"gem_file_type",
".",
"lower",
"(",
")",
"==",
"'xml'",
"or",
"gem_file_type",
".",
"lower",
"(",
")",
"==",
"'sbml'",
":",
"model",
"=",
"read_sbml_model",
"(",
"gem_file_pat... | Consolidated function to load a GEM using COBRApy. Specify the file type being loaded.
Args:
gem_file_path (str): Path to model file
gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format
Returns:
COBRApy Model object. | [
"Consolidated",
"function",
"to",
"load",
"a",
"GEM",
"using",
"COBRApy",
".",
"Specify",
"the",
"file",
"type",
"being",
"loaded",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/modelpro.py#L140-L161 | train | 28,950 |
SBRG/ssbio | ssbio/core/modelpro.py | filter_out_spontaneous_genes | def filter_out_spontaneous_genes(genes, custom_spont_id=None):
"""Return the DictList of genes that are not spontaneous in a model.
Args:
genes (DictList): Genes DictList
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
DictList: genes excluding ones that are spontaneous
"""
new_genes = DictList()
for gene in genes:
if not is_spontaneous(gene, custom_id=custom_spont_id):
new_genes.append(gene)
return new_genes | python | def filter_out_spontaneous_genes(genes, custom_spont_id=None):
"""Return the DictList of genes that are not spontaneous in a model.
Args:
genes (DictList): Genes DictList
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
DictList: genes excluding ones that are spontaneous
"""
new_genes = DictList()
for gene in genes:
if not is_spontaneous(gene, custom_id=custom_spont_id):
new_genes.append(gene)
return new_genes | [
"def",
"filter_out_spontaneous_genes",
"(",
"genes",
",",
"custom_spont_id",
"=",
"None",
")",
":",
"new_genes",
"=",
"DictList",
"(",
")",
"for",
"gene",
"in",
"genes",
":",
"if",
"not",
"is_spontaneous",
"(",
"gene",
",",
"custom_id",
"=",
"custom_spont_id",... | Return the DictList of genes that are not spontaneous in a model.
Args:
genes (DictList): Genes DictList
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
DictList: genes excluding ones that are spontaneous | [
"Return",
"the",
"DictList",
"of",
"genes",
"that",
"are",
"not",
"spontaneous",
"in",
"a",
"model",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/modelpro.py#L185-L201 | train | 28,951 |
SBRG/ssbio | ssbio/core/modelpro.py | true_num_genes | def true_num_genes(model, custom_spont_id=None):
"""Return the number of genes in a model ignoring spontaneously labeled genes.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of genes excluding spontaneous genes
"""
true_num = 0
for gene in model.genes:
if not is_spontaneous(gene, custom_id=custom_spont_id):
true_num += 1
return true_num | python | def true_num_genes(model, custom_spont_id=None):
"""Return the number of genes in a model ignoring spontaneously labeled genes.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of genes excluding spontaneous genes
"""
true_num = 0
for gene in model.genes:
if not is_spontaneous(gene, custom_id=custom_spont_id):
true_num += 1
return true_num | [
"def",
"true_num_genes",
"(",
"model",
",",
"custom_spont_id",
"=",
"None",
")",
":",
"true_num",
"=",
"0",
"for",
"gene",
"in",
"model",
".",
"genes",
":",
"if",
"not",
"is_spontaneous",
"(",
"gene",
",",
"custom_id",
"=",
"custom_spont_id",
")",
":",
"... | Return the number of genes in a model ignoring spontaneously labeled genes.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of genes excluding spontaneous genes | [
"Return",
"the",
"number",
"of",
"genes",
"in",
"a",
"model",
"ignoring",
"spontaneously",
"labeled",
"genes",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/modelpro.py#L204-L219 | train | 28,952 |
SBRG/ssbio | ssbio/core/modelpro.py | true_num_reactions | def true_num_reactions(model, custom_spont_id=None):
"""Return the number of reactions associated with a gene.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of reactions associated with a gene
"""
true_num = 0
for rxn in model.reactions:
if len(rxn.genes) == 0:
continue
if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):
continue
else:
true_num += 1
return true_num | python | def true_num_reactions(model, custom_spont_id=None):
"""Return the number of reactions associated with a gene.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of reactions associated with a gene
"""
true_num = 0
for rxn in model.reactions:
if len(rxn.genes) == 0:
continue
if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):
continue
else:
true_num += 1
return true_num | [
"def",
"true_num_reactions",
"(",
"model",
",",
"custom_spont_id",
"=",
"None",
")",
":",
"true_num",
"=",
"0",
"for",
"rxn",
"in",
"model",
".",
"reactions",
":",
"if",
"len",
"(",
"rxn",
".",
"genes",
")",
"==",
"0",
":",
"continue",
"if",
"len",
"... | Return the number of reactions associated with a gene.
Args:
model (Model):
custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``
Returns:
int: Number of reactions associated with a gene | [
"Return",
"the",
"number",
"of",
"reactions",
"associated",
"with",
"a",
"gene",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/modelpro.py#L222-L241 | train | 28,953 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/WWW/WHATIF.py | WHATIF._smcra_to_str | def _smcra_to_str(self, smcra, temp_dir='/tmp/'):
"""
WHATIF's input are PDB format files.
Converts a SMCRA object to a PDB formatted string.
"""
temp_path = tempfile.mktemp( '.pdb', dir=temp_dir )
io = PDBIO()
io.set_structure(smcra)
io.save(temp_path)
f = open(temp_path, 'r')
string = f.read()
f.close()
os.remove(temp_path)
return string | python | def _smcra_to_str(self, smcra, temp_dir='/tmp/'):
"""
WHATIF's input are PDB format files.
Converts a SMCRA object to a PDB formatted string.
"""
temp_path = tempfile.mktemp( '.pdb', dir=temp_dir )
io = PDBIO()
io.set_structure(smcra)
io.save(temp_path)
f = open(temp_path, 'r')
string = f.read()
f.close()
os.remove(temp_path)
return string | [
"def",
"_smcra_to_str",
"(",
"self",
",",
"smcra",
",",
"temp_dir",
"=",
"'/tmp/'",
")",
":",
"temp_path",
"=",
"tempfile",
".",
"mktemp",
"(",
"'.pdb'",
",",
"dir",
"=",
"temp_dir",
")",
"io",
"=",
"PDBIO",
"(",
")",
"io",
".",
"set_structure",
"(",
... | WHATIF's input are PDB format files.
Converts a SMCRA object to a PDB formatted string. | [
"WHATIF",
"s",
"input",
"are",
"PDB",
"format",
"files",
".",
"Converts",
"a",
"SMCRA",
"object",
"to",
"a",
"PDB",
"formatted",
"string",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/WWW/WHATIF.py#L58-L76 | train | 28,954 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/WWW/WHATIF.py | WHATIF.is_alive | def is_alive(self):
"""
Test Function to check WHAT IF servers are up and running.
"""
u = urllib.urlopen("http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/")
x = xml.dom.minidom.parse(u)
self.alive = len(x.getElementsByTagName("TestEmptyResponse"))
return self.alive | python | def is_alive(self):
"""
Test Function to check WHAT IF servers are up and running.
"""
u = urllib.urlopen("http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/")
x = xml.dom.minidom.parse(u)
self.alive = len(x.getElementsByTagName("TestEmptyResponse"))
return self.alive | [
"def",
"is_alive",
"(",
"self",
")",
":",
"u",
"=",
"urllib",
".",
"urlopen",
"(",
"\"http://wiws.cmbi.ru.nl/rest/TestEmpty/id/1crn/\"",
")",
"x",
"=",
"xml",
".",
"dom",
".",
"minidom",
".",
"parse",
"(",
"u",
")",
"self",
".",
"alive",
"=",
"len",
"(",... | Test Function to check WHAT IF servers are up and running. | [
"Test",
"Function",
"to",
"check",
"WHAT",
"IF",
"servers",
"are",
"up",
"and",
"running",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/WWW/WHATIF.py#L94-L103 | train | 28,955 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/WWW/WHATIF.py | WHATIF.PDBasXMLwithSymwithPolarH | def PDBasXMLwithSymwithPolarH(self, id):
"""
Adds Hydrogen Atoms to a Structure.
"""
print _WARNING
# Protonated Structure in XML Format
h_s_xml = urllib.urlopen("http://www.cmbi.ru.nl/wiwsd/rest/PDBasXMLwithSymwithPolarH/id/" + id)
self.raw = h_s_xml
p = self.parser
h_s_smcra = p.read(h_s_xml, 'WHATIF_Output')
return h_s_smcra | python | def PDBasXMLwithSymwithPolarH(self, id):
"""
Adds Hydrogen Atoms to a Structure.
"""
print _WARNING
# Protonated Structure in XML Format
h_s_xml = urllib.urlopen("http://www.cmbi.ru.nl/wiwsd/rest/PDBasXMLwithSymwithPolarH/id/" + id)
self.raw = h_s_xml
p = self.parser
h_s_smcra = p.read(h_s_xml, 'WHATIF_Output')
return h_s_smcra | [
"def",
"PDBasXMLwithSymwithPolarH",
"(",
"self",
",",
"id",
")",
":",
"print",
"_WARNING",
"# Protonated Structure in XML Format",
"h_s_xml",
"=",
"urllib",
".",
"urlopen",
"(",
"\"http://www.cmbi.ru.nl/wiwsd/rest/PDBasXMLwithSymwithPolarH/id/\"",
"+",
"id",
")",
"self",
... | Adds Hydrogen Atoms to a Structure. | [
"Adds",
"Hydrogen",
"Atoms",
"to",
"a",
"Structure",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/WWW/WHATIF.py#L130-L142 | train | 28,956 |
SBRG/ssbio | ssbio/databases/bigg.py | get_pdbs_for_gene | def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):
"""Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
"""
my_structures = []
# Download gene info
gene = ssbio.utils.request_json(link='http://bigg.ucsd.edu/api/v2/models/{}/genes/{}'.format(bigg_model, bigg_gene),
outfile='{}_{}.json'.format(bigg_model, bigg_gene),
outdir=cache_dir,
force_rerun_flag=force_rerun)
uniprots = []
if 'database_links' in gene:
if 'UniProt' in gene['database_links']:
uniprots = [x['id'] for x in gene['database_links']['UniProt']]
elif 'NCBI GI' in gene['database_links']:
uniprots = []
gis = [x['id'] for x in gene['database_links']['NCBI GI']]
gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()
uniprots.extend(gi_uniprots)
uniprots = ssbio.utils.flatlist_dropdup(uniprots)
uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]
if uniprots:
for u in uniprots:
get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)
if get_best_structure:
for best_structure in get_best_structure:
my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))
return my_structures | python | def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):
"""Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
"""
my_structures = []
# Download gene info
gene = ssbio.utils.request_json(link='http://bigg.ucsd.edu/api/v2/models/{}/genes/{}'.format(bigg_model, bigg_gene),
outfile='{}_{}.json'.format(bigg_model, bigg_gene),
outdir=cache_dir,
force_rerun_flag=force_rerun)
uniprots = []
if 'database_links' in gene:
if 'UniProt' in gene['database_links']:
uniprots = [x['id'] for x in gene['database_links']['UniProt']]
elif 'NCBI GI' in gene['database_links']:
uniprots = []
gis = [x['id'] for x in gene['database_links']['NCBI GI']]
gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()
uniprots.extend(gi_uniprots)
uniprots = ssbio.utils.flatlist_dropdup(uniprots)
uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]
if uniprots:
for u in uniprots:
get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)
if get_best_structure:
for best_structure in get_best_structure:
my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))
return my_structures | [
"def",
"get_pdbs_for_gene",
"(",
"bigg_model",
",",
"bigg_gene",
",",
"cache_dir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"force_rerun",
"=",
"False",
")",
":",
"my_structures",
"=",
"[",
"]",
"# Download gene info",
"gene",
"=",
"ssbio",
".",
"u... | Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id) | [
"Attempt",
"to",
"get",
"a",
"rank",
"-",
"ordered",
"list",
"of",
"available",
"PDB",
"structures",
"for",
"a",
"BiGG",
"Model",
"and",
"its",
"gene",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/bigg.py#L12-L50 | train | 28,957 |
SBRG/ssbio | ssbio/protein/structure/properties/dssp.py | get_dssp_df_on_file | def get_dssp_df_on_file(pdb_file, outfile=None, outdir=None, outext='_dssp.df', force_rerun=False):
"""Run DSSP directly on a structure file with the Biopython method Bio.PDB.DSSP.dssp_dict_from_pdb_file
Avoids errors like: PDBException: Structure/DSSP mismatch at <Residue MSE het= resseq=19 icode= >
by not matching information to the structure file (DSSP fills in the ID "X" for unknown residues)
Args:
pdb_file: Path to PDB file
outfile: Name of output file
outdir: Path to output directory
outext: Extension of output file
force_rerun: If DSSP should be rerun if the outfile exists
Returns:
DataFrame: DSSP results summarized
"""
# TODO: function unfinished
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=pdb_file, outname=outfile, outdir=outdir, outext=outext)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
try:
d = dssp_dict_from_pdb_file(pdb_file)
except Exception('DSSP failed to produce an output'):
log.error('{}: unable to run DSSP'.format(pdb_file))
return pd.DataFrame()
appender = []
# TODO: WARNING: d is slightly different than when using function get_dssp_df
for k in d[1]:
to_append = []
y = d[0][k]
chain = k[0]
residue = k[1]
het = residue[0]
resnum = residue[1]
icode = residue[2]
to_append.extend([chain, resnum, icode])
to_append.extend(y)
appender.append(to_append)
cols = ['chain', 'resnum', 'icode',
'dssp_index', 'aa', 'ss', 'exposure_rsa', 'phi', 'psi',
'NH_O_1_relidx', 'NH_O_1_energy', 'O_NH_1_relidx',
'O_NH_1_energy', 'NH_O_2_relidx', 'NH_O_2_energy',
'O_NH_2_relidx', 'O_NH_2_energy']
df = pd.DataFrame.from_records(appender, columns=cols)
# Adding additional columns
df = df[df['aa'].isin(list(aa1))]
df['aa_three'] = df['aa'].apply(one_to_three)
df['max_acc'] = df['aa_three'].map(residue_max_acc['Sander'].get)
df[['exposure_rsa', 'max_acc']] = df[['exposure_rsa', 'max_acc']].astype(float)
df['exposure_asa'] = df['exposure_rsa'] * df['max_acc']
df.to_csv(outfile)
else:
log.debug('{}: already ran DSSP and force_rerun={}, loading results'.format(outfile, force_rerun))
df = pd.read_csv(outfile, index_col=0)
return df | python | def get_dssp_df_on_file(pdb_file, outfile=None, outdir=None, outext='_dssp.df', force_rerun=False):
"""Run DSSP directly on a structure file with the Biopython method Bio.PDB.DSSP.dssp_dict_from_pdb_file
Avoids errors like: PDBException: Structure/DSSP mismatch at <Residue MSE het= resseq=19 icode= >
by not matching information to the structure file (DSSP fills in the ID "X" for unknown residues)
Args:
pdb_file: Path to PDB file
outfile: Name of output file
outdir: Path to output directory
outext: Extension of output file
force_rerun: If DSSP should be rerun if the outfile exists
Returns:
DataFrame: DSSP results summarized
"""
# TODO: function unfinished
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=pdb_file, outname=outfile, outdir=outdir, outext=outext)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
try:
d = dssp_dict_from_pdb_file(pdb_file)
except Exception('DSSP failed to produce an output'):
log.error('{}: unable to run DSSP'.format(pdb_file))
return pd.DataFrame()
appender = []
# TODO: WARNING: d is slightly different than when using function get_dssp_df
for k in d[1]:
to_append = []
y = d[0][k]
chain = k[0]
residue = k[1]
het = residue[0]
resnum = residue[1]
icode = residue[2]
to_append.extend([chain, resnum, icode])
to_append.extend(y)
appender.append(to_append)
cols = ['chain', 'resnum', 'icode',
'dssp_index', 'aa', 'ss', 'exposure_rsa', 'phi', 'psi',
'NH_O_1_relidx', 'NH_O_1_energy', 'O_NH_1_relidx',
'O_NH_1_energy', 'NH_O_2_relidx', 'NH_O_2_energy',
'O_NH_2_relidx', 'O_NH_2_energy']
df = pd.DataFrame.from_records(appender, columns=cols)
# Adding additional columns
df = df[df['aa'].isin(list(aa1))]
df['aa_three'] = df['aa'].apply(one_to_three)
df['max_acc'] = df['aa_three'].map(residue_max_acc['Sander'].get)
df[['exposure_rsa', 'max_acc']] = df[['exposure_rsa', 'max_acc']].astype(float)
df['exposure_asa'] = df['exposure_rsa'] * df['max_acc']
df.to_csv(outfile)
else:
log.debug('{}: already ran DSSP and force_rerun={}, loading results'.format(outfile, force_rerun))
df = pd.read_csv(outfile, index_col=0)
return df | [
"def",
"get_dssp_df_on_file",
"(",
"pdb_file",
",",
"outfile",
"=",
"None",
",",
"outdir",
"=",
"None",
",",
"outext",
"=",
"'_dssp.df'",
",",
"force_rerun",
"=",
"False",
")",
":",
"# TODO: function unfinished",
"# Create the output file name",
"outfile",
"=",
"s... | Run DSSP directly on a structure file with the Biopython method Bio.PDB.DSSP.dssp_dict_from_pdb_file
Avoids errors like: PDBException: Structure/DSSP mismatch at <Residue MSE het= resseq=19 icode= >
by not matching information to the structure file (DSSP fills in the ID "X" for unknown residues)
Args:
pdb_file: Path to PDB file
outfile: Name of output file
outdir: Path to output directory
outext: Extension of output file
force_rerun: If DSSP should be rerun if the outfile exists
Returns:
DataFrame: DSSP results summarized | [
"Run",
"DSSP",
"directly",
"on",
"a",
"structure",
"file",
"with",
"the",
"Biopython",
"method",
"Bio",
".",
"PDB",
".",
"DSSP",
".",
"dssp_dict_from_pdb_file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/dssp.py#L66-L128 | train | 28,958 |
SBRG/ssbio | ssbio/protein/structure/properties/dssp.py | secondary_structure_summary | def secondary_structure_summary(dssp_df):
"""Summarize the secondary structure content of the DSSP dataframe for each chain.
Args:
dssp_df: Pandas DataFrame of parsed DSSP results
Returns:
dict: Chain to secondary structure summary dictionary
"""
chains = dssp_df.chain.unique()
infodict = {}
for chain in chains:
expoinfo = defaultdict(int)
chain_df = dssp_df[dssp_df.chain == chain]
counts = chain_df.ss.value_counts()
total = float(len(chain_df))
for ss, count in iteritems(counts):
if ss == '-':
expoinfo['percent_C-dssp'] = count/total
if ss == 'H':
expoinfo['percent_H-dssp'] = count/total
if ss == 'B':
expoinfo['percent_B-dssp'] = count/total
if ss == 'E':
expoinfo['percent_E-dssp'] = count/total
if ss == 'G':
expoinfo['percent_G-dssp'] = count/total
if ss == 'I':
expoinfo['percent_I-dssp'] = count/total
if ss == 'T':
expoinfo['percent_T-dssp'] = count/total
if ss == 'S':
expoinfo['percent_S-dssp'] = count/total
# Filling in 0 percenters
for per in ['percent_C-dssp','percent_H-dssp','percent_B-dssp','percent_E-dssp',
'percent_G-dssp','percent_I-dssp','percent_T-dssp','percent_S-dssp']:
if per not in expoinfo:
expoinfo[per] = 0.0
infodict[chain] = dict(expoinfo)
return infodict | python | def secondary_structure_summary(dssp_df):
"""Summarize the secondary structure content of the DSSP dataframe for each chain.
Args:
dssp_df: Pandas DataFrame of parsed DSSP results
Returns:
dict: Chain to secondary structure summary dictionary
"""
chains = dssp_df.chain.unique()
infodict = {}
for chain in chains:
expoinfo = defaultdict(int)
chain_df = dssp_df[dssp_df.chain == chain]
counts = chain_df.ss.value_counts()
total = float(len(chain_df))
for ss, count in iteritems(counts):
if ss == '-':
expoinfo['percent_C-dssp'] = count/total
if ss == 'H':
expoinfo['percent_H-dssp'] = count/total
if ss == 'B':
expoinfo['percent_B-dssp'] = count/total
if ss == 'E':
expoinfo['percent_E-dssp'] = count/total
if ss == 'G':
expoinfo['percent_G-dssp'] = count/total
if ss == 'I':
expoinfo['percent_I-dssp'] = count/total
if ss == 'T':
expoinfo['percent_T-dssp'] = count/total
if ss == 'S':
expoinfo['percent_S-dssp'] = count/total
# Filling in 0 percenters
for per in ['percent_C-dssp','percent_H-dssp','percent_B-dssp','percent_E-dssp',
'percent_G-dssp','percent_I-dssp','percent_T-dssp','percent_S-dssp']:
if per not in expoinfo:
expoinfo[per] = 0.0
infodict[chain] = dict(expoinfo)
return infodict | [
"def",
"secondary_structure_summary",
"(",
"dssp_df",
")",
":",
"chains",
"=",
"dssp_df",
".",
"chain",
".",
"unique",
"(",
")",
"infodict",
"=",
"{",
"}",
"for",
"chain",
"in",
"chains",
":",
"expoinfo",
"=",
"defaultdict",
"(",
"int",
")",
"chain_df",
... | Summarize the secondary structure content of the DSSP dataframe for each chain.
Args:
dssp_df: Pandas DataFrame of parsed DSSP results
Returns:
dict: Chain to secondary structure summary dictionary | [
"Summarize",
"the",
"secondary",
"structure",
"content",
"of",
"the",
"DSSP",
"dataframe",
"for",
"each",
"chain",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/dssp.py#L131-L176 | train | 28,959 |
SBRG/ssbio | ssbio/protein/structure/properties/dssp.py | calc_surface_buried | def calc_surface_buried(dssp_df):
'''Calculates the percent of residues that are in the surface or buried,
as well as if they are polar or nonpolar. Returns a dictionary of this.
'''
SN = 0
BN = 0
SP = 0
SNP = 0
SPo = 0
SNe = 0
BNP = 0
BP = 0
BPo = 0
BNe = 0
Total = 0
sbinfo = {}
df_min = dssp_df[['aa_three', 'exposure_asa']]
if len(df_min) == 0:
return sbinfo
else:
for i, r in df_min.iterrows():
res = r.aa_three
area = r.exposure_asa
if res in AAdict:
if AAdict[res] == 'nonpolar' and area > 3:
SNP = SNP + 1
SN = SN + 1
elif AAdict[res] == 'polar' and area > 3:
SP = SP + 1
SN = SN + 1
elif AAdict[res] == 'positive' and area > 3:
SPo = SPo + 1
SN = SN + 1
elif AAdict[res] == 'negative' and area > 3:
SNe = SNe + 1
SN = SN + 1
elif AAdict[res] == 'positive' and area <= 3:
BPo = BPo + 1
BN = BN + 1
elif AAdict[res] == 'negative' and area <= 3:
BNe = BNe + 1
BN = BN + 1
elif AAdict[res] == 'polar' and area <= 3:
BP = BP + 1
BN = BN + 1
elif AAdict[res] == 'nonpolar' and area <= 3:
BNP = BNP + 1
BN = BN + 1
Total = float(BN + SN)
pSNP = float(SNP) / Total
pSP = float(SP) / Total
pSPo = float(SPo) / Total
pSNe = float(SNe) / Total
pBNP = float(BNP) / Total
pBP = float(BP) / Total
pBPo = float(BPo) / Total
pBNe = float(BNe) / Total
pBN = float(BN) / Total
pSN = float(SN) / Total
sbinfo['ssb_per_S_NP'] = pSNP
sbinfo['ssb_per_S_P'] = pSP
sbinfo['ssb_per_S_pos'] = pSPo
sbinfo['ssb_per_S_neg'] = pSNe
sbinfo['ssb_per_B_NP'] = pBNP
sbinfo['ssb_per_B_P'] = pBP
sbinfo['ssb_per_B_pos'] = pBPo
sbinfo['ssb_per_B_neg'] = pBNe
sbinfo['ssb_per_S'] = pSN
sbinfo['ssb_per_B'] = pBN
return sbinfo | python | def calc_surface_buried(dssp_df):
'''Calculates the percent of residues that are in the surface or buried,
as well as if they are polar or nonpolar. Returns a dictionary of this.
'''
SN = 0
BN = 0
SP = 0
SNP = 0
SPo = 0
SNe = 0
BNP = 0
BP = 0
BPo = 0
BNe = 0
Total = 0
sbinfo = {}
df_min = dssp_df[['aa_three', 'exposure_asa']]
if len(df_min) == 0:
return sbinfo
else:
for i, r in df_min.iterrows():
res = r.aa_three
area = r.exposure_asa
if res in AAdict:
if AAdict[res] == 'nonpolar' and area > 3:
SNP = SNP + 1
SN = SN + 1
elif AAdict[res] == 'polar' and area > 3:
SP = SP + 1
SN = SN + 1
elif AAdict[res] == 'positive' and area > 3:
SPo = SPo + 1
SN = SN + 1
elif AAdict[res] == 'negative' and area > 3:
SNe = SNe + 1
SN = SN + 1
elif AAdict[res] == 'positive' and area <= 3:
BPo = BPo + 1
BN = BN + 1
elif AAdict[res] == 'negative' and area <= 3:
BNe = BNe + 1
BN = BN + 1
elif AAdict[res] == 'polar' and area <= 3:
BP = BP + 1
BN = BN + 1
elif AAdict[res] == 'nonpolar' and area <= 3:
BNP = BNP + 1
BN = BN + 1
Total = float(BN + SN)
pSNP = float(SNP) / Total
pSP = float(SP) / Total
pSPo = float(SPo) / Total
pSNe = float(SNe) / Total
pBNP = float(BNP) / Total
pBP = float(BP) / Total
pBPo = float(BPo) / Total
pBNe = float(BNe) / Total
pBN = float(BN) / Total
pSN = float(SN) / Total
sbinfo['ssb_per_S_NP'] = pSNP
sbinfo['ssb_per_S_P'] = pSP
sbinfo['ssb_per_S_pos'] = pSPo
sbinfo['ssb_per_S_neg'] = pSNe
sbinfo['ssb_per_B_NP'] = pBNP
sbinfo['ssb_per_B_P'] = pBP
sbinfo['ssb_per_B_pos'] = pBPo
sbinfo['ssb_per_B_neg'] = pBNe
sbinfo['ssb_per_S'] = pSN
sbinfo['ssb_per_B'] = pBN
return sbinfo | [
"def",
"calc_surface_buried",
"(",
"dssp_df",
")",
":",
"SN",
"=",
"0",
"BN",
"=",
"0",
"SP",
"=",
"0",
"SNP",
"=",
"0",
"SPo",
"=",
"0",
"SNe",
"=",
"0",
"BNP",
"=",
"0",
"BP",
"=",
"0",
"BPo",
"=",
"0",
"BNe",
"=",
"0",
"Total",
"=",
"0",... | Calculates the percent of residues that are in the surface or buried,
as well as if they are polar or nonpolar. Returns a dictionary of this. | [
"Calculates",
"the",
"percent",
"of",
"residues",
"that",
"are",
"in",
"the",
"surface",
"or",
"buried",
"as",
"well",
"as",
"if",
"they",
"are",
"polar",
"or",
"nonpolar",
".",
"Returns",
"a",
"dictionary",
"of",
"this",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/dssp.py#L180-L253 | train | 28,960 |
SBRG/ssbio | ssbio/protein/structure/properties/dssp.py | calc_sasa | def calc_sasa(dssp_df):
"""
Calculation of SASA utilizing the DSSP program.
DSSP must be installed for biopython to properly call it.
Install using apt-get on Ubuntu
or from: http://swift.cmbi.ru.nl/gv/dssp/
Input: PDB or CIF structure file
Output: SASA (integer) of structure
"""
infodict = {'ssb_sasa': dssp_df.exposure_asa.sum(),
'ssb_mean_rel_exposed': dssp_df.exposure_rsa.mean(),
'ssb_size': len(dssp_df)}
return infodict | python | def calc_sasa(dssp_df):
"""
Calculation of SASA utilizing the DSSP program.
DSSP must be installed for biopython to properly call it.
Install using apt-get on Ubuntu
or from: http://swift.cmbi.ru.nl/gv/dssp/
Input: PDB or CIF structure file
Output: SASA (integer) of structure
"""
infodict = {'ssb_sasa': dssp_df.exposure_asa.sum(),
'ssb_mean_rel_exposed': dssp_df.exposure_rsa.mean(),
'ssb_size': len(dssp_df)}
return infodict | [
"def",
"calc_sasa",
"(",
"dssp_df",
")",
":",
"infodict",
"=",
"{",
"'ssb_sasa'",
":",
"dssp_df",
".",
"exposure_asa",
".",
"sum",
"(",
")",
",",
"'ssb_mean_rel_exposed'",
":",
"dssp_df",
".",
"exposure_rsa",
".",
"mean",
"(",
")",
",",
"'ssb_size'",
":",
... | Calculation of SASA utilizing the DSSP program.
DSSP must be installed for biopython to properly call it.
Install using apt-get on Ubuntu
or from: http://swift.cmbi.ru.nl/gv/dssp/
Input: PDB or CIF structure file
Output: SASA (integer) of structure | [
"Calculation",
"of",
"SASA",
"utilizing",
"the",
"DSSP",
"program",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/dssp.py#L256-L272 | train | 28,961 |
SBRG/ssbio | ssbio/protein/structure/properties/dssp.py | get_ss_class | def get_ss_class(pdb_file, dssp_file, chain):
"""Define the secondary structure class of a PDB file at the specific chain
Args:
pdb_file:
dssp_file:
chain:
Returns:
"""
prag = pr.parsePDB(pdb_file)
pr.parseDSSP(dssp_file, prag)
alpha, threeTen, beta = get_dssp_ss_content_multiplechains(prag, chain)
if alpha == 0 and beta > 0:
classification = 'all-beta'
elif beta == 0 and alpha > 0:
classification = 'all-alpha'
elif beta == 0 and alpha == 0:
classification = 'mixed'
elif float(alpha) / beta >= 20:
classification = 'all-alpha'
else:
classification = 'mixed'
return classification | python | def get_ss_class(pdb_file, dssp_file, chain):
"""Define the secondary structure class of a PDB file at the specific chain
Args:
pdb_file:
dssp_file:
chain:
Returns:
"""
prag = pr.parsePDB(pdb_file)
pr.parseDSSP(dssp_file, prag)
alpha, threeTen, beta = get_dssp_ss_content_multiplechains(prag, chain)
if alpha == 0 and beta > 0:
classification = 'all-beta'
elif beta == 0 and alpha > 0:
classification = 'all-alpha'
elif beta == 0 and alpha == 0:
classification = 'mixed'
elif float(alpha) / beta >= 20:
classification = 'all-alpha'
else:
classification = 'mixed'
return classification | [
"def",
"get_ss_class",
"(",
"pdb_file",
",",
"dssp_file",
",",
"chain",
")",
":",
"prag",
"=",
"pr",
".",
"parsePDB",
"(",
"pdb_file",
")",
"pr",
".",
"parseDSSP",
"(",
"dssp_file",
",",
"prag",
")",
"alpha",
",",
"threeTen",
",",
"beta",
"=",
"get_dss... | Define the secondary structure class of a PDB file at the specific chain
Args:
pdb_file:
dssp_file:
chain:
Returns: | [
"Define",
"the",
"secondary",
"structure",
"class",
"of",
"a",
"PDB",
"file",
"at",
"the",
"specific",
"chain"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/dssp.py#L293-L319 | train | 28,962 |
SBRG/ssbio | ssbio/databases/uniprot.py | parse_uniprot_xml_metadata | def parse_uniprot_xml_metadata(sr):
"""Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Returns:
dict: All parsed information
"""
# TODO: What about "reviewed" status? and EC number
xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq']
infodict = {}
infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id]))
infodict['gene_name'] = None
if 'gene_name_primary' in sr.annotations:
infodict['gene_name'] = sr.annotations['gene_name_primary']
infodict['description'] = sr.description
infodict['taxonomy'] = None
if 'organism' in sr.annotations:
infodict['taxonomy'] = sr.annotations['organism']
infodict['seq_version'] = sr.annotations['sequence_version']
infodict['seq_date'] = sr.annotations['sequence_modified']
infodict['entry_version'] = sr.annotations['version']
infodict['entry_date'] = sr.annotations['modified']
tmp = defaultdict(list)
for xref in sr.dbxrefs:
database = xref.split(':', 1)[0]
xrefs = xref.split(':', 1)[-1]
if database in xref_dbs_to_keep:
if database == 'PDB':
tmp['pdbs'].append(xrefs)
else:
tmp[database.lower()].append(xrefs)
infodict.update(tmp)
return infodict | python | def parse_uniprot_xml_metadata(sr):
"""Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Returns:
dict: All parsed information
"""
# TODO: What about "reviewed" status? and EC number
xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq']
infodict = {}
infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id]))
infodict['gene_name'] = None
if 'gene_name_primary' in sr.annotations:
infodict['gene_name'] = sr.annotations['gene_name_primary']
infodict['description'] = sr.description
infodict['taxonomy'] = None
if 'organism' in sr.annotations:
infodict['taxonomy'] = sr.annotations['organism']
infodict['seq_version'] = sr.annotations['sequence_version']
infodict['seq_date'] = sr.annotations['sequence_modified']
infodict['entry_version'] = sr.annotations['version']
infodict['entry_date'] = sr.annotations['modified']
tmp = defaultdict(list)
for xref in sr.dbxrefs:
database = xref.split(':', 1)[0]
xrefs = xref.split(':', 1)[-1]
if database in xref_dbs_to_keep:
if database == 'PDB':
tmp['pdbs'].append(xrefs)
else:
tmp[database.lower()].append(xrefs)
infodict.update(tmp)
return infodict | [
"def",
"parse_uniprot_xml_metadata",
"(",
"sr",
")",
":",
"# TODO: What about \"reviewed\" status? and EC number",
"xref_dbs_to_keep",
"=",
"[",
"'GO'",
",",
"'KEGG'",
",",
"'PDB'",
",",
"'PROSITE'",
",",
"'Pfam'",
",",
"'RefSeq'",
"]",
"infodict",
"=",
"{",
"}",
... | Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Returns:
dict: All parsed information | [
"Load",
"relevant",
"attributes",
"and",
"dbxrefs",
"from",
"a",
"parsed",
"UniProt",
"XML",
"file",
"in",
"a",
"SeqRecord",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L238-L276 | train | 28,963 |
SBRG/ssbio | ssbio/databases/uniprot.py | is_valid_uniprot_id | def is_valid_uniprot_id(instring):
"""Check if a string is a valid UniProt ID.
See regex from: http://www.uniprot.org/help/accession_numbers
Args:
instring: any string identifier
Returns: True if the string is a valid UniProt ID
"""
valid_id = re.compile("[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}")
if valid_id.match(str(instring)):
return True
else:
return False | python | def is_valid_uniprot_id(instring):
"""Check if a string is a valid UniProt ID.
See regex from: http://www.uniprot.org/help/accession_numbers
Args:
instring: any string identifier
Returns: True if the string is a valid UniProt ID
"""
valid_id = re.compile("[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}")
if valid_id.match(str(instring)):
return True
else:
return False | [
"def",
"is_valid_uniprot_id",
"(",
"instring",
")",
":",
"valid_id",
"=",
"re",
".",
"compile",
"(",
"\"[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}\"",
")",
"if",
"valid_id",
".",
"match",
"(",
"str",
"(",
"instring",
")",
")",
":",
"return"... | Check if a string is a valid UniProt ID.
See regex from: http://www.uniprot.org/help/accession_numbers
Args:
instring: any string identifier
Returns: True if the string is a valid UniProt ID | [
"Check",
"if",
"a",
"string",
"is",
"a",
"valid",
"UniProt",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L279-L294 | train | 28,964 |
SBRG/ssbio | ssbio/databases/uniprot.py | uniprot_reviewed_checker | def uniprot_reviewed_checker(uniprot_id):
"""Check if a single UniProt ID is reviewed or not.
Args:
uniprot_id:
Returns:
bool: If the entry is reviewed
"""
query_string = 'id:' + uniprot_id
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True)
uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
return uni_rev_dict_adder[uniprot_id] | python | def uniprot_reviewed_checker(uniprot_id):
"""Check if a single UniProt ID is reviewed or not.
Args:
uniprot_id:
Returns:
bool: If the entry is reviewed
"""
query_string = 'id:' + uniprot_id
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True)
uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
return uni_rev_dict_adder[uniprot_id] | [
"def",
"uniprot_reviewed_checker",
"(",
"uniprot_id",
")",
":",
"query_string",
"=",
"'id:'",
"+",
"uniprot_id",
"uni_rev_raw",
"=",
"StringIO",
"(",
"bsup",
".",
"search",
"(",
"query_string",
",",
"columns",
"=",
"'id,reviewed'",
",",
"frmt",
"=",
"'tab'",
"... | Check if a single UniProt ID is reviewed or not.
Args:
uniprot_id:
Returns:
bool: If the entry is reviewed | [
"Check",
"if",
"a",
"single",
"UniProt",
"ID",
"is",
"reviewed",
"or",
"not",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L328-L350 | train | 28,965 |
SBRG/ssbio | ssbio/databases/uniprot.py | uniprot_reviewed_checker_batch | def uniprot_reviewed_checker_batch(uniprot_ids):
"""Batch check if uniprot IDs are reviewed or not
Args:
uniprot_ids: UniProt ID or list of UniProt IDs
Returns:
A dictionary of {UniProtID: Boolean}
"""
uniprot_ids = ssbio.utils.force_list(uniprot_ids)
invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)]
uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)]
if invalid_ids:
warnings.warn("Invalid UniProt IDs {} will be ignored".format(invalid_ids))
# splitting query up into managable sizes (200 IDs each)
Nmax = 200
N, rest = divmod(len(uniprot_ids), Nmax)
uni_rev_dict = {}
if rest > 0:
N += 1
for i in range(0, N):
i1 = i * Nmax
i2 = (i + 1) * Nmax
if i2 > len(uniprot_ids):
i2 = len(uniprot_ids)
query = uniprot_ids[i1:i2]
query_string = ''
for x in query:
query_string += 'id:' + x + '+OR+'
query_string = query_string.strip('+OR+')
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
# no_metadata = uni_rev_df[pd.isnull(uni_rev_df.Status)].index.tolist()
# if no_metadata:
# warnings.warn("Unable to retrieve metadata for {}.".format(no_metadata))
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True)
uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
uni_rev_dict.update(uni_rev_dict_adder)
return uni_rev_dict | python | def uniprot_reviewed_checker_batch(uniprot_ids):
"""Batch check if uniprot IDs are reviewed or not
Args:
uniprot_ids: UniProt ID or list of UniProt IDs
Returns:
A dictionary of {UniProtID: Boolean}
"""
uniprot_ids = ssbio.utils.force_list(uniprot_ids)
invalid_ids = [i for i in uniprot_ids if not is_valid_uniprot_id(i)]
uniprot_ids = [i for i in uniprot_ids if is_valid_uniprot_id(i)]
if invalid_ids:
warnings.warn("Invalid UniProt IDs {} will be ignored".format(invalid_ids))
# splitting query up into managable sizes (200 IDs each)
Nmax = 200
N, rest = divmod(len(uniprot_ids), Nmax)
uni_rev_dict = {}
if rest > 0:
N += 1
for i in range(0, N):
i1 = i * Nmax
i2 = (i + 1) * Nmax
if i2 > len(uniprot_ids):
i2 = len(uniprot_ids)
query = uniprot_ids[i1:i2]
query_string = ''
for x in query:
query_string += 'id:' + x + '+OR+'
query_string = query_string.strip('+OR+')
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
# no_metadata = uni_rev_df[pd.isnull(uni_rev_df.Status)].index.tolist()
# if no_metadata:
# warnings.warn("Unable to retrieve metadata for {}.".format(no_metadata))
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace="reviewed", value=True)
uni_rev_df = uni_rev_df.replace(to_replace="unreviewed", value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
uni_rev_dict.update(uni_rev_dict_adder)
return uni_rev_dict | [
"def",
"uniprot_reviewed_checker_batch",
"(",
"uniprot_ids",
")",
":",
"uniprot_ids",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"uniprot_ids",
")",
"invalid_ids",
"=",
"[",
"i",
"for",
"i",
"in",
"uniprot_ids",
"if",
"not",
"is_valid_uniprot_id",
"(",
... | Batch check if uniprot IDs are reviewed or not
Args:
uniprot_ids: UniProt ID or list of UniProt IDs
Returns:
A dictionary of {UniProtID: Boolean} | [
"Batch",
"check",
"if",
"uniprot",
"IDs",
"are",
"reviewed",
"or",
"not"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L353-L406 | train | 28,966 |
SBRG/ssbio | ssbio/databases/uniprot.py | uniprot_ec | def uniprot_ec(uniprot_id):
"""Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
ec = r.content.decode('utf-8').splitlines()[1]
if len(ec) == 0:
ec = None
return ec | python | def uniprot_ec(uniprot_id):
"""Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
ec = r.content.decode('utf-8').splitlines()[1]
if len(ec) == 0:
ec = None
return ec | [
"def",
"uniprot_ec",
"(",
"uniprot_id",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"'http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab'",
"%",
"uniprot_id",
")",
"ec",
"=",
"r",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"splitl... | Retrieve the EC number annotation for a UniProt ID.
Args:
uniprot_id: Valid UniProt ID
Returns: | [
"Retrieve",
"the",
"EC",
"number",
"annotation",
"for",
"a",
"UniProt",
"ID",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L409-L424 | train | 28,967 |
SBRG/ssbio | ssbio/databases/uniprot.py | uniprot_sites | def uniprot_sites(uniprot_id):
"""Retrieve a list of UniProt sites parsed from the feature file
Sites are defined here: http://www.uniprot.org/help/site and here: http://www.uniprot.org/help/function_section
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/%s.gff' % uniprot_id)
gff = StringIO(r.content.decode('utf-8'))
feats = list(GFF.parse(gff))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
return feats[0].features | python | def uniprot_sites(uniprot_id):
"""Retrieve a list of UniProt sites parsed from the feature file
Sites are defined here: http://www.uniprot.org/help/site and here: http://www.uniprot.org/help/function_section
Args:
uniprot_id: Valid UniProt ID
Returns:
"""
r = requests.post('http://www.uniprot.org/uniprot/%s.gff' % uniprot_id)
gff = StringIO(r.content.decode('utf-8'))
feats = list(GFF.parse(gff))
if len(feats) > 1:
log.warning('Too many sequences in GFF')
else:
return feats[0].features | [
"def",
"uniprot_sites",
"(",
"uniprot_id",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"'http://www.uniprot.org/uniprot/%s.gff'",
"%",
"uniprot_id",
")",
"gff",
"=",
"StringIO",
"(",
"r",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"feats"... | Retrieve a list of UniProt sites parsed from the feature file
Sites are defined here: http://www.uniprot.org/help/site and here: http://www.uniprot.org/help/function_section
Args:
uniprot_id: Valid UniProt ID
Returns: | [
"Retrieve",
"a",
"list",
"of",
"UniProt",
"sites",
"parsed",
"from",
"the",
"feature",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L427-L446 | train | 28,968 |
SBRG/ssbio | ssbio/databases/uniprot.py | parse_uniprot_txt_file | def parse_uniprot_txt_file(infile):
"""Parse a raw UniProt metadata file and return a dictionary.
Args:
infile: Path to metadata file
Returns:
dict: Metadata dictionary
"""
uniprot_metadata_dict = {}
metadata = old_parse_uniprot_txt_file(infile)
metadata_keys = list(metadata.keys())
if metadata_keys:
metadata_key = metadata_keys[0]
else:
return uniprot_metadata_dict
uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence']))
uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed']
uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version']
uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version']
if 'gene' in metadata[metadata_key]:
uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene']
if 'description' in metadata[metadata_key]:
uniprot_metadata_dict['description'] = metadata[metadata_key]['description']
if 'refseq' in metadata[metadata_key]:
uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq']
if 'kegg' in metadata[metadata_key]:
uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg']
if 'ec' in metadata[metadata_key]:
uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec']
if 'pfam' in metadata[metadata_key]:
uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam']
if 'pdbs' in metadata[metadata_key]:
uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs']))
return uniprot_metadata_dict | python | def parse_uniprot_txt_file(infile):
"""Parse a raw UniProt metadata file and return a dictionary.
Args:
infile: Path to metadata file
Returns:
dict: Metadata dictionary
"""
uniprot_metadata_dict = {}
metadata = old_parse_uniprot_txt_file(infile)
metadata_keys = list(metadata.keys())
if metadata_keys:
metadata_key = metadata_keys[0]
else:
return uniprot_metadata_dict
uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence']))
uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed']
uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version']
uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version']
if 'gene' in metadata[metadata_key]:
uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene']
if 'description' in metadata[metadata_key]:
uniprot_metadata_dict['description'] = metadata[metadata_key]['description']
if 'refseq' in metadata[metadata_key]:
uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq']
if 'kegg' in metadata[metadata_key]:
uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg']
if 'ec' in metadata[metadata_key]:
uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec']
if 'pfam' in metadata[metadata_key]:
uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam']
if 'pdbs' in metadata[metadata_key]:
uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs']))
return uniprot_metadata_dict | [
"def",
"parse_uniprot_txt_file",
"(",
"infile",
")",
":",
"uniprot_metadata_dict",
"=",
"{",
"}",
"metadata",
"=",
"old_parse_uniprot_txt_file",
"(",
"infile",
")",
"metadata_keys",
"=",
"list",
"(",
"metadata",
".",
"keys",
"(",
")",
")",
"if",
"metadata_keys",... | Parse a raw UniProt metadata file and return a dictionary.
Args:
infile: Path to metadata file
Returns:
dict: Metadata dictionary | [
"Parse",
"a",
"raw",
"UniProt",
"metadata",
"file",
"and",
"return",
"a",
"dictionary",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L484-L522 | train | 28,969 |
SBRG/ssbio | ssbio/databases/uniprot.py | UniProtProp.metadata_path_unset | def metadata_path_unset(self):
"""Copy features to memory and remove the association of the metadata file."""
if not self.metadata_file:
raise IOError('No metadata file to unset')
log.debug('{}: reading from metadata file {}'.format(self.id, self.metadata_path))
tmp_sr = SeqIO.read(self.metadata_path, 'uniprot-xml')
tmp_feats = tmp_sr.features
# TODO: should this be in separate unset functions?
self.metadata_dir = None
self.metadata_file = None
self.features = tmp_feats
if self.sequence_file:
tmp_sr = tmp_sr.seq
self.sequence_dir = None
self.sequence_file = None
self.seq = tmp_sr | python | def metadata_path_unset(self):
"""Copy features to memory and remove the association of the metadata file."""
if not self.metadata_file:
raise IOError('No metadata file to unset')
log.debug('{}: reading from metadata file {}'.format(self.id, self.metadata_path))
tmp_sr = SeqIO.read(self.metadata_path, 'uniprot-xml')
tmp_feats = tmp_sr.features
# TODO: should this be in separate unset functions?
self.metadata_dir = None
self.metadata_file = None
self.features = tmp_feats
if self.sequence_file:
tmp_sr = tmp_sr.seq
self.sequence_dir = None
self.sequence_file = None
self.seq = tmp_sr | [
"def",
"metadata_path_unset",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"metadata_file",
":",
"raise",
"IOError",
"(",
"'No metadata file to unset'",
")",
"log",
".",
"debug",
"(",
"'{}: reading from metadata file {}'",
".",
"format",
"(",
"self",
".",
"i... | Copy features to memory and remove the association of the metadata file. | [
"Copy",
"features",
"to",
"memory",
"and",
"remove",
"the",
"association",
"of",
"the",
"metadata",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L177-L195 | train | 28,970 |
SBRG/ssbio | ssbio/databases/uniprot.py | UniProtProp.download_seq_file | def download_seq_file(self, outdir, force_rerun=False):
"""Download and load the UniProt FASTA file"""
uniprot_fasta_file = download_uniprot_file(uniprot_id=self.id,
filetype='fasta',
outdir=outdir,
force_rerun=force_rerun)
self.sequence_path = uniprot_fasta_file | python | def download_seq_file(self, outdir, force_rerun=False):
"""Download and load the UniProt FASTA file"""
uniprot_fasta_file = download_uniprot_file(uniprot_id=self.id,
filetype='fasta',
outdir=outdir,
force_rerun=force_rerun)
self.sequence_path = uniprot_fasta_file | [
"def",
"download_seq_file",
"(",
"self",
",",
"outdir",
",",
"force_rerun",
"=",
"False",
")",
":",
"uniprot_fasta_file",
"=",
"download_uniprot_file",
"(",
"uniprot_id",
"=",
"self",
".",
"id",
",",
"filetype",
"=",
"'fasta'",
",",
"outdir",
"=",
"outdir",
... | Download and load the UniProt FASTA file | [
"Download",
"and",
"load",
"the",
"UniProt",
"FASTA",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L197-L205 | train | 28,971 |
SBRG/ssbio | ssbio/databases/uniprot.py | UniProtProp.download_metadata_file | def download_metadata_file(self, outdir, force_rerun=False):
"""Download and load the UniProt XML file"""
uniprot_xml_file = download_uniprot_file(uniprot_id=self.id,
outdir=outdir,
filetype='xml',
force_rerun=force_rerun)
self.metadata_path = uniprot_xml_file | python | def download_metadata_file(self, outdir, force_rerun=False):
"""Download and load the UniProt XML file"""
uniprot_xml_file = download_uniprot_file(uniprot_id=self.id,
outdir=outdir,
filetype='xml',
force_rerun=force_rerun)
self.metadata_path = uniprot_xml_file | [
"def",
"download_metadata_file",
"(",
"self",
",",
"outdir",
",",
"force_rerun",
"=",
"False",
")",
":",
"uniprot_xml_file",
"=",
"download_uniprot_file",
"(",
"uniprot_id",
"=",
"self",
".",
"id",
",",
"outdir",
"=",
"outdir",
",",
"filetype",
"=",
"'xml'",
... | Download and load the UniProt XML file | [
"Download",
"and",
"load",
"the",
"UniProt",
"XML",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/uniprot.py#L207-L214 | train | 28,972 |
SBRG/ssbio | ssbio/core/object.py | Object.save_dataframes | def save_dataframes(self, outdir, prefix='df_'):
"""Save all attributes that start with "df" into a specified directory.
Args:
outdir (str): Path to output directory
prefix (str): Prefix that dataframe attributes start with
"""
# Get list of attributes that start with "df_"
dfs = list(filter(lambda x: x.startswith(prefix), dir(self)))
counter = 0
for df in dfs:
outpath = ssbio.utils.outfile_maker(inname=df, outext='.csv', outdir=outdir)
my_df = getattr(self, df)
if not isinstance(my_df, pd.DataFrame):
raise TypeError('{}: object is not a Pandas DataFrame'.format(df))
if my_df.empty:
log.debug('{}: empty dataframe, not saving'.format(df))
else:
my_df.to_csv(outpath)
log.debug('{}: saved dataframe'.format(outpath))
counter += 1
log.debug('Saved {} dataframes at {}'.format(counter, outdir)) | python | def save_dataframes(self, outdir, prefix='df_'):
"""Save all attributes that start with "df" into a specified directory.
Args:
outdir (str): Path to output directory
prefix (str): Prefix that dataframe attributes start with
"""
# Get list of attributes that start with "df_"
dfs = list(filter(lambda x: x.startswith(prefix), dir(self)))
counter = 0
for df in dfs:
outpath = ssbio.utils.outfile_maker(inname=df, outext='.csv', outdir=outdir)
my_df = getattr(self, df)
if not isinstance(my_df, pd.DataFrame):
raise TypeError('{}: object is not a Pandas DataFrame'.format(df))
if my_df.empty:
log.debug('{}: empty dataframe, not saving'.format(df))
else:
my_df.to_csv(outpath)
log.debug('{}: saved dataframe'.format(outpath))
counter += 1
log.debug('Saved {} dataframes at {}'.format(counter, outdir)) | [
"def",
"save_dataframes",
"(",
"self",
",",
"outdir",
",",
"prefix",
"=",
"'df_'",
")",
":",
"# Get list of attributes that start with \"df_\"",
"dfs",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"startswith",
"(",
"prefix",
")",
",",
"dir... | Save all attributes that start with "df" into a specified directory.
Args:
outdir (str): Path to output directory
prefix (str): Prefix that dataframe attributes start with | [
"Save",
"all",
"attributes",
"that",
"start",
"with",
"df",
"into",
"a",
"specified",
"directory",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/object.py#L138-L163 | train | 28,973 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/Hydrogenate.py | Hydrogenate_Protein._build_bonding_network | def _build_bonding_network(self):
"""
Evaluates atoms per residue for missing and known bonded partners.
Based on bond_amber.
A better alternative would be to iterate over the entire list of residues and
use NeighborSearch to probe neighbors for atom X in residue i, i-1 and i+1
"""
self.bonds = {} # [Residue][Atom]: [ [missing], [bonded] ]
self.selection = {} # [Residue]: 'nrml'/'nter'/'cter'
missing = 0
for residue in self.nh_structure.get_residues():
bond_dict = self.bonds[residue] = {}
atom_dict = residue.child_dict
atom_names = set(atom_dict.keys())
# Pre-Populate Dictionary
for name in atom_names:
bond_dict[name] = [ [], [] ]
# Define Template
if atom_names.intersection(self.C_TERMINAL_ATOMS):
selection = 'cter'
elif atom_names.intersection(self.N_TERMINAL_ATOMS):
selection = 'nter'
else:
selection = 'nrml'
tmpl = self.tmpl[selection]
self.selection[residue] = selection # For place_hs
# Iterate Template Bonds and record info
if not tmpl.has_key(residue.resname):
raise ValueError("Unknown Residue Type: %s" %residue.resname)
template_bonds = tmpl[residue.resname]['bonds']
for bond in template_bonds.keys():
a1, a2 = bond
if a1 in atom_names and not a2 in atom_names:
bond_dict[a1][0].append(a2)
missing += 1
elif a1 not in atom_names and a2 in atom_names:
bond_dict[a2][0].append(a1)
missing += 1
else: #
bond_dict[a1][1].append(atom_dict[a2])
bond_dict[a2][1].append(atom_dict[a1])
return missing | python | def _build_bonding_network(self):
"""
Evaluates atoms per residue for missing and known bonded partners.
Based on bond_amber.
A better alternative would be to iterate over the entire list of residues and
use NeighborSearch to probe neighbors for atom X in residue i, i-1 and i+1
"""
self.bonds = {} # [Residue][Atom]: [ [missing], [bonded] ]
self.selection = {} # [Residue]: 'nrml'/'nter'/'cter'
missing = 0
for residue in self.nh_structure.get_residues():
bond_dict = self.bonds[residue] = {}
atom_dict = residue.child_dict
atom_names = set(atom_dict.keys())
# Pre-Populate Dictionary
for name in atom_names:
bond_dict[name] = [ [], [] ]
# Define Template
if atom_names.intersection(self.C_TERMINAL_ATOMS):
selection = 'cter'
elif atom_names.intersection(self.N_TERMINAL_ATOMS):
selection = 'nter'
else:
selection = 'nrml'
tmpl = self.tmpl[selection]
self.selection[residue] = selection # For place_hs
# Iterate Template Bonds and record info
if not tmpl.has_key(residue.resname):
raise ValueError("Unknown Residue Type: %s" %residue.resname)
template_bonds = tmpl[residue.resname]['bonds']
for bond in template_bonds.keys():
a1, a2 = bond
if a1 in atom_names and not a2 in atom_names:
bond_dict[a1][0].append(a2)
missing += 1
elif a1 not in atom_names and a2 in atom_names:
bond_dict[a2][0].append(a1)
missing += 1
else: #
bond_dict[a1][1].append(atom_dict[a2])
bond_dict[a2][1].append(atom_dict[a1])
return missing | [
"def",
"_build_bonding_network",
"(",
"self",
")",
":",
"self",
".",
"bonds",
"=",
"{",
"}",
"# [Residue][Atom]: [ [missing], [bonded] ]",
"self",
".",
"selection",
"=",
"{",
"}",
"# [Residue]: 'nrml'/'nter'/'cter'",
"missing",
"=",
"0",
"for",
"residue",
"in",
"... | Evaluates atoms per residue for missing and known bonded partners.
Based on bond_amber.
A better alternative would be to iterate over the entire list of residues and
use NeighborSearch to probe neighbors for atom X in residue i, i-1 and i+1 | [
"Evaluates",
"atoms",
"per",
"residue",
"for",
"missing",
"and",
"known",
"bonded",
"partners",
".",
"Based",
"on",
"bond_amber",
".",
"A",
"better",
"alternative",
"would",
"be",
"to",
"iterate",
"over",
"the",
"entire",
"list",
"of",
"residues",
"and",
"us... | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/Hydrogenate.py#L76-L133 | train | 28,974 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/Hydrogenate.py | Hydrogenate_Protein._exclude_ss_bonded_cysteines | def _exclude_ss_bonded_cysteines(self):
"""
Pre-compute ss bonds to discard cystines for H-adding.
"""
ss_bonds = self.nh_structure.search_ss_bonds()
for cys_pair in ss_bonds:
cys1, cys2 = cys_pair
cys1.resname = 'CYX'
cys2.resname = 'CYX' | python | def _exclude_ss_bonded_cysteines(self):
"""
Pre-compute ss bonds to discard cystines for H-adding.
"""
ss_bonds = self.nh_structure.search_ss_bonds()
for cys_pair in ss_bonds:
cys1, cys2 = cys_pair
cys1.resname = 'CYX'
cys2.resname = 'CYX' | [
"def",
"_exclude_ss_bonded_cysteines",
"(",
"self",
")",
":",
"ss_bonds",
"=",
"self",
".",
"nh_structure",
".",
"search_ss_bonds",
"(",
")",
"for",
"cys_pair",
"in",
"ss_bonds",
":",
"cys1",
",",
"cys2",
"=",
"cys_pair",
"cys1",
".",
"resname",
"=",
"'CYX'"... | Pre-compute ss bonds to discard cystines for H-adding. | [
"Pre",
"-",
"compute",
"ss",
"bonds",
"to",
"discard",
"cystines",
"for",
"H",
"-",
"adding",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/Hydrogenate.py#L135-L145 | train | 28,975 |
SBRG/ssbio | ssbio/biopython/Bio/Struct/Hydrogenate.py | Hydrogenate_Protein._find_secondary_anchors | def _find_secondary_anchors(self, residue, heavy_atom, anchor):
"""
Searches through the bond network for atoms bound to the anchor.
Returns a secondary and tertiary anchors.
Example, for CA, returns C and O.
"""
for secondary in self.bonds[residue][anchor.name][1]:
for tertiary in self.bonds[residue][secondary.name][1]:
if (tertiary.name != heavy_atom.name
and tertiary.name != anchor.name):
return (secondary, tertiary)
return None | python | def _find_secondary_anchors(self, residue, heavy_atom, anchor):
"""
Searches through the bond network for atoms bound to the anchor.
Returns a secondary and tertiary anchors.
Example, for CA, returns C and O.
"""
for secondary in self.bonds[residue][anchor.name][1]:
for tertiary in self.bonds[residue][secondary.name][1]:
if (tertiary.name != heavy_atom.name
and tertiary.name != anchor.name):
return (secondary, tertiary)
return None | [
"def",
"_find_secondary_anchors",
"(",
"self",
",",
"residue",
",",
"heavy_atom",
",",
"anchor",
")",
":",
"for",
"secondary",
"in",
"self",
".",
"bonds",
"[",
"residue",
"]",
"[",
"anchor",
".",
"name",
"]",
"[",
"1",
"]",
":",
"for",
"tertiary",
"in"... | Searches through the bond network for atoms bound to the anchor.
Returns a secondary and tertiary anchors.
Example, for CA, returns C and O. | [
"Searches",
"through",
"the",
"bond",
"network",
"for",
"atoms",
"bound",
"to",
"the",
"anchor",
".",
"Returns",
"a",
"secondary",
"and",
"tertiary",
"anchors",
".",
"Example",
"for",
"CA",
"returns",
"C",
"and",
"O",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/Hydrogenate.py#L147-L161 | train | 28,976 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | parse_results_mol2 | def parse_results_mol2(mol2_outpath):
"""Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
"""
docked_ligands = pd.DataFrame()
lines = [line.strip() for line in open(mol2_outpath, 'r')]
props = {}
for i, line in enumerate(lines):
if line.startswith('########## Name:'):
ligand = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')[1]
line = lines[i + 1]
props = {}
props['Ligand'] = ligand
if line.startswith('##########'):
splitter = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')
props[splitter[0]] = float(splitter[1])
if line.startswith('@<TRIPOS>MOLECULE'):
if props:
docked_ligands = docked_ligands.append(props, ignore_index=True)
return docked_ligands | python | def parse_results_mol2(mol2_outpath):
"""Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
"""
docked_ligands = pd.DataFrame()
lines = [line.strip() for line in open(mol2_outpath, 'r')]
props = {}
for i, line in enumerate(lines):
if line.startswith('########## Name:'):
ligand = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')[1]
line = lines[i + 1]
props = {}
props['Ligand'] = ligand
if line.startswith('##########'):
splitter = line.strip().strip('##########').replace(' ', '').replace('\t', '').split(':')
props[splitter[0]] = float(splitter[1])
if line.startswith('@<TRIPOS>MOLECULE'):
if props:
docked_ligands = docked_ligands.append(props, ignore_index=True)
return docked_ligands | [
"def",
"parse_results_mol2",
"(",
"mol2_outpath",
")",
":",
"docked_ligands",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"lines",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"open",
"(",
"mol2_outpath",
",",
"'r'",
")",
"]",
"props",
"="... | Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results | [
"Parse",
"a",
"DOCK6",
"mol2",
"output",
"file",
"return",
"a",
"Pandas",
"DataFrame",
"of",
"the",
"results",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L592-L620 | train | 28,977 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.structure_path | def structure_path(self, path):
"""Provide pointers to the paths of the structure file
Args:
path: Path to structure file
"""
if not path:
self.structure_dir = None
self.structure_file = None
else:
if not op.exists(path):
raise OSError('{}: file does not exist!'.format(path))
if not op.dirname(path):
self.structure_dir = '.'
else:
self.structure_dir = op.dirname(path)
self.structure_file = op.basename(path) | python | def structure_path(self, path):
"""Provide pointers to the paths of the structure file
Args:
path: Path to structure file
"""
if not path:
self.structure_dir = None
self.structure_file = None
else:
if not op.exists(path):
raise OSError('{}: file does not exist!'.format(path))
if not op.dirname(path):
self.structure_dir = '.'
else:
self.structure_dir = op.dirname(path)
self.structure_file = op.basename(path) | [
"def",
"structure_path",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"path",
":",
"self",
".",
"structure_dir",
"=",
"None",
"self",
".",
"structure_file",
"=",
"None",
"else",
":",
"if",
"not",
"op",
".",
"exists",
"(",
"path",
")",
":",
"raise... | Provide pointers to the paths of the structure file
Args:
path: Path to structure file | [
"Provide",
"pointers",
"to",
"the",
"paths",
"of",
"the",
"structure",
"file"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L117-L136 | train | 28,978 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.dockprep | def dockprep(self, force_rerun=False):
"""Prepare a PDB file for docking by first converting it to mol2 format.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running dock preparation...'.format(self.id))
prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))
prep_py = op.join(self.dock_dir, "prep.py")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):
with open(prep_py, "w") as f:
f.write('import chimera\n')
f.write('from DockPrep import prep\n')
f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\n')
f.write('prep(models)\n')
f.write('from WriteMol2 import writeMol2\n')
f.write('writeMol2(models, "{}")\n'.format(prep_mol2))
cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)
os.system(cmd)
os.remove(prep_py)
os.remove('{}c'.format(prep_py))
if ssbio.utils.is_non_zero_file(prep_mol2):
self.dockprep_path = prep_mol2
log.debug('{}: successful dockprep execution'.format(self.dockprep_path))
else:
log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path)) | python | def dockprep(self, force_rerun=False):
"""Prepare a PDB file for docking by first converting it to mol2 format.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running dock preparation...'.format(self.id))
prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))
prep_py = op.join(self.dock_dir, "prep.py")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):
with open(prep_py, "w") as f:
f.write('import chimera\n')
f.write('from DockPrep import prep\n')
f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\n')
f.write('prep(models)\n')
f.write('from WriteMol2 import writeMol2\n')
f.write('writeMol2(models, "{}")\n'.format(prep_mol2))
cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)
os.system(cmd)
os.remove(prep_py)
os.remove('{}c'.format(prep_py))
if ssbio.utils.is_non_zero_file(prep_mol2):
self.dockprep_path = prep_mol2
log.debug('{}: successful dockprep execution'.format(self.dockprep_path))
else:
log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path)) | [
"def",
"dockprep",
"(",
"self",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running dock preparation...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"prep_mol2",
"=",
"op",
".",
"join",
"(",
"self",
".",
"dock_dir",... | Prepare a PDB file for docking by first converting it to mol2 format.
Args:
force_rerun (bool): If method should be rerun even if output file exists | [
"Prepare",
"a",
"PDB",
"file",
"for",
"docking",
"by",
"first",
"converting",
"it",
"to",
"mol2",
"format",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L138-L168 | train | 28,979 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.protein_only_and_noH | def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):
"""Isolate the receptor by stripping everything except protein and specified ligands.
Args:
keep_ligands (str, list): Ligand(s) to keep in PDB file
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running protein receptor isolation...'.format(self.id))
if not self.dockprep_path:
return ValueError('Please run dockprep')
receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))
receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))
prly_com = op.join(self.dock_dir, "prly.com")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):
with open(prly_com, "w") as f:
f.write('open {}\n'.format(self.dockprep_path))
keep_str = 'delete ~protein'
if keep_ligands:
keep_ligands = ssbio.utils.force_list(keep_ligands)
for res in keep_ligands:
keep_str += ' & ~:{} '.format(res)
keep_str = keep_str.strip() + '\n'
f.write(keep_str)
f.write('write format mol2 0 {}\n'.format(receptor_mol2))
f.write('delete element.H\n')
f.write('write format pdb 0 {}\n'.format(receptor_noh))
cmd = 'chimera --nogui {}'.format(prly_com)
os.system(cmd)
os.remove(prly_com)
if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):
self.receptormol2_path = receptor_mol2
self.receptorpdb_path = receptor_noh
log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))
log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))
else:
log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path)) | python | def protein_only_and_noH(self, keep_ligands=None, force_rerun=False):
"""Isolate the receptor by stripping everything except protein and specified ligands.
Args:
keep_ligands (str, list): Ligand(s) to keep in PDB file
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running protein receptor isolation...'.format(self.id))
if not self.dockprep_path:
return ValueError('Please run dockprep')
receptor_mol2 = op.join(self.dock_dir, '{}_receptor.mol2'.format(self.id))
receptor_noh = op.join(self.dock_dir, '{}_receptor_noH.pdb'.format(self.id))
prly_com = op.join(self.dock_dir, "prly.com")
if ssbio.utils.force_rerun(flag=force_rerun, outfile=receptor_noh):
with open(prly_com, "w") as f:
f.write('open {}\n'.format(self.dockprep_path))
keep_str = 'delete ~protein'
if keep_ligands:
keep_ligands = ssbio.utils.force_list(keep_ligands)
for res in keep_ligands:
keep_str += ' & ~:{} '.format(res)
keep_str = keep_str.strip() + '\n'
f.write(keep_str)
f.write('write format mol2 0 {}\n'.format(receptor_mol2))
f.write('delete element.H\n')
f.write('write format pdb 0 {}\n'.format(receptor_noh))
cmd = 'chimera --nogui {}'.format(prly_com)
os.system(cmd)
os.remove(prly_com)
if ssbio.utils.is_non_zero_file(receptor_mol2) and ssbio.utils.is_non_zero_file(receptor_noh):
self.receptormol2_path = receptor_mol2
self.receptorpdb_path = receptor_noh
log.debug('{}: successful receptor isolation (mol2)'.format(self.receptormol2_path))
log.debug('{}: successful receptor isolation (pdb)'.format(self.receptorpdb_path))
else:
log.critical('{}: protein_only_and_noH failed to run on dockprep file'.format(self.dockprep_path)) | [
"def",
"protein_only_and_noH",
"(",
"self",
",",
"keep_ligands",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running protein receptor isolation...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"if",
"not",
"... | Isolate the receptor by stripping everything except protein and specified ligands.
Args:
keep_ligands (str, list): Ligand(s) to keep in PDB file
force_rerun (bool): If method should be rerun even if output file exists | [
"Isolate",
"the",
"receptor",
"by",
"stripping",
"everything",
"except",
"protein",
"and",
"specified",
"ligands",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L170-L214 | train | 28,980 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.binding_site_mol2 | def binding_site_mol2(self, residues, force_rerun=False):
"""Create mol2 of only binding site residues from the receptor
This function will take in a .pdb file (preferably the _receptor_noH.pdb file)
and a string of residues (eg: '144,170,199') and delete all other residues in the
.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.
This is necessary for Chimera to select spheres within the radius of the binding
site.
Args:
residues (str): Comma separated string of residues (eg: '144,170,199')
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running binding site isolation...'.format(self.id))
if not self.receptorpdb_path:
return ValueError('Please run protein_only_and_noH')
prefix = self.id + '_' + 'binding_residues'
mol2maker = op.join(self.dock_dir, '{}_make_mol2.py'.format(prefix))
outfile = op.join(self.dock_dir, '{}.mol2'.format(prefix))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(mol2maker, 'w') as mol2_maker:
mol2_maker.write('#! /usr/bin/env python\n')
mol2_maker.write('from chimera import runCommand\n')
mol2_maker.write('runCommand("open {}")\n'.format(self.receptorpdb_path))
mol2_maker.write('runCommand("delete ~:{}")\n'.format(residues))
mol2_maker.write('runCommand("write format mol2 resnum 0 {}")\n'.format(outfile))
mol2_maker.write('runCommand("close all")')
cmd = 'chimera --nogui {}'.format(mol2maker)
os.system(cmd)
os.remove(mol2maker)
os.remove('{}c'.format(mol2maker))
if ssbio.utils.is_non_zero_file(outfile):
self.bindingsite_path = outfile
log.debug('{}: successful binding site isolation'.format(self.bindingsite_path))
else:
log.critical('{}: binding_site_mol2 failed to run on receptor file'.format(self.receptorpdb_path)) | python | def binding_site_mol2(self, residues, force_rerun=False):
"""Create mol2 of only binding site residues from the receptor
This function will take in a .pdb file (preferably the _receptor_noH.pdb file)
and a string of residues (eg: '144,170,199') and delete all other residues in the
.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.
This is necessary for Chimera to select spheres within the radius of the binding
site.
Args:
residues (str): Comma separated string of residues (eg: '144,170,199')
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running binding site isolation...'.format(self.id))
if not self.receptorpdb_path:
return ValueError('Please run protein_only_and_noH')
prefix = self.id + '_' + 'binding_residues'
mol2maker = op.join(self.dock_dir, '{}_make_mol2.py'.format(prefix))
outfile = op.join(self.dock_dir, '{}.mol2'.format(prefix))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(mol2maker, 'w') as mol2_maker:
mol2_maker.write('#! /usr/bin/env python\n')
mol2_maker.write('from chimera import runCommand\n')
mol2_maker.write('runCommand("open {}")\n'.format(self.receptorpdb_path))
mol2_maker.write('runCommand("delete ~:{}")\n'.format(residues))
mol2_maker.write('runCommand("write format mol2 resnum 0 {}")\n'.format(outfile))
mol2_maker.write('runCommand("close all")')
cmd = 'chimera --nogui {}'.format(mol2maker)
os.system(cmd)
os.remove(mol2maker)
os.remove('{}c'.format(mol2maker))
if ssbio.utils.is_non_zero_file(outfile):
self.bindingsite_path = outfile
log.debug('{}: successful binding site isolation'.format(self.bindingsite_path))
else:
log.critical('{}: binding_site_mol2 failed to run on receptor file'.format(self.receptorpdb_path)) | [
"def",
"binding_site_mol2",
"(",
"self",
",",
"residues",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running binding site isolation...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"if",
"not",
"self",
".",
"receptorpdb... | Create mol2 of only binding site residues from the receptor
This function will take in a .pdb file (preferably the _receptor_noH.pdb file)
and a string of residues (eg: '144,170,199') and delete all other residues in the
.pdb file. It then saves the coordinates of the selected residues as a .mol2 file.
This is necessary for Chimera to select spheres within the radius of the binding
site.
Args:
residues (str): Comma separated string of residues (eg: '144,170,199')
force_rerun (bool): If method should be rerun even if output file exists | [
"Create",
"mol2",
"of",
"only",
"binding",
"site",
"residues",
"from",
"the",
"receptor"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L278-L319 | train | 28,981 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.sphere_selector_using_residues | def sphere_selector_using_residues(self, radius, force_rerun=False):
"""Select spheres based on binding site residues
Args:
radius (int, float): Radius around binding residues to dock to
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running sphere selector...'.format(self.id))
if not self.sphgen_path or not self.bindingsite_path:
return ValueError('Please run sphgen and binding_site_mol2')
selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):
cmd = "sphere_selector {} {} {}".format(self.sphgen_path, self.bindingsite_path, radius)
rename = "mv selected_spheres.sph {}".format(selsph)
os.system(cmd)
os.system(rename)
if ssbio.utils.is_non_zero_file(selsph):
self.sphsel_path = selsph
log.debug('{}: successful sphere selection'.format(self.sphsel_path))
else:
log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path)) | python | def sphere_selector_using_residues(self, radius, force_rerun=False):
"""Select spheres based on binding site residues
Args:
radius (int, float): Radius around binding residues to dock to
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running sphere selector...'.format(self.id))
if not self.sphgen_path or not self.bindingsite_path:
return ValueError('Please run sphgen and binding_site_mol2')
selsph = op.join(self.dock_dir, '{}_selsph_binding.sph'.format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=selsph):
cmd = "sphere_selector {} {} {}".format(self.sphgen_path, self.bindingsite_path, radius)
rename = "mv selected_spheres.sph {}".format(selsph)
os.system(cmd)
os.system(rename)
if ssbio.utils.is_non_zero_file(selsph):
self.sphsel_path = selsph
log.debug('{}: successful sphere selection'.format(self.sphsel_path))
else:
log.critical('{}: sphere_selector_using_residues failed to run on sph file'.format(self.sphgen_path)) | [
"def",
"sphere_selector_using_residues",
"(",
"self",
",",
"radius",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running sphere selector...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"if",
"not",
"self",
".",
"sphgen_... | Select spheres based on binding site residues
Args:
radius (int, float): Radius around binding residues to dock to
force_rerun (bool): If method should be rerun even if output file exists | [
"Select",
"spheres",
"based",
"on",
"binding",
"site",
"residues"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L321-L347 | train | 28,982 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.showbox | def showbox(self, force_rerun=False):
"""Create the dummy PDB box around the selected spheres.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running box maker...'.format(self.id))
if not self.sphsel_path:
return ValueError('Please run sphere_selector_using_residues')
boxfile = op.join(self.dock_dir, "{}_box.pdb".format(self.id))
boxscript = op.join(self.dock_dir, "{}_box.in".format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=boxfile):
with open(boxscript, "w") as f:
f.write("Y\n")
f.write("0\n")
f.write("{}\n".format(op.basename(self.sphsel_path)))
f.write("1\n")
f.write("{}".format(op.basename(boxfile)))
cmd = "showbox < {}".format(boxscript)
os.chdir(self.dock_dir)
os.system(cmd)
if ssbio.utils.is_non_zero_file(boxfile):
self.box_path = boxfile
log.debug('{}: successful box creation'.format(self.box_path))
else:
log.critical('{}: showbox failed to run on selected spheres file'.format(self.sphsel_path)) | python | def showbox(self, force_rerun=False):
"""Create the dummy PDB box around the selected spheres.
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running box maker...'.format(self.id))
if not self.sphsel_path:
return ValueError('Please run sphere_selector_using_residues')
boxfile = op.join(self.dock_dir, "{}_box.pdb".format(self.id))
boxscript = op.join(self.dock_dir, "{}_box.in".format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=boxfile):
with open(boxscript, "w") as f:
f.write("Y\n")
f.write("0\n")
f.write("{}\n".format(op.basename(self.sphsel_path)))
f.write("1\n")
f.write("{}".format(op.basename(boxfile)))
cmd = "showbox < {}".format(boxscript)
os.chdir(self.dock_dir)
os.system(cmd)
if ssbio.utils.is_non_zero_file(boxfile):
self.box_path = boxfile
log.debug('{}: successful box creation'.format(self.box_path))
else:
log.critical('{}: showbox failed to run on selected spheres file'.format(self.sphsel_path)) | [
"def",
"showbox",
"(",
"self",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running box maker...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"if",
"not",
"self",
".",
"sphsel_path",
":",
"return",
"ValueError",
"(",... | Create the dummy PDB box around the selected spheres.
Args:
force_rerun (bool): If method should be rerun even if output file exists | [
"Create",
"the",
"dummy",
"PDB",
"box",
"around",
"the",
"selected",
"spheres",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L367-L398 | train | 28,983 |
SBRG/ssbio | ssbio/protein/structure/utils/dock.py | DOCK.auto_flexdock | def auto_flexdock(self, binding_residues, radius, ligand_path=None, force_rerun=False):
"""Run DOCK6 on a PDB file, given its binding residues and a radius around them.
Provide a path to a ligand to dock a ligand to it. If no ligand is provided, DOCK6 preparations will be run on
that structure file.
Args:
binding_residues (str): Comma separated string of residues (eg: '144,170,199')
radius (int, float): Radius around binding residues to dock to
ligand_path (str): Path to ligand (mol2 format) to dock to protein
force_rerun (bool): If method should be rerun even if output files exist
"""
log.debug('\n{}: running DOCK6...\n'
'\tBinding residues: {}\n'
'\tBinding residues radius: {}\n'
'\tLigand to dock: {}\n'.format(self.id, binding_residues, radius, op.basename(ligand_path)))
self.dockprep(force_rerun=force_rerun)
self.protein_only_and_noH(force_rerun=force_rerun)
self.dms_maker(force_rerun=force_rerun)
self.sphgen(force_rerun=force_rerun)
self.binding_site_mol2(residues=binding_residues, force_rerun=force_rerun)
self.sphere_selector_using_residues(radius=radius, force_rerun=force_rerun)
self.showbox(force_rerun=force_rerun)
self.grid(force_rerun=force_rerun)
if ligand_path:
self.do_dock6_flexible(ligand_path=ligand_path, force_rerun=force_rerun) | python | def auto_flexdock(self, binding_residues, radius, ligand_path=None, force_rerun=False):
"""Run DOCK6 on a PDB file, given its binding residues and a radius around them.
Provide a path to a ligand to dock a ligand to it. If no ligand is provided, DOCK6 preparations will be run on
that structure file.
Args:
binding_residues (str): Comma separated string of residues (eg: '144,170,199')
radius (int, float): Radius around binding residues to dock to
ligand_path (str): Path to ligand (mol2 format) to dock to protein
force_rerun (bool): If method should be rerun even if output files exist
"""
log.debug('\n{}: running DOCK6...\n'
'\tBinding residues: {}\n'
'\tBinding residues radius: {}\n'
'\tLigand to dock: {}\n'.format(self.id, binding_residues, radius, op.basename(ligand_path)))
self.dockprep(force_rerun=force_rerun)
self.protein_only_and_noH(force_rerun=force_rerun)
self.dms_maker(force_rerun=force_rerun)
self.sphgen(force_rerun=force_rerun)
self.binding_site_mol2(residues=binding_residues, force_rerun=force_rerun)
self.sphere_selector_using_residues(radius=radius, force_rerun=force_rerun)
self.showbox(force_rerun=force_rerun)
self.grid(force_rerun=force_rerun)
if ligand_path:
self.do_dock6_flexible(ligand_path=ligand_path, force_rerun=force_rerun) | [
"def",
"auto_flexdock",
"(",
"self",
",",
"binding_residues",
",",
"radius",
",",
"ligand_path",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'\\n{}: running DOCK6...\\n'",
"'\\tBinding residues: {}\\n'",
"'\\tBinding residues ra... | Run DOCK6 on a PDB file, given its binding residues and a radius around them.
Provide a path to a ligand to dock a ligand to it. If no ligand is provided, DOCK6 preparations will be run on
that structure file.
Args:
binding_residues (str): Comma separated string of residues (eg: '144,170,199')
radius (int, float): Radius around binding residues to dock to
ligand_path (str): Path to ligand (mol2 format) to dock to protein
force_rerun (bool): If method should be rerun even if output files exist | [
"Run",
"DOCK6",
"on",
"a",
"PDB",
"file",
"given",
"its",
"binding",
"residues",
"and",
"a",
"radius",
"around",
"them",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/dock.py#L562-L590 | train | 28,984 |
SBRG/ssbio | ssbio/databases/metalpdb.py | get_metalpdb_info | def get_metalpdb_info(metalpdb_lig_file):
"""Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.
Args:
metalpdb_lig_file (str): Path to .lig file
Returns:
tuple: (str, dict) of the chain ID and the parsed metal binding site information
"""
pdb_metals = ['CU', 'ZN', 'MN', 'FE', 'MG', 'CO', 'SE', 'YB', 'SF4', 'FES', 'F3S', 'NI', 'FE2']
# Information to collect
coordination_number = 0
endogenous_ligands = []
exogenous_ligands = []
# Load the structure
ss = StructProp(ident='metalpdb', structure_path=metalpdb_lig_file, file_type='pdb')
# This lig file should just be for one chain
chain_id = op.basename(metalpdb_lig_file)[5]
metal_id = (op.basename(metalpdb_lig_file).split('_')[2], op.basename(metalpdb_lig_file).split('_')[3])
for r in ss.parse_structure().first_model.get_residues():
return_id = (r.get_id(), r.get_resname())
# print(r.resname)
# Binding partners
## check if residue is a normal one (not a HETATM, WAT, or the metal that is identified)
if r.get_id()[0] != ' ':
if not r.resname.strip() in pdb_metals and r.resname != 'HOH':
# print('appended', r.resname)
exogenous_ligands.append(return_id)
else:
endogenous_ligands.append(return_id)
# Coordination number
for a in r.get_atom():
if not a.element in pdb_metals:
coordination_number += 1
infodict = {metal_id: {'endogenous_ligands' : endogenous_ligands,
'exogenous_ligands' : exogenous_ligands,
'coordination_number': coordination_number}}
return chain_id, infodict | python | def get_metalpdb_info(metalpdb_lig_file):
"""Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.
Args:
metalpdb_lig_file (str): Path to .lig file
Returns:
tuple: (str, dict) of the chain ID and the parsed metal binding site information
"""
pdb_metals = ['CU', 'ZN', 'MN', 'FE', 'MG', 'CO', 'SE', 'YB', 'SF4', 'FES', 'F3S', 'NI', 'FE2']
# Information to collect
coordination_number = 0
endogenous_ligands = []
exogenous_ligands = []
# Load the structure
ss = StructProp(ident='metalpdb', structure_path=metalpdb_lig_file, file_type='pdb')
# This lig file should just be for one chain
chain_id = op.basename(metalpdb_lig_file)[5]
metal_id = (op.basename(metalpdb_lig_file).split('_')[2], op.basename(metalpdb_lig_file).split('_')[3])
for r in ss.parse_structure().first_model.get_residues():
return_id = (r.get_id(), r.get_resname())
# print(r.resname)
# Binding partners
## check if residue is a normal one (not a HETATM, WAT, or the metal that is identified)
if r.get_id()[0] != ' ':
if not r.resname.strip() in pdb_metals and r.resname != 'HOH':
# print('appended', r.resname)
exogenous_ligands.append(return_id)
else:
endogenous_ligands.append(return_id)
# Coordination number
for a in r.get_atom():
if not a.element in pdb_metals:
coordination_number += 1
infodict = {metal_id: {'endogenous_ligands' : endogenous_ligands,
'exogenous_ligands' : exogenous_ligands,
'coordination_number': coordination_number}}
return chain_id, infodict | [
"def",
"get_metalpdb_info",
"(",
"metalpdb_lig_file",
")",
":",
"pdb_metals",
"=",
"[",
"'CU'",
",",
"'ZN'",
",",
"'MN'",
",",
"'FE'",
",",
"'MG'",
",",
"'CO'",
",",
"'SE'",
",",
"'YB'",
",",
"'SF4'",
",",
"'FES'",
",",
"'F3S'",
",",
"'NI'",
",",
"'F... | Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.
Args:
metalpdb_lig_file (str): Path to .lig file
Returns:
tuple: (str, dict) of the chain ID and the parsed metal binding site information | [
"Parse",
"a",
"MetalPDB",
".",
"lig",
"file",
"and",
"return",
"a",
"tuple",
"of",
"the",
"chain",
"ID",
"it",
"represents",
"along",
"with",
"metal",
"binding",
"information",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/databases/metalpdb.py#L6-L52 | train | 28,985 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | pairwise_sequence_alignment | def pairwise_sequence_alignment(a_seq, b_seq, engine, a_seq_id=None, b_seq_id=None,
gapopen=10, gapextend=0.5,
outfile=None, outdir=None, force_rerun=False):
"""Run a global pairwise sequence alignment between two sequence strings.
Args:
a_seq (str, Seq, SeqRecord, SeqProp): Reference sequence
b_seq (str, Seq, SeqRecord, SeqProp): Sequence to be aligned to reference
engine (str): `biopython` or `needle` - which pairwise alignment program to use
a_seq_id (str): Reference sequence ID. If not set, is "a_seq"
b_seq_id (str): Sequence to be aligned ID. If not set, is "b_seq"
gapopen (int): Only for `needle` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for `needle` - Gap extension penalty is added to the standard gap penalty for each
base or residue in the gap
outfile (str): Only for `needle` - name of output file. If not set, is {id_a}_{id_b}_align.txt
outdir (str): Only for `needle` - Path to output directory. Default is the current directory.
force_rerun (bool): Only for `needle` - Default False, set to True if you want to rerun the alignment
if outfile exists.
Returns:
MultipleSeqAlignment: Biopython object to represent an alignment
"""
engine = engine.lower()
if engine not in ['biopython', 'needle']:
raise ValueError('{}: invalid engine'.format(engine))
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_seq = ssbio.protein.sequence.utils.cast_to_str(a_seq)
b_seq = ssbio.protein.sequence.utils.cast_to_str(b_seq)
if engine == 'biopython':
# TODO: allow different matrices? needle uses blosum62 by default, how to change that?
# TODO: how to define gap open/extend when using matrix in biopython global alignment?
log.warning('Gap penalties not implemented in Biopython yet')
blosum62 = matlist.blosum62
alignments = pairwise2.align.globaldx(a_seq, b_seq, blosum62) # TODO: add gap penalties
best_alignment = alignments[0]
a = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[0], id=a_seq_id)
b = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[1], id=b_seq_id)
alignment = MultipleSeqAlignment([a, b], annotations={"score": best_alignment[2],
"start": best_alignment[3],
"end" : best_alignment[4]})
alignment.annotations['percent_identity'] = get_percent_identity(best_alignment[0], best_alignment[1]) * 100
return alignment
if engine == 'needle':
alignment_file = run_needle_alignment(seq_a=a_seq, seq_b=b_seq, gapopen=gapopen, gapextend=gapextend,
write_outfile=True, # Has to be true, AlignIO parses files on disk
outdir=outdir, outfile=outfile, force_rerun=force_rerun)
log.debug('Needle alignment at {}'.format(alignment_file))
if not op.exists(alignment_file):
raise ValueError('{}: needle alignment file does not exist'.format(alignment_file))
# Use AlignIO to parse the needle alignment, alignments[0] is the first alignment (the only one in pairwise)
# alignments = list(AlignIO.parse(alignment_file, "emboss"))
# alignment = alignments[0]
alignment = needle_statistics_alignio(alignment_file)
# Rename the sequence IDs
alignment[0].id = a_seq_id
alignment[1].id = b_seq_id
# # Add needle statistics as annotations in the alignment object
# stats = needle_statistics(alignment_file)
# alignment_ids = list(stats.keys())
# if len(alignment_ids) > 1:
# raise ValueError('Needle alignment file contains more than one pairwise alignment')
# needle_id = alignment_ids[0]
# alignment.annotations['percent_identity'] = stats[needle_id]['percent_identity']
# alignment.annotations['percent_similarity'] = stats[needle_id]['percent_similarity']
# alignment.annotations['percent_gaps'] = stats[needle_id]['percent_gaps']
# alignment.annotations['score'] = stats[needle_id]['score']
return alignment | python | def pairwise_sequence_alignment(a_seq, b_seq, engine, a_seq_id=None, b_seq_id=None,
gapopen=10, gapextend=0.5,
outfile=None, outdir=None, force_rerun=False):
"""Run a global pairwise sequence alignment between two sequence strings.
Args:
a_seq (str, Seq, SeqRecord, SeqProp): Reference sequence
b_seq (str, Seq, SeqRecord, SeqProp): Sequence to be aligned to reference
engine (str): `biopython` or `needle` - which pairwise alignment program to use
a_seq_id (str): Reference sequence ID. If not set, is "a_seq"
b_seq_id (str): Sequence to be aligned ID. If not set, is "b_seq"
gapopen (int): Only for `needle` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for `needle` - Gap extension penalty is added to the standard gap penalty for each
base or residue in the gap
outfile (str): Only for `needle` - name of output file. If not set, is {id_a}_{id_b}_align.txt
outdir (str): Only for `needle` - Path to output directory. Default is the current directory.
force_rerun (bool): Only for `needle` - Default False, set to True if you want to rerun the alignment
if outfile exists.
Returns:
MultipleSeqAlignment: Biopython object to represent an alignment
"""
engine = engine.lower()
if engine not in ['biopython', 'needle']:
raise ValueError('{}: invalid engine'.format(engine))
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_seq = ssbio.protein.sequence.utils.cast_to_str(a_seq)
b_seq = ssbio.protein.sequence.utils.cast_to_str(b_seq)
if engine == 'biopython':
# TODO: allow different matrices? needle uses blosum62 by default, how to change that?
# TODO: how to define gap open/extend when using matrix in biopython global alignment?
log.warning('Gap penalties not implemented in Biopython yet')
blosum62 = matlist.blosum62
alignments = pairwise2.align.globaldx(a_seq, b_seq, blosum62) # TODO: add gap penalties
best_alignment = alignments[0]
a = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[0], id=a_seq_id)
b = ssbio.protein.sequence.utils.cast_to_seq_record(best_alignment[1], id=b_seq_id)
alignment = MultipleSeqAlignment([a, b], annotations={"score": best_alignment[2],
"start": best_alignment[3],
"end" : best_alignment[4]})
alignment.annotations['percent_identity'] = get_percent_identity(best_alignment[0], best_alignment[1]) * 100
return alignment
if engine == 'needle':
alignment_file = run_needle_alignment(seq_a=a_seq, seq_b=b_seq, gapopen=gapopen, gapextend=gapextend,
write_outfile=True, # Has to be true, AlignIO parses files on disk
outdir=outdir, outfile=outfile, force_rerun=force_rerun)
log.debug('Needle alignment at {}'.format(alignment_file))
if not op.exists(alignment_file):
raise ValueError('{}: needle alignment file does not exist'.format(alignment_file))
# Use AlignIO to parse the needle alignment, alignments[0] is the first alignment (the only one in pairwise)
# alignments = list(AlignIO.parse(alignment_file, "emboss"))
# alignment = alignments[0]
alignment = needle_statistics_alignio(alignment_file)
# Rename the sequence IDs
alignment[0].id = a_seq_id
alignment[1].id = b_seq_id
# # Add needle statistics as annotations in the alignment object
# stats = needle_statistics(alignment_file)
# alignment_ids = list(stats.keys())
# if len(alignment_ids) > 1:
# raise ValueError('Needle alignment file contains more than one pairwise alignment')
# needle_id = alignment_ids[0]
# alignment.annotations['percent_identity'] = stats[needle_id]['percent_identity']
# alignment.annotations['percent_similarity'] = stats[needle_id]['percent_similarity']
# alignment.annotations['percent_gaps'] = stats[needle_id]['percent_gaps']
# alignment.annotations['score'] = stats[needle_id]['score']
return alignment | [
"def",
"pairwise_sequence_alignment",
"(",
"a_seq",
",",
"b_seq",
",",
"engine",
",",
"a_seq_id",
"=",
"None",
",",
"b_seq_id",
"=",
"None",
",",
"gapopen",
"=",
"10",
",",
"gapextend",
"=",
"0.5",
",",
"outfile",
"=",
"None",
",",
"outdir",
"=",
"None",... | Run a global pairwise sequence alignment between two sequence strings.
Args:
a_seq (str, Seq, SeqRecord, SeqProp): Reference sequence
b_seq (str, Seq, SeqRecord, SeqProp): Sequence to be aligned to reference
engine (str): `biopython` or `needle` - which pairwise alignment program to use
a_seq_id (str): Reference sequence ID. If not set, is "a_seq"
b_seq_id (str): Sequence to be aligned ID. If not set, is "b_seq"
gapopen (int): Only for `needle` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for `needle` - Gap extension penalty is added to the standard gap penalty for each
base or residue in the gap
outfile (str): Only for `needle` - name of output file. If not set, is {id_a}_{id_b}_align.txt
outdir (str): Only for `needle` - Path to output directory. Default is the current directory.
force_rerun (bool): Only for `needle` - Default False, set to True if you want to rerun the alignment
if outfile exists.
Returns:
MultipleSeqAlignment: Biopython object to represent an alignment | [
"Run",
"a",
"global",
"pairwise",
"sequence",
"alignment",
"between",
"two",
"sequence",
"strings",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L27-L110 | train | 28,986 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | run_needle_alignment | def run_needle_alignment(seq_a, seq_b, gapopen=10, gapextend=0.5, write_outfile=True,
outdir=None, outfile=None, force_rerun=False):
"""Run the needle alignment program for two strings and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Using strings as input: https://www.biostars.org/p/91124/
Args:
id_a: ID of reference sequence
seq_a (str, Seq, SeqRecord): Reference sequence
id_b: ID of sequence to be aligned
seq_b (str, Seq, SeqRecord): String representation of sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format.
"""
# TODO: check if needle is installed and raise error if not
if not outdir:
outdir = ''
# TODO: rewrite using utils functions - does not report error if needle is not installed currently
# TODO: rethink outdir/outfile, also if this should return the tempfile or just a file object or whatever
if write_outfile:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
if not outfile:
outfile = op.join(tempfile.gettempdir(), 'temp_alignment.needle')
else:
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
cmd = 'needle -outfile="{}" -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(
outfile, seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile
else:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
cmd = 'needle -auto -stdout -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
stdout = command.stdout.read()
return stdout | python | def run_needle_alignment(seq_a, seq_b, gapopen=10, gapextend=0.5, write_outfile=True,
outdir=None, outfile=None, force_rerun=False):
"""Run the needle alignment program for two strings and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Using strings as input: https://www.biostars.org/p/91124/
Args:
id_a: ID of reference sequence
seq_a (str, Seq, SeqRecord): Reference sequence
id_b: ID of sequence to be aligned
seq_b (str, Seq, SeqRecord): String representation of sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format.
"""
# TODO: check if needle is installed and raise error if not
if not outdir:
outdir = ''
# TODO: rewrite using utils functions - does not report error if needle is not installed currently
# TODO: rethink outdir/outfile, also if this should return the tempfile or just a file object or whatever
if write_outfile:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
if not outfile:
outfile = op.join(tempfile.gettempdir(), 'temp_alignment.needle')
else:
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
cmd = 'needle -outfile="{}" -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(
outfile, seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile
else:
seq_a = ssbio.protein.sequence.utils.cast_to_str(seq_a)
seq_b = ssbio.protein.sequence.utils.cast_to_str(seq_b)
cmd = 'needle -auto -stdout -asequence=asis::{} -bsequence=asis::{} -gapopen={} -gapextend={}'.format(seq_a, seq_b, gapopen, gapextend)
command = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
stdout = command.stdout.read()
return stdout | [
"def",
"run_needle_alignment",
"(",
"seq_a",
",",
"seq_b",
",",
"gapopen",
"=",
"10",
",",
"gapextend",
"=",
"0.5",
",",
"write_outfile",
"=",
"True",
",",
"outdir",
"=",
"None",
",",
"outfile",
"=",
"None",
",",
"force_rerun",
"=",
"False",
")",
":",
... | Run the needle alignment program for two strings and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Using strings as input: https://www.biostars.org/p/91124/
Args:
id_a: ID of reference sequence
seq_a (str, Seq, SeqRecord): Reference sequence
id_b: ID of sequence to be aligned
seq_b (str, Seq, SeqRecord): String representation of sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format. | [
"Run",
"the",
"needle",
"alignment",
"program",
"for",
"two",
"strings",
"and",
"return",
"the",
"raw",
"alignment",
"result",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L113-L171 | train | 28,987 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | run_needle_alignment_on_files | def run_needle_alignment_on_files(id_a, faa_a, id_b, faa_b, gapopen=10, gapextend=0.5,
outdir='', outfile='', force_rerun=False):
"""Run the needle alignment program for two fasta files and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Args:
id_a: ID of reference sequence
faa_a: File path to reference sequence
id_b: ID of sequence to be aligned
faa_b: File path to sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format.
"""
# TODO: rewrite using utils functions so we can check for needle installation
# # If you don't want to save the output file, just run the alignment and return the raw results
# if not outfile and not outdir:
# needle_cline = NeedleCommandline(asequence=faa_a, bsequence=faa_b,
# gapopen=gapopen, gapextend=gapextend,
# stdout=True, auto=True)
# stdout, stderr = needle_cline()
# raw_alignment_text = stdout.decode('utf-8')
# Make a default name if no outfile is set
if not outfile:
outfile = op.join(outdir, '{}_{}.needle'.format(id_a, id_b))
else:
outfile = op.join(outdir, outfile)
# Check if the outfile already exists
if op.exists(outfile) and not force_rerun:
return outfile
# If it doesn't exist, or force_rerun=True, run the alignment
else:
cmd = 'needle -outfile="{}" -asequence="{}" -bsequence="{}" -gapopen={} -gapextend={}'.format(outfile,
faa_a,
faa_b,
gapopen,
gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile | python | def run_needle_alignment_on_files(id_a, faa_a, id_b, faa_b, gapopen=10, gapextend=0.5,
outdir='', outfile='', force_rerun=False):
"""Run the needle alignment program for two fasta files and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Args:
id_a: ID of reference sequence
faa_a: File path to reference sequence
id_b: ID of sequence to be aligned
faa_b: File path to sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format.
"""
# TODO: rewrite using utils functions so we can check for needle installation
# # If you don't want to save the output file, just run the alignment and return the raw results
# if not outfile and not outdir:
# needle_cline = NeedleCommandline(asequence=faa_a, bsequence=faa_b,
# gapopen=gapopen, gapextend=gapextend,
# stdout=True, auto=True)
# stdout, stderr = needle_cline()
# raw_alignment_text = stdout.decode('utf-8')
# Make a default name if no outfile is set
if not outfile:
outfile = op.join(outdir, '{}_{}.needle'.format(id_a, id_b))
else:
outfile = op.join(outdir, outfile)
# Check if the outfile already exists
if op.exists(outfile) and not force_rerun:
return outfile
# If it doesn't exist, or force_rerun=True, run the alignment
else:
cmd = 'needle -outfile="{}" -asequence="{}" -bsequence="{}" -gapopen={} -gapextend={}'.format(outfile,
faa_a,
faa_b,
gapopen,
gapextend)
command = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = command.communicate()
return outfile | [
"def",
"run_needle_alignment_on_files",
"(",
"id_a",
",",
"faa_a",
",",
"id_b",
",",
"faa_b",
",",
"gapopen",
"=",
"10",
",",
"gapextend",
"=",
"0.5",
",",
"outdir",
"=",
"''",
",",
"outfile",
"=",
"''",
",",
"force_rerun",
"=",
"False",
")",
":",
"# T... | Run the needle alignment program for two fasta files and return the raw alignment result.
More info:
EMBOSS needle: http://www.bioinformatics.nl/cgi-bin/emboss/help/needle
Biopython wrapper: http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc84
Args:
id_a: ID of reference sequence
faa_a: File path to reference sequence
id_b: ID of sequence to be aligned
faa_b: File path to sequence to be aligned
gapopen: Gap open penalty is the score taken away when a gap is created
gapextend: Gap extension penalty is added to the standard gap penalty for each base or residue in the gap
outdir (str, optional): Path to output directory. Default is the current directory.
outfile (str, optional): Name of output file. If not set, is {id_a}_{id_b}_align.txt
force_rerun (bool): Default False, set to True if you want to rerun the alignment if outfile exists.
Returns:
str: Raw alignment result of the needle alignment in srspair format. | [
"Run",
"the",
"needle",
"alignment",
"program",
"for",
"two",
"fasta",
"files",
"and",
"return",
"the",
"raw",
"alignment",
"result",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L174-L229 | train | 28,988 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | get_percent_identity | def get_percent_identity(a_aln_seq, b_aln_seq):
"""Get the percent identity between two alignment strings"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
count = 0
gaps = 0
for n in range(0, len(a_aln_seq)):
if a_aln_seq[n] == b_aln_seq[n]:
if a_aln_seq[n] != "-":
count += 1
else:
gaps += 1
return count / float((len(a_aln_seq) - gaps)) | python | def get_percent_identity(a_aln_seq, b_aln_seq):
"""Get the percent identity between two alignment strings"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
count = 0
gaps = 0
for n in range(0, len(a_aln_seq)):
if a_aln_seq[n] == b_aln_seq[n]:
if a_aln_seq[n] != "-":
count += 1
else:
gaps += 1
return count / float((len(a_aln_seq) - gaps)) | [
"def",
"get_percent_identity",
"(",
"a_aln_seq",
",",
"b_aln_seq",
")",
":",
"if",
"len",
"(",
"a_aln_seq",
")",
"!=",
"len",
"(",
"b_aln_seq",
")",
":",
"raise",
"ValueError",
"(",
"'Sequence lengths not equal - was an alignment run?'",
")",
"count",
"=",
"0",
... | Get the percent identity between two alignment strings | [
"Get",
"the",
"percent",
"identity",
"between",
"two",
"alignment",
"strings"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L232-L247 | train | 28,989 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | get_alignment_df | def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df | python | def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df | [
"def",
"get_alignment_df",
"(",
"a_aln_seq",
",",
"b_aln_seq",
",",
"a_seq_id",
"=",
"None",
",",
"b_seq_id",
"=",
"None",
")",
":",
"if",
"len",
"(",
"a_aln_seq",
")",
"!=",
"len",
"(",
"b_aln_seq",
")",
":",
"raise",
"ValueError",
"(",
"'Sequence lengths... | Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment | [
"Summarize",
"two",
"alignment",
"strings",
"in",
"a",
"dataframe",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L250-L323 | train | 28,990 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | get_alignment_df_from_file | def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):
"""Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
"""
alignments = list(AlignIO.parse(alignment_file, "emboss"))
alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])
for alignment in alignments:
if not a_seq_id:
a_seq_id = list(alignment)[0].id
a_seq = str(list(alignment)[0].seq)
if not b_seq_id:
b_seq_id = list(alignment)[1].id
b_seq = str(list(alignment)[1].seq)
df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)
alignment_df = alignment_df.append(df).reset_index(drop=True)
return alignment_df | python | def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None):
"""Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment
"""
alignments = list(AlignIO.parse(alignment_file, "emboss"))
alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos'])
for alignment in alignments:
if not a_seq_id:
a_seq_id = list(alignment)[0].id
a_seq = str(list(alignment)[0].seq)
if not b_seq_id:
b_seq_id = list(alignment)[1].id
b_seq = str(list(alignment)[1].seq)
df = get_alignment_df(a_seq, b_seq, a_seq_id, b_seq_id)
alignment_df = alignment_df.append(df).reset_index(drop=True)
return alignment_df | [
"def",
"get_alignment_df_from_file",
"(",
"alignment_file",
",",
"a_seq_id",
"=",
"None",
",",
"b_seq_id",
"=",
"None",
")",
":",
"alignments",
"=",
"list",
"(",
"AlignIO",
".",
"parse",
"(",
"alignment_file",
",",
"\"emboss\"",
")",
")",
"alignment_df",
"=",
... | Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences.
Args:
alignment_file:
a_seq_id: Optional specification of the ID of the reference sequence
b_seq_id: Optional specification of the ID of the aligned sequence
Returns:
Pandas DataFrame: all positions in the alignment | [
"Get",
"a",
"Pandas",
"DataFrame",
"of",
"the",
"Needle",
"alignment",
"results",
".",
"Contains",
"all",
"positions",
"of",
"the",
"sequences",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L326-L352 | train | 28,991 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | get_deletions | def get_deletions(aln_df):
"""Get a list of tuples indicating the first and last residues of a deletion region, as well as the length of the deletion.
Examples:
# Deletion of residues 1 to 4, length 4
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: 'M', 1: 'G', 2: 'I', 3: 'T'}, 'id_a_pos': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'id_b_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'type': {0: 'deletion', 1: 'deletion', 2: 'deletion', 3: 'deletion'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_deletions(my_alignment)
[((1.0, 4.0), 4)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((deletion_start_resnum, deletion_end_resnum), deletion_length)
"""
deletion_df = aln_df[aln_df['type'] == 'deletion']
if not deletion_df.empty:
deletion_df['id_a_pos'] = deletion_df['id_a_pos'].astype(int)
deletions = []
for k, g in groupby(deletion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
deletion_indices = (min(tmp), max(tmp))
deletion_start_ix = deletion_indices[0]
deletion_end_ix = deletion_indices[1]
deletion_length = deletion_end_ix - deletion_start_ix + 1
id_a_pos_deletion_start = aln_df.ix[deletion_start_ix].id_a_pos
id_a_pos_deletion_end = aln_df.ix[deletion_end_ix].id_a_pos
deletion_region = (id_a_pos_deletion_start, id_a_pos_deletion_end)
# Logging where the insertion is
log.debug('Deletion of length {} at residues {}'.format(deletion_length, deletion_region))
to_append = (deletion_region, deletion_length)
deletions.append(to_append)
return deletions | python | def get_deletions(aln_df):
"""Get a list of tuples indicating the first and last residues of a deletion region, as well as the length of the deletion.
Examples:
# Deletion of residues 1 to 4, length 4
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: 'M', 1: 'G', 2: 'I', 3: 'T'}, 'id_a_pos': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'id_b_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'type': {0: 'deletion', 1: 'deletion', 2: 'deletion', 3: 'deletion'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_deletions(my_alignment)
[((1.0, 4.0), 4)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((deletion_start_resnum, deletion_end_resnum), deletion_length)
"""
deletion_df = aln_df[aln_df['type'] == 'deletion']
if not deletion_df.empty:
deletion_df['id_a_pos'] = deletion_df['id_a_pos'].astype(int)
deletions = []
for k, g in groupby(deletion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
deletion_indices = (min(tmp), max(tmp))
deletion_start_ix = deletion_indices[0]
deletion_end_ix = deletion_indices[1]
deletion_length = deletion_end_ix - deletion_start_ix + 1
id_a_pos_deletion_start = aln_df.ix[deletion_start_ix].id_a_pos
id_a_pos_deletion_end = aln_df.ix[deletion_end_ix].id_a_pos
deletion_region = (id_a_pos_deletion_start, id_a_pos_deletion_end)
# Logging where the insertion is
log.debug('Deletion of length {} at residues {}'.format(deletion_length, deletion_region))
to_append = (deletion_region, deletion_length)
deletions.append(to_append)
return deletions | [
"def",
"get_deletions",
"(",
"aln_df",
")",
":",
"deletion_df",
"=",
"aln_df",
"[",
"aln_df",
"[",
"'type'",
"]",
"==",
"'deletion'",
"]",
"if",
"not",
"deletion_df",
".",
"empty",
":",
"deletion_df",
"[",
"'id_a_pos'",
"]",
"=",
"deletion_df",
"[",
"'id_a... | Get a list of tuples indicating the first and last residues of a deletion region, as well as the length of the deletion.
Examples:
# Deletion of residues 1 to 4, length 4
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: 'M', 1: 'G', 2: 'I', 3: 'T'}, 'id_a_pos': {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'id_b_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: np.nan}, 'type': {0: 'deletion', 1: 'deletion', 2: 'deletion', 3: 'deletion'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_deletions(my_alignment)
[((1.0, 4.0), 4)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((deletion_start_resnum, deletion_end_resnum), deletion_length) | [
"Get",
"a",
"list",
"of",
"tuples",
"indicating",
"the",
"first",
"and",
"last",
"residues",
"of",
"a",
"deletion",
"region",
"as",
"well",
"as",
"the",
"length",
"of",
"the",
"deletion",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L394-L437 | train | 28,992 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | get_insertions | def get_insertions(aln_df):
"""Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion.
If the first tuple is:
(-1, 1) that means the insertion is at the beginning of the original protein
(X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein
Examples:
# Insertion at beginning, length 3
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_insertions(my_alignment)
[((-1, 1.0), 3)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length)
"""
insertion_df = aln_df[aln_df['type'] == 'insertion']
# if not insertion_df.empty: # don't need to do this for insertions
# insertion_df['id_a_pos'] = insertion_df['id_a_pos'].astype(int)
insertions = []
for k, g in groupby(insertion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
insertion_indices = (min(tmp), max(tmp))
insertion_start = insertion_indices[0] - 1
insertion_end = insertion_indices[1] + 1
# Checking if insertion is at the beginning or end
if insertion_start < 0:
insertion_start = insertion_indices[0]
insertion_length = insertion_end - insertion_start
elif insertion_end >= len(aln_df):
insertion_end = insertion_indices[1]
insertion_length = insertion_end - insertion_start
else:
insertion_length = insertion_end - insertion_start - 1
id_a_pos_insertion_start = aln_df.ix[insertion_start].id_a_pos
id_a_pos_insertion_end = aln_df.ix[insertion_end].id_a_pos
# Checking if insertion is at the beginning or end
if np.isnan(id_a_pos_insertion_start) and id_a_pos_insertion_end == 1:
insertion_region = (-1, id_a_pos_insertion_end)
elif np.isnan(id_a_pos_insertion_end):
insertion_region = (id_a_pos_insertion_start, float('Inf'))
else:
insertion_region = (id_a_pos_insertion_start, id_a_pos_insertion_end)
# Logging where the insertion is
if insertion_region[0] == -1:
log.debug('Insertion of length {} at beginning'.format(insertion_length))
elif insertion_region[1] == float('Inf'):
log.debug('Insertion of length {} at end'.format(insertion_length))
else:
log.debug('Insertion of length {} at residues {}'.format(insertion_length, insertion_region))
to_append = (insertion_region, insertion_length)
insertions.append(to_append)
return insertions | python | def get_insertions(aln_df):
"""Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion.
If the first tuple is:
(-1, 1) that means the insertion is at the beginning of the original protein
(X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein
Examples:
# Insertion at beginning, length 3
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_insertions(my_alignment)
[((-1, 1.0), 3)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length)
"""
insertion_df = aln_df[aln_df['type'] == 'insertion']
# if not insertion_df.empty: # don't need to do this for insertions
# insertion_df['id_a_pos'] = insertion_df['id_a_pos'].astype(int)
insertions = []
for k, g in groupby(insertion_df.index, key=lambda n, c=count(): n - next(c)):
tmp = list(g)
insertion_indices = (min(tmp), max(tmp))
insertion_start = insertion_indices[0] - 1
insertion_end = insertion_indices[1] + 1
# Checking if insertion is at the beginning or end
if insertion_start < 0:
insertion_start = insertion_indices[0]
insertion_length = insertion_end - insertion_start
elif insertion_end >= len(aln_df):
insertion_end = insertion_indices[1]
insertion_length = insertion_end - insertion_start
else:
insertion_length = insertion_end - insertion_start - 1
id_a_pos_insertion_start = aln_df.ix[insertion_start].id_a_pos
id_a_pos_insertion_end = aln_df.ix[insertion_end].id_a_pos
# Checking if insertion is at the beginning or end
if np.isnan(id_a_pos_insertion_start) and id_a_pos_insertion_end == 1:
insertion_region = (-1, id_a_pos_insertion_end)
elif np.isnan(id_a_pos_insertion_end):
insertion_region = (id_a_pos_insertion_start, float('Inf'))
else:
insertion_region = (id_a_pos_insertion_start, id_a_pos_insertion_end)
# Logging where the insertion is
if insertion_region[0] == -1:
log.debug('Insertion of length {} at beginning'.format(insertion_length))
elif insertion_region[1] == float('Inf'):
log.debug('Insertion of length {} at end'.format(insertion_length))
else:
log.debug('Insertion of length {} at residues {}'.format(insertion_length, insertion_region))
to_append = (insertion_region, insertion_length)
insertions.append(to_append)
return insertions | [
"def",
"get_insertions",
"(",
"aln_df",
")",
":",
"insertion_df",
"=",
"aln_df",
"[",
"aln_df",
"[",
"'type'",
"]",
"==",
"'insertion'",
"]",
"# if not insertion_df.empty: # don't need to do this for insertions",
"# insertion_df['id_a_pos'] = insertion_df['id_a_pos'].astype(i... | Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion.
If the first tuple is:
(-1, 1) that means the insertion is at the beginning of the original protein
(X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein
Examples:
# Insertion at beginning, length 3
>>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}}
>>> my_alignment = pd.DataFrame.from_dict(test)
>>> get_insertions(my_alignment)
[((-1, 1.0), 3)]
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length) | [
"Get",
"a",
"list",
"of",
"tuples",
"indicating",
"the",
"first",
"and",
"last",
"residues",
"of",
"a",
"insertion",
"region",
"as",
"well",
"as",
"the",
"length",
"of",
"the",
"insertion",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L440-L507 | train | 28,993 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | map_resnum_a_to_resnum_b | def map_resnum_a_to_resnum_b(resnums, a_aln, b_aln):
"""Map a residue number in a sequence to the corresponding residue number in an aligned sequence.
Examples:
>>> map_resnum_a_to_resnum_b([1,2,3], '--ABCDEF', 'XXABCDEF')
{1: 3, 2: 4, 3: 5}
>>> map_resnum_a_to_resnum_b(5, '--ABCDEF', 'XXABCDEF')
{5: 7}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF', 'ABCD--')
{}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF--', 'ABCD--GH')
{}
>>> map_resnum_a_to_resnum_b([9,10], '--MKCDLHRLE-E', 'VSNEYSFEGYKLD')
{9: 11, 10: 13}
Args:
resnums (int, list): Residue number or numbers in the first aligned sequence
a_aln (str, Seq, SeqRecord): Aligned sequence string
b_aln (str, Seq, SeqRecord): Aligned sequence string
Returns:
int: Residue number in the second aligned sequence
"""
resnums = ssbio.utils.force_list(resnums)
aln_df = get_alignment_df(a_aln, b_aln)
maps = aln_df[aln_df.id_a_pos.isin(resnums)]
able_to_map_to_b = maps[pd.notnull(maps.id_b_pos)]
successful_map_from_a = able_to_map_to_b.id_a_pos.values.tolist()
mapping = dict([(int(a), int(b)) for a,b in zip(able_to_map_to_b.id_a_pos, able_to_map_to_b.id_b_pos)])
cant_map = list(set(resnums).difference(successful_map_from_a))
if len(cant_map) > 0:
log.warning('Unable to map residue numbers {} in first sequence to second'.format(cant_map))
return mapping | python | def map_resnum_a_to_resnum_b(resnums, a_aln, b_aln):
"""Map a residue number in a sequence to the corresponding residue number in an aligned sequence.
Examples:
>>> map_resnum_a_to_resnum_b([1,2,3], '--ABCDEF', 'XXABCDEF')
{1: 3, 2: 4, 3: 5}
>>> map_resnum_a_to_resnum_b(5, '--ABCDEF', 'XXABCDEF')
{5: 7}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF', 'ABCD--')
{}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF--', 'ABCD--GH')
{}
>>> map_resnum_a_to_resnum_b([9,10], '--MKCDLHRLE-E', 'VSNEYSFEGYKLD')
{9: 11, 10: 13}
Args:
resnums (int, list): Residue number or numbers in the first aligned sequence
a_aln (str, Seq, SeqRecord): Aligned sequence string
b_aln (str, Seq, SeqRecord): Aligned sequence string
Returns:
int: Residue number in the second aligned sequence
"""
resnums = ssbio.utils.force_list(resnums)
aln_df = get_alignment_df(a_aln, b_aln)
maps = aln_df[aln_df.id_a_pos.isin(resnums)]
able_to_map_to_b = maps[pd.notnull(maps.id_b_pos)]
successful_map_from_a = able_to_map_to_b.id_a_pos.values.tolist()
mapping = dict([(int(a), int(b)) for a,b in zip(able_to_map_to_b.id_a_pos, able_to_map_to_b.id_b_pos)])
cant_map = list(set(resnums).difference(successful_map_from_a))
if len(cant_map) > 0:
log.warning('Unable to map residue numbers {} in first sequence to second'.format(cant_map))
return mapping | [
"def",
"map_resnum_a_to_resnum_b",
"(",
"resnums",
",",
"a_aln",
",",
"b_aln",
")",
":",
"resnums",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"resnums",
")",
"aln_df",
"=",
"get_alignment_df",
"(",
"a_aln",
",",
"b_aln",
")",
"maps",
"=",
"aln_df... | Map a residue number in a sequence to the corresponding residue number in an aligned sequence.
Examples:
>>> map_resnum_a_to_resnum_b([1,2,3], '--ABCDEF', 'XXABCDEF')
{1: 3, 2: 4, 3: 5}
>>> map_resnum_a_to_resnum_b(5, '--ABCDEF', 'XXABCDEF')
{5: 7}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF', 'ABCD--')
{}
>>> map_resnum_a_to_resnum_b(5, 'ABCDEF--', 'ABCD--GH')
{}
>>> map_resnum_a_to_resnum_b([9,10], '--MKCDLHRLE-E', 'VSNEYSFEGYKLD')
{9: 11, 10: 13}
Args:
resnums (int, list): Residue number or numbers in the first aligned sequence
a_aln (str, Seq, SeqRecord): Aligned sequence string
b_aln (str, Seq, SeqRecord): Aligned sequence string
Returns:
int: Residue number in the second aligned sequence | [
"Map",
"a",
"residue",
"number",
"in",
"a",
"sequence",
"to",
"the",
"corresponding",
"residue",
"number",
"in",
"an",
"aligned",
"sequence",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L510-L548 | train | 28,994 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | pairwise_alignment_stats | def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):
"""Get a report of a pairwise alignment.
Args:
reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form
Returns:
dict: Dictionary of information on mutations, insertions, sequence identity, etc.
"""
if len(reference_seq_aln) != len(other_seq_aln):
raise ValueError('Sequence lengths not equal - was an alignment run?')
reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)
other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)
infodict = {}
# Percent identity to the reference sequence
stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['percent_identity'] = stats_percent_ident
# Other alignment results
aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['deletions'] = get_deletions(aln_df)
infodict['insertions'] = get_insertions(aln_df)
infodict['mutations'] = get_mutations(aln_df)
infodict['unresolved'] = get_unresolved(aln_df)
return infodict | python | def pairwise_alignment_stats(reference_seq_aln, other_seq_aln):
"""Get a report of a pairwise alignment.
Args:
reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form
Returns:
dict: Dictionary of information on mutations, insertions, sequence identity, etc.
"""
if len(reference_seq_aln) != len(other_seq_aln):
raise ValueError('Sequence lengths not equal - was an alignment run?')
reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)
other_seq_aln = ssbio.protein.sequence.utils.cast_to_str(other_seq_aln)
infodict = {}
# Percent identity to the reference sequence
stats_percent_ident = get_percent_identity(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['percent_identity'] = stats_percent_ident
# Other alignment results
aln_df = get_alignment_df(a_aln_seq=reference_seq_aln, b_aln_seq=other_seq_aln)
infodict['deletions'] = get_deletions(aln_df)
infodict['insertions'] = get_insertions(aln_df)
infodict['mutations'] = get_mutations(aln_df)
infodict['unresolved'] = get_unresolved(aln_df)
return infodict | [
"def",
"pairwise_alignment_stats",
"(",
"reference_seq_aln",
",",
"other_seq_aln",
")",
":",
"if",
"len",
"(",
"reference_seq_aln",
")",
"!=",
"len",
"(",
"other_seq_aln",
")",
":",
"raise",
"ValueError",
"(",
"'Sequence lengths not equal - was an alignment run?'",
")",... | Get a report of a pairwise alignment.
Args:
reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
other_seq_aln (str, Seq, SeqRecord): Other sequence, alignment form
Returns:
dict: Dictionary of information on mutations, insertions, sequence identity, etc. | [
"Get",
"a",
"report",
"of",
"a",
"pairwise",
"alignment",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L551-L581 | train | 28,995 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | needle_statistics | def needle_statistics(infile):
"""Reads in a needle alignment file and spits out statistics of the alignment.
Args:
infile (str): Alignment file name
Returns:
dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.
"""
alignments = list(AlignIO.parse(infile, "emboss"))
alignment_properties = defaultdict(dict)
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == '1':
a_id = parts[1].strip()
if key == '2':
b_id = parts[1].strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment_properties[a_id + '_' + b_id]['identity'] = ident_num
alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num
alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num
alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment_properties[a_id + '_' + b_id]['score'] = score
# And read in another line...
line = f.readline()
return alignment_properties | python | def needle_statistics(infile):
"""Reads in a needle alignment file and spits out statistics of the alignment.
Args:
infile (str): Alignment file name
Returns:
dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc.
"""
alignments = list(AlignIO.parse(infile, "emboss"))
alignment_properties = defaultdict(dict)
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == '1':
a_id = parts[1].strip()
if key == '2':
b_id = parts[1].strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment_properties[a_id + '_' + b_id]['identity'] = ident_num
alignment_properties[a_id + '_' + b_id]['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment_properties[a_id + '_' + b_id]['similarity'] = sim_num
alignment_properties[a_id + '_' + b_id]['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment_properties[a_id + '_' + b_id]['gaps'] = gap_num
alignment_properties[a_id + '_' + b_id]['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment_properties[a_id + '_' + b_id]['score'] = score
# And read in another line...
line = f.readline()
return alignment_properties | [
"def",
"needle_statistics",
"(",
"infile",
")",
":",
"alignments",
"=",
"list",
"(",
"AlignIO",
".",
"parse",
"(",
"infile",
",",
"\"emboss\"",
")",
")",
"alignment_properties",
"=",
"defaultdict",
"(",
"dict",
")",
"with",
"open",
"(",
"infile",
")",
"as"... | Reads in a needle alignment file and spits out statistics of the alignment.
Args:
infile (str): Alignment file name
Returns:
dict: alignment_properties - a dictionary telling you the number of gaps, identity, etc. | [
"Reads",
"in",
"a",
"needle",
"alignment",
"file",
"and",
"spits",
"out",
"statistics",
"of",
"the",
"alignment",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L584-L641 | train | 28,996 |
SBRG/ssbio | ssbio/protein/sequence/utils/alignment.py | needle_statistics_alignio | def needle_statistics_alignio(infile):
"""Reads in a needle alignment file and returns an AlignIO object with annotations
Args:
infile (str): Alignment file name
Returns:
AlignIO: annotated AlignIO object
"""
alignments = list(AlignIO.parse(infile, "emboss"))
if len(alignments) > 1:
raise ValueError('Alignment file contains more than one pairwise alignment')
alignment = alignments[0]
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment.annotations['identity'] = ident_num
alignment.annotations['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment.annotations['similarity'] = sim_num
alignment.annotations['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment.annotations['gaps'] = gap_num
alignment.annotations['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment.annotations['score'] = score
# And read in another line...
line = f.readline()
return alignment | python | def needle_statistics_alignio(infile):
"""Reads in a needle alignment file and returns an AlignIO object with annotations
Args:
infile (str): Alignment file name
Returns:
AlignIO: annotated AlignIO object
"""
alignments = list(AlignIO.parse(infile, "emboss"))
if len(alignments) > 1:
raise ValueError('Alignment file contains more than one pairwise alignment')
alignment = alignments[0]
with open(infile) as f:
line = f.readline()
for i in range(len(alignments)):
while line.rstrip() != "#=======================================":
line = f.readline()
if not line:
raise StopIteration
while line[0] == "#":
# Read in the rest of this alignment header,
# try and discover the number of records expected and their length
parts = line[1:].split(":", 1)
key = parts[0].lower().strip()
if key == 'identity':
ident_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
ident_num = int(ident_parse[0].split('/')[0])
ident_percent = float(ident_parse[1])
alignment.annotations['identity'] = ident_num
alignment.annotations['percent_identity'] = ident_percent
if key == 'similarity':
sim_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
sim_num = int(sim_parse[0].split('/')[0])
sim_percent = float(sim_parse[1])
alignment.annotations['similarity'] = sim_num
alignment.annotations['percent_similarity'] = sim_percent
if key == 'gaps':
gap_parse = parts[1].strip().replace('(','').replace(')','').replace('%','').split()
gap_num = int(gap_parse[0].split('/')[0])
gap_percent = float(gap_parse[1])
alignment.annotations['gaps'] = gap_num
alignment.annotations['percent_gaps'] = gap_percent
if key == 'score':
score = float(parts[1].strip())
alignment.annotations['score'] = score
# And read in another line...
line = f.readline()
return alignment | [
"def",
"needle_statistics_alignio",
"(",
"infile",
")",
":",
"alignments",
"=",
"list",
"(",
"AlignIO",
".",
"parse",
"(",
"infile",
",",
"\"emboss\"",
")",
")",
"if",
"len",
"(",
"alignments",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Alignment fi... | Reads in a needle alignment file and returns an AlignIO object with annotations
Args:
infile (str): Alignment file name
Returns:
AlignIO: annotated AlignIO object | [
"Reads",
"in",
"a",
"needle",
"alignment",
"file",
"and",
"returns",
"an",
"AlignIO",
"object",
"with",
"annotations"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/alignment.py#L644-L701 | train | 28,997 |
SBRG/ssbio | ssbio/protein/structure/utils/foldx.py | FoldX.run_repair_pdb | def run_repair_pdb(self, silent=False, force_rerun=False):
"""Run FoldX RepairPDB on this PDB file.
Original command::
foldx --command=RepairPDB --pdb=4bxi.pdb
Args:
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
"""
# Create RepairPDB command
foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)
# Repaired PDB output file name
foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])
# Run RepairPDB
ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)
# TODO: write stdout/stderr to log file somewhere!
self.repaired_pdb_outfile = foldx_repair_outfile | python | def run_repair_pdb(self, silent=False, force_rerun=False):
"""Run FoldX RepairPDB on this PDB file.
Original command::
foldx --command=RepairPDB --pdb=4bxi.pdb
Args:
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists.
"""
# Create RepairPDB command
foldx_repair_pdb = 'foldx --command=RepairPDB --pdb={}'.format(self.pdb_file)
# Repaired PDB output file name
foldx_repair_outfile = '{}_Repair.pdb'.format(op.splitext(self.pdb_file)[0])
# Run RepairPDB
ssbio.utils.command_runner(shell_command=foldx_repair_pdb, force_rerun_flag=force_rerun, silent=silent,
outfile_checker=foldx_repair_outfile, cwd=self.foldx_dir)
# TODO: write stdout/stderr to log file somewhere!
self.repaired_pdb_outfile = foldx_repair_outfile | [
"def",
"run_repair_pdb",
"(",
"self",
",",
"silent",
"=",
"False",
",",
"force_rerun",
"=",
"False",
")",
":",
"# Create RepairPDB command",
"foldx_repair_pdb",
"=",
"'foldx --command=RepairPDB --pdb={}'",
".",
"format",
"(",
"self",
".",
"pdb_file",
")",
"# Repaire... | Run FoldX RepairPDB on this PDB file.
Original command::
foldx --command=RepairPDB --pdb=4bxi.pdb
Args:
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX RepairPDB should be rerun even if a repaired file exists. | [
"Run",
"FoldX",
"RepairPDB",
"on",
"this",
"PDB",
"file",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L107-L131 | train | 28,998 |
SBRG/ssbio | ssbio/protein/structure/utils/foldx.py | FoldX.create_mutation_file | def create_mutation_file(self, list_of_tuples):
"""Create the FoldX file 'individual_list.txt' to run BuildModel upon.
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::
[
(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')), # Mutation group 1
(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S')) # Mutation group 2
]
"""
self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')
idx = 1
with open(self.mutation_infile, 'w') as f:
for mutant_group in list_of_tuples:
# Write the mutation string to the file
mutstring = ''.join(list(map(lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3]), mutant_group)))
f.write(mutstring + '\n')
# Also keep track of the index being used for this mutation
self.mutation_index_to_group[idx] = mutant_group
idx += 1 | python | def create_mutation_file(self, list_of_tuples):
"""Create the FoldX file 'individual_list.txt' to run BuildModel upon.
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::
[
(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')), # Mutation group 1
(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S')) # Mutation group 2
]
"""
self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')
idx = 1
with open(self.mutation_infile, 'w') as f:
for mutant_group in list_of_tuples:
# Write the mutation string to the file
mutstring = ''.join(list(map(lambda x: '{}{}{}{};'.format(x[0], x[1], x[2], x[3]), mutant_group)))
f.write(mutstring + '\n')
# Also keep track of the index being used for this mutation
self.mutation_index_to_group[idx] = mutant_group
idx += 1 | [
"def",
"create_mutation_file",
"(",
"self",
",",
"list_of_tuples",
")",
":",
"self",
".",
"mutation_infile",
"=",
"op",
".",
"join",
"(",
"self",
".",
"foldx_dir",
",",
"'individual_list.txt'",
")",
"idx",
"=",
"1",
"with",
"open",
"(",
"self",
".",
"mutat... | Create the FoldX file 'individual_list.txt' to run BuildModel upon.
Args:
list_of_tuples (list): A list of tuples indicating mutation groups to carry out BuildModel upon. Example::
[
(('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')), # Mutation group 1
(('S', 'A', 321, 'R'), ('T', 'A', 345, 'S')) # Mutation group 2
] | [
"Create",
"the",
"FoldX",
"file",
"individual_list",
".",
"txt",
"to",
"run",
"BuildModel",
"upon",
"."
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/utils/foldx.py#L133-L158 | train | 28,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.